Diffstat (limited to 'numpy')
-rw-r--r--  numpy/add_newdocs.py                           |   16
-rw-r--r--  numpy/core/code_generators/generate_umath.py   |   50
-rw-r--r--  numpy/core/code_generators/ufunc_docstrings.py |    2
-rw-r--r--  numpy/core/include/numpy/fenv/fenv.c           |   38
-rw-r--r--  numpy/core/include/numpy/fenv/fenv.h           |  224
-rw-r--r--  numpy/core/include/numpy/npy_math.h            |   43
-rw-r--r--  numpy/core/numeric.py                          |   55
-rw-r--r--  numpy/core/records.py                          |   96
-rw-r--r--  numpy/core/setup.py                            |    6
-rw-r--r--  numpy/core/setup_common.py                     |   33
-rw-r--r--  numpy/core/src/multiarray/cblasfuncs.c         |    2
-rw-r--r--  numpy/core/src/multiarray/descriptor.c         |   48
-rw-r--r--  numpy/core/src/multiarray/scalartypes.c.src    |    9
-rw-r--r--  numpy/core/src/multiarray/shape.c              |    2
-rw-r--r--  numpy/core/src/npymath/ieee754.c.src           |    6
-rw-r--r--  numpy/core/src/npymath/npy_math_complex.c.src  | 1651
-rw-r--r--  numpy/core/src/private/npy_config.h            |   53
-rw-r--r--  numpy/core/src/umath/funcs.inc.src             |  358
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c   |    2
-rw-r--r--  numpy/core/src/umath/umath_tests.c.src         |    3
-rw-r--r--  numpy/core/tests/test_numeric.py               |   13
-rw-r--r--  numpy/core/tests/test_records.py               |   47
-rw-r--r--  numpy/core/tests/test_umath.py                 |  104
-rw-r--r--  numpy/distutils/fcompiler/intel.py             |    2
-rw-r--r--  numpy/distutils/lib2def.py                     |    2
-rw-r--r--  numpy/doc/structured_arrays.py                 |    6
-rw-r--r--  numpy/fft/fftpack.c                            |   11
-rw-r--r--  numpy/fft/fftpack_litemodule.c                 |    5
-rw-r--r--  numpy/lib/_iotools.py                          |    7
-rw-r--r--  numpy/lib/arraysetops.py                       |    7
-rw-r--r--  numpy/lib/financial.py                         |   10
-rw-r--r--  numpy/lib/npyio.py                             |   43
-rw-r--r--  numpy/lib/setup.py                             |   12
-rw-r--r--  numpy/lib/shape_base.py                        |    2
-rw-r--r--  numpy/lib/tests/test__iotools.py               |    6
-rw-r--r--  numpy/lib/tests/test_financial.py              |   41
-rw-r--r--  numpy/lib/tests/test_io.py                     |   95
-rw-r--r--  numpy/lib/tests/test_twodim_base.py            |   31
-rw-r--r--  numpy/lib/twodim_base.py                       |    8
-rw-r--r--  numpy/ma/core.py                               |   35
-rw-r--r--  numpy/ma/extras.py                             |   15
-rw-r--r--  numpy/ma/tests/test_core.py                    |  227
-rw-r--r--  numpy/ma/tests/test_extras.py                  |   15
-rw-r--r--  numpy/random/mtrand/mtrand.pyx                 |    8
-rw-r--r--  numpy/testing/utils.py                         |    7
45 files changed, 2511 insertions(+), 945 deletions(-)
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 72aaf5ec7..7dd8c5649 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -664,13 +664,13 @@ add_newdoc('numpy.core.multiarray', 'array',
nested sequence, or if a copy is needed to satisfy any of the other
requirements (`dtype`, `order`, etc.).
order : {'C', 'F', 'A'}, optional
- Specify the order of the array. If order is 'C' (default), then the
- array will be in C-contiguous order (last-index varies the
- fastest). If order is 'F', then the returned array
- will be in Fortran-contiguous order (first-index varies the
- fastest). If order is 'A', then the returned array may
- be in any order (either C-, Fortran-contiguous, or even
- discontiguous).
+ Specify the order of the array. If order is 'C', then the array
+ will be in C-contiguous order (last-index varies the fastest).
+ If order is 'F', then the returned array will be in
+ Fortran-contiguous order (first-index varies the fastest).
+ If order is 'A' (default), then the returned array may be
+ in any order (either C-, Fortran-contiguous, or even discontiguous),
+ unless a copy is required, in which case it will be C-contiguous.
subok : bool, optional
If True, then sub-classes will be passed-through, otherwise
the returned array will be forced to be a base-class array (default).
@@ -885,7 +885,7 @@ add_newdoc('numpy.core.multiarray', 'zeros',
>>> np.zeros(5)
array([ 0., 0., 0., 0., 0.])
- >>> np.zeros((5,), dtype=numpy.int)
+ >>> np.zeros((5,), dtype=np.int)
array([0, 0, 0, 0, 0])
>>> np.zeros((2, 1))
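
(Illustration, not part of the patch: the 'C'/'F' cases described in the reworded `order` docstring can be checked directly through the result's flags; NumPy-style doctest, any recent NumPy.)

    >>> import numpy as np
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.array(a, order='C').flags['C_CONTIGUOUS']
    True
    >>> np.array(a, order='F').flags['F_CONTIGUOUS']
    True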
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 9dbeb76cd..e3c4fbaa2 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -33,9 +33,9 @@ class TypeDescription(object):
----------
type : str
Character representing the nominal type.
- func_data : str or None or FullTypeDescr, optional
- The string representing the expression to insert into the data array, if
- any.
+ func_data : str or None or FullTypeDescr or FuncNameSuffix, optional
+ The string representing the expression to insert into the data
+ array, if any.
in_ : str or None, optional
The typecode(s) of the inputs.
out : str or None, optional
@@ -127,10 +127,11 @@ class Ufunc(object):
import string
if sys.version_info[0] < 3:
- UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
+ UPPER_TABLE = string.maketrans(string.ascii_lowercase,
+ string.ascii_uppercase)
else:
UPPER_TABLE = bytes.maketrans(bytes(string.ascii_lowercase, "ascii"),
- bytes(string.ascii_uppercase, "ascii"))
+ bytes(string.ascii_uppercase, "ascii"))
def english_upper(s):
""" Apply English case rules to convert ASCII strings to all upper case.
@@ -151,7 +152,8 @@ def english_upper(s):
Examples
--------
>>> from numpy.lib.utils import english_upper
- >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
+ >>> s = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_'
+ >>> english_upper(s)
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
>>> english_upper('')
''
@@ -870,9 +872,9 @@ chartotype2 = {'e': 'ee_e',
# 3) add function.
def make_arrays(funcdict):
- # functions array contains an entry for every type implemented
- # NULL should be placed where PyUfunc_ style function will be filled in later
- #
+ # functions array contains an entry for every type implemented NULL
+ # should be placed where PyUfunc_ style function will be filled in
+ # later
code1list = []
code2list = []
names = sorted(funcdict.keys())
@@ -891,24 +893,25 @@ def make_arrays(funcdict):
thedict = chartotype1 # one input and one output
for t in uf.type_descriptions:
- if t.func_data not in (None, FullTypeDescr) and not isinstance(t.func_data, FuncNameSuffix):
+ if (t.func_data not in (None, FullTypeDescr) and
+ not isinstance(t.func_data, FuncNameSuffix)):
funclist.append('NULL')
astype = ''
if not t.astype is None:
astype = '_As_%s' % thedict[t.astype]
- astr = '%s_functions[%d] = PyUFunc_%s%s;' % \
- (name, k, thedict[t.type], astype)
+ astr = ('%s_functions[%d] = PyUFunc_%s%s;' %
+ (name, k, thedict[t.type], astype))
code2list.append(astr)
if t.type == 'O':
- astr = '%s_data[%d] = (void *) %s;' % \
- (name, k, t.func_data)
+ astr = ('%s_data[%d] = (void *) %s;' %
+ (name, k, t.func_data))
code2list.append(astr)
datalist.append('(void *)NULL')
elif t.type == 'P':
datalist.append('(void *)"%s"' % t.func_data)
else:
- astr = '%s_data[%d] = (void *) %s;' % \
- (name, k, t.func_data)
+ astr = ('%s_data[%d] = (void *) %s;' %
+ (name, k, t.func_data))
code2list.append(astr)
datalist.append('(void *)NULL')
#datalist.append('(void *)%s' % t.func_data)
@@ -916,11 +919,13 @@ def make_arrays(funcdict):
elif t.func_data is FullTypeDescr:
tname = english_upper(chartoname[t.type])
datalist.append('(void *)NULL')
- funclist.append('%s_%s_%s_%s' % (tname, t.in_, t.out, name))
+ funclist.append(
+ '%s_%s_%s_%s' % (tname, t.in_, t.out, name))
elif isinstance(t.func_data, FuncNameSuffix):
datalist.append('(void *)NULL')
tname = english_upper(chartoname[t.type])
- funclist.append('%s_%s_%s' % (tname, name, t.func_data.suffix))
+ funclist.append(
+ '%s_%s_%s' % (tname, name, t.func_data.suffix))
else:
datalist.append('(void *)NULL')
tname = english_upper(chartoname[t.type])
@@ -934,11 +939,11 @@ def make_arrays(funcdict):
funcnames = ', '.join(funclist)
signames = ', '.join(siglist)
datanames = ', '.join(datalist)
- code1list.append("static PyUFuncGenericFunction %s_functions[] = { %s };" \
+ code1list.append("static PyUFuncGenericFunction %s_functions[] = {%s};"
% (name, funcnames))
- code1list.append("static void * %s_data[] = { %s };" \
+ code1list.append("static void * %s_data[] = {%s};"
% (name, datanames))
- code1list.append("static char %s_signatures[] = { %s };" \
+ code1list.append("static char %s_signatures[] = {%s};"
% (name, signames))
return "\n".join(code1list), "\n".join(code2list)
@@ -972,8 +977,7 @@ r"""f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d,
name, docstring))
if uf.typereso != None:
mlist.append(
- r"((PyUFuncObject *)f)->type_resolver = &%s;" %
- uf.typereso)
+ r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso)
mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name)
mlist.append(r"""Py_DECREF(f);""")
code3list.append('\n'.join(mlist))
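
(Aside, not part of the patch: UPPER_TABLE above is just an ASCII-only translation table, so english_upper is roughly equivalent to the following Python 3 sketch using str.maketrans.)

    >>> import string
    >>> table = str.maketrans(string.ascii_lowercase, string.ascii_uppercase)
    >>> 'arctan2_ff_f'.translate(table)
    'ARCTAN2_FF_F'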
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 328e43ca6..bfa3ad221 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -2310,7 +2310,7 @@ add_newdoc('numpy.core.umath', 'fmax',
Returns
-------
y : ndarray or scalar
- The minimum of `x1` and `x2`, element-wise. Returns scalar if
+ The maximum of `x1` and `x2`, element-wise. Returns scalar if
both `x1` and `x2` are scalars.
See Also
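
(Illustration of the corrected wording, not part of the patch: np.fmax returns the element-wise maximum and prefers the non-NaN operand when one input is NaN.)

    >>> import numpy as np
    >>> np.fmax([2, 3, 4], [1, 5, 2])
    array([2, 5, 4])
    >>> np.fmax(np.nan, 1.0)
    1.0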
diff --git a/numpy/core/include/numpy/fenv/fenv.c b/numpy/core/include/numpy/fenv/fenv.c
deleted file mode 100644
index 9a8d1be10..000000000
--- a/numpy/core/include/numpy/fenv/fenv.c
+++ /dev/null
@@ -1,38 +0,0 @@
-/*-
- * Copyright (c) 2004 David Schultz <das@FreeBSD.ORG>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#include <sys/types.h>
-#include "fenv.h"
-
-const fenv_t npy__fe_dfl_env = {
- 0xffff0000,
- 0xffff0000,
- 0xffffffff,
- { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff }
-};
diff --git a/numpy/core/include/numpy/fenv/fenv.h b/numpy/core/include/numpy/fenv/fenv.h
deleted file mode 100644
index 79a215fc3..000000000
--- a/numpy/core/include/numpy/fenv/fenv.h
+++ /dev/null
@@ -1,224 +0,0 @@
-/*-
- * Copyright (c) 2004 David Schultz <das@FreeBSD.ORG>
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _FENV_H_
-#define _FENV_H_
-
-#include <sys/cdefs.h>
-#include <sys/types.h>
-
-typedef struct {
- __uint32_t __control;
- __uint32_t __status;
- __uint32_t __tag;
- char __other[16];
-} fenv_t;
-
-typedef __uint16_t fexcept_t;
-
-/* Exception flags */
-#define FE_INVALID 0x01
-#define FE_DENORMAL 0x02
-#define FE_DIVBYZERO 0x04
-#define FE_OVERFLOW 0x08
-#define FE_UNDERFLOW 0x10
-#define FE_INEXACT 0x20
-#define FE_ALL_EXCEPT (FE_DIVBYZERO | FE_DENORMAL | FE_INEXACT | \
- FE_INVALID | FE_OVERFLOW | FE_UNDERFLOW)
-
-/* Rounding modes */
-#define FE_TONEAREST 0x0000
-#define FE_DOWNWARD 0x0400
-#define FE_UPWARD 0x0800
-#define FE_TOWARDZERO 0x0c00
-#define _ROUND_MASK (FE_TONEAREST | FE_DOWNWARD | \
- FE_UPWARD | FE_TOWARDZERO)
-
-__BEGIN_DECLS
-
-/* Default floating-point environment */
-extern const fenv_t npy__fe_dfl_env;
-#define FE_DFL_ENV (&npy__fe_dfl_env)
-
-#define __fldcw(__cw) __asm __volatile("fldcw %0" : : "m" (__cw))
-#define __fldenv(__env) __asm __volatile("fldenv %0" : : "m" (__env))
-#define __fnclex() __asm __volatile("fnclex")
-#define __fnstenv(__env) __asm __volatile("fnstenv %0" : "=m" (*(__env)))
-#define __fnstcw(__cw) __asm __volatile("fnstcw %0" : "=m" (*(__cw)))
-#define __fnstsw(__sw) __asm __volatile("fnstsw %0" : "=am" (*(__sw)))
-#define __fwait() __asm __volatile("fwait")
-
-static __inline int
-feclearexcept(int __excepts)
-{
- fenv_t __env;
-
- if (__excepts == FE_ALL_EXCEPT) {
- __fnclex();
- } else {
- __fnstenv(&__env);
- __env.__status &= ~__excepts;
- __fldenv(__env);
- }
- return (0);
-}
-
-static __inline int
-fegetexceptflag(fexcept_t *__flagp, int __excepts)
-{
- __uint16_t __status;
-
- __fnstsw(&__status);
- *__flagp = __status & __excepts;
- return (0);
-}
-
-static __inline int
-fesetexceptflag(const fexcept_t *__flagp, int __excepts)
-{
- fenv_t __env;
-
- __fnstenv(&__env);
- __env.__status &= ~__excepts;
- __env.__status |= *__flagp & __excepts;
- __fldenv(__env);
- return (0);
-}
-
-static __inline int
-feraiseexcept(int __excepts)
-{
- fexcept_t __ex = __excepts;
-
- fesetexceptflag(&__ex, __excepts);
- __fwait();
- return (0);
-}
-
-static __inline int
-fetestexcept(int __excepts)
-{
- __uint16_t __status;
-
- __fnstsw(&__status);
- return (__status & __excepts);
-}
-
-static __inline int
-fegetround(void)
-{
- int __control;
-
- __fnstcw(&__control);
- return (__control & _ROUND_MASK);
-}
-
-static __inline int
-fesetround(int __round)
-{
- int __control;
-
- if (__round & ~_ROUND_MASK)
- return (-1);
- __fnstcw(&__control);
- __control &= ~_ROUND_MASK;
- __control |= __round;
- __fldcw(__control);
- return (0);
-}
-
-static __inline int
-fegetenv(fenv_t *__envp)
-{
- int __control;
-
- /*
- * fnstenv masks all exceptions, so we need to save and
- * restore the control word to avoid this side effect.
- */
- __fnstcw(&__control);
- __fnstenv(__envp);
- __fldcw(__control);
- return (0);
-}
-
-static __inline int
-feholdexcept(fenv_t *__envp)
-{
-
- __fnstenv(__envp);
- __fnclex();
- return (0);
-}
-
-static __inline int
-fesetenv(const fenv_t *__envp)
-{
-
- __fldenv(*__envp);
- return (0);
-}
-
-static __inline int
-feupdateenv(const fenv_t *__envp)
-{
- __uint16_t __status;
-
- __fnstsw(&__status);
- __fldenv(*__envp);
- feraiseexcept(__status & FE_ALL_EXCEPT);
- return (0);
-}
-
-#if __BSD_VISIBLE
-
-static __inline int
-fesetmask(int __mask)
-{
- int __control;
-
- __fnstcw(&__control);
- __mask = (__control | FE_ALL_EXCEPT) & ~__mask;
- __fldcw(__mask);
- return (~__control & FE_ALL_EXCEPT);
-}
-
-static __inline int
-fegetmask(void)
-{
- int __control;
-
- __fnstcw(&__control);
- return (~__control & FE_ALL_EXCEPT);
-}
-
-#endif /* __BSD_VISIBLE */
-
-__END_DECLS
-
-#endif /* !_FENV_H_ */
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index 0137e0556..8f5d8e6c7 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -259,7 +259,7 @@ float npy_nextafterf(float x, float y);
float npy_spacingf(float x);
/*
- * float C99 math functions
+ * long double C99 math functions
*/
npy_longdouble npy_sinl(npy_longdouble x);
@@ -425,6 +425,19 @@ npy_cdouble npy_csqrt(npy_cdouble z);
npy_cdouble npy_ccos(npy_cdouble z);
npy_cdouble npy_csin(npy_cdouble z);
+npy_cdouble npy_ctan(npy_cdouble z);
+
+npy_cdouble npy_ccosh(npy_cdouble z);
+npy_cdouble npy_csinh(npy_cdouble z);
+npy_cdouble npy_ctanh(npy_cdouble z);
+
+npy_cdouble npy_cacos(npy_cdouble z);
+npy_cdouble npy_casin(npy_cdouble z);
+npy_cdouble npy_catan(npy_cdouble z);
+
+npy_cdouble npy_cacosh(npy_cdouble z);
+npy_cdouble npy_casinh(npy_cdouble z);
+npy_cdouble npy_catanh(npy_cdouble z);
/*
* Single precision complex functions
@@ -440,6 +453,20 @@ npy_cfloat npy_csqrtf(npy_cfloat z);
npy_cfloat npy_ccosf(npy_cfloat z);
npy_cfloat npy_csinf(npy_cfloat z);
+npy_cfloat npy_ctanf(npy_cfloat z);
+
+npy_cfloat npy_ccoshf(npy_cfloat z);
+npy_cfloat npy_csinhf(npy_cfloat z);
+npy_cfloat npy_ctanhf(npy_cfloat z);
+
+npy_cfloat npy_cacosf(npy_cfloat z);
+npy_cfloat npy_casinf(npy_cfloat z);
+npy_cfloat npy_catanf(npy_cfloat z);
+
+npy_cfloat npy_cacoshf(npy_cfloat z);
+npy_cfloat npy_casinhf(npy_cfloat z);
+npy_cfloat npy_catanhf(npy_cfloat z);
+
/*
* Extended precision complex functions
@@ -455,6 +482,20 @@ npy_clongdouble npy_csqrtl(npy_clongdouble z);
npy_clongdouble npy_ccosl(npy_clongdouble z);
npy_clongdouble npy_csinl(npy_clongdouble z);
+npy_clongdouble npy_ctanl(npy_clongdouble z);
+
+npy_clongdouble npy_ccoshl(npy_clongdouble z);
+npy_clongdouble npy_csinhl(npy_clongdouble z);
+npy_clongdouble npy_ctanhl(npy_clongdouble z);
+
+npy_clongdouble npy_cacosl(npy_clongdouble z);
+npy_clongdouble npy_casinl(npy_clongdouble z);
+npy_clongdouble npy_catanl(npy_clongdouble z);
+
+npy_clongdouble npy_cacoshl(npy_clongdouble z);
+npy_clongdouble npy_casinhl(npy_clongdouble z);
+npy_clongdouble npy_catanhl(npy_clongdouble z);
+
/*
* Functions that set the floating point error
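
(Aside, not part of the patch: these npy_* declarations back the complex-valued ufuncs, so their behaviour can be cross-checked against Python's cmath; a small hedged example.)

    >>> import numpy as np, cmath
    >>> z = 0.3 + 0.4j
    >>> np.allclose(np.arctan(z), cmath.atan(z))
    True
    >>> np.allclose(np.arctanh(z), cmath.atanh(z))
    True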
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index a464562c4..7a0fa4b62 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1024,9 +1024,8 @@ def outer(a, b, out=None):
Second input vector. Input is flattened if
not already 1-dimensional.
out : (M, N) ndarray, optional
- A location where the result is stored
-
.. versionadded:: 1.9.0
+ A location where the result is stored
Returns
-------
@@ -2206,42 +2205,7 @@ def identity(n, dtype=None):
from numpy import eye
return eye(n, dtype=dtype)
-def _allclose_points(a, b, rtol=1.e-5, atol=1.e-8):
- """
- This is the point-wise inner calculation of 'allclose', which is subtly
- different from 'isclose'. Provided as a comparison routine for use in
- testing.assert_allclose.
- See those routines for further details.
-
- """
- x = array(a, copy=False, ndmin=1)
- y = array(b, copy=False, ndmin=1)
-
- # make sure y is an inexact type to avoid abs(MIN_INT); will cause
- # casting of x later.
- dtype = multiarray.result_type(y, 1.)
- y = array(y, dtype=dtype, copy=False)
-
- xinf = isinf(x)
- yinf = isinf(y)
- if any(xinf) or any(yinf):
- # Check that x and y have inf's only in the same positions
- if not all(xinf == yinf):
- return False
- # Check that sign of inf's in x and y is the same
- if not all(x[xinf] == y[xinf]):
- return False
-
- x = x[~xinf]
- y = y[~xinf]
-
- # ignore invalid fpe's
- with errstate(invalid='ignore'):
- r = less_equal(abs(x - y), atol + rtol * abs(y))
-
- return r
-
-def allclose(a, b, rtol=1.e-5, atol=1.e-8):
+def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
@@ -2262,6 +2226,10 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8):
The relative tolerance parameter (see Notes).
atol : float
The absolute tolerance parameter (see Notes).
+ equal_nan : bool
+ .. versionadded:: 1.10.0
+ Whether to compare NaN's as equal. If True, NaN's in `a` will be
+ considered equal to NaN's in `b` in the output array.
Returns
-------
@@ -2294,9 +2262,11 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8):
False
>>> np.allclose([1.0, np.nan], [1.0, np.nan])
False
+ >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
+ True
"""
- return all(_allclose_points(a, b, rtol=rtol, atol=atol))
+ return all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
@@ -2366,6 +2336,13 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
x = array(a, copy=False, subok=True, ndmin=1)
y = array(b, copy=False, subok=True, ndmin=1)
+
+ # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
+ # This will cause casting of x later. Also, make sure to allow subclasses
+ # (e.g., for numpy.ma).
+ dt = multiarray.result_type(y, 1.)
+ y = array(y, dtype=dt, copy=False, subok=True)
+
xfin = isfinite(x)
yfin = isfinite(y)
if all(xfin) and all(yfin):
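
(Illustration, not part of the patch: with allclose now implemented on top of isclose, the two agree element-wise and both honour equal_nan.)

    >>> import numpy as np
    >>> a = [1.0, np.nan]
    >>> np.allclose(a, a)
    False
    >>> np.allclose(a, a, equal_nan=True)
    True
    >>> np.isclose(a, a, equal_nan=True).all()
    True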
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 23680711f..243645436 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -40,7 +40,6 @@ import sys
import os
from . import numeric as sb
-from .defchararray import chararray
from . import numerictypes as nt
from numpy.compat import isfileobj, bytes, long
@@ -215,6 +214,12 @@ class format_parser:
class record(nt.void):
"""A data-type scalar that allows field access as attribute lookup.
"""
+
+ # manually set name and module so that this class's type shows up
+ # as numpy.record when printed
+ __name__ = 'record'
+ __module__ = 'numpy'
+
def __repr__(self):
return self.__str__()
@@ -232,17 +237,15 @@ class record(nt.void):
res = fielddict.get(attr, None)
if res:
obj = self.getfield(*res[:2])
- # if it has fields return a recarray,
- # if it's a string ('SU') return a chararray
+ # if it has fields return a record,
# otherwise return the object
try:
dt = obj.dtype
except AttributeError:
+ #happens if field is Object type
return obj
if dt.fields:
- return obj.view(obj.__class__)
- if dt.char in 'SU':
- return obj.view(chararray)
+ return obj.view((record, obj.dtype.descr))
return obj
else:
raise AttributeError("'record' object has no "
@@ -388,6 +391,12 @@ class recarray(ndarray):
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
"""
+
+ # manually set name and module so that this class's type shows
+ # up as "numpy.recarray" when printed
+ __name__ = 'recarray'
+ __module__ = 'numpy'
+
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False, order='C'):
@@ -406,29 +415,37 @@ class recarray(ndarray):
return self
def __getattribute__(self, attr):
+ # See if ndarray has this attr, and return it if so. (note that this
+ # means a field with the same name as an ndarray attr cannot be
+ # accessed by attribute).
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
+
+ # look for a field with this name
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
- raise AttributeError("record array has no attribute %s" % attr)
+ raise AttributeError("recarray has no attribute %s" % attr)
obj = self.getfield(*res)
- # if it has fields return a recarray, otherwise return
- # normal array
- if obj.dtype.fields:
- return obj
- if obj.dtype.char in 'SU':
- return obj.view(chararray)
- return obj.view(ndarray)
-# Save the dictionary
-# If the attr is a field name and not in the saved dictionary
-# Undo any "setting" of the attribute and do a setfield
-# Thus, you can't create attributes on-the-fly that are field names.
+ # At this point obj will always be a recarray, since (see
+ # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
+ # non-structured, convert it to an ndarray. If obj is structured leave
+ # it as a recarray, but make sure to convert to the same dtype.type (eg
+ # to preserve numpy.record type if present), since nested structured
+ # fields do not inherit type.
+ if obj.dtype.fields:
+ return obj.view(dtype=(self.dtype.type, obj.dtype.descr))
+ else:
+ return obj.view(ndarray)
+ # Save the dictionary.
+ # If the attr is a field name and not in the saved dictionary
+ # Undo any "setting" of the attribute and do a setfield
+ # Thus, you can't create attributes on-the-fly that are field names.
def __setattr__(self, attr, val):
newattr = attr not in self.__dict__
try:
@@ -456,13 +473,40 @@ class recarray(ndarray):
def __getitem__(self, indx):
obj = ndarray.__getitem__(self, indx)
- if (isinstance(obj, ndarray) and obj.dtype.isbuiltin):
- return obj.view(ndarray)
- return obj
- def __repr__(self) :
- ret = ndarray.__repr__(self)
- return ret.replace("recarray", "rec.array", 1)
+ # copy behavior of getattr, except that here
+ # we might also be returning a single element
+ if isinstance(obj, ndarray):
+ if obj.dtype.fields:
+ return obj.view(dtype=(self.dtype.type, obj.dtype.descr))
+ else:
+ return obj.view(type=ndarray)
+ else:
+ # return a single element
+ return obj
+
+ def __repr__(self):
+ # get data/shape string. logic taken from numeric.array_repr
+ if self.size > 0 or self.shape==(0,):
+ lst = sb.array2string(self, separator=', ')
+ else:
+ # show zero-length shape unless it is (0,)
+ lst = "[], shape=%s" % (repr(self.shape),)
+
+ if self.dtype.type is record:
+ # If this is a full record array (has numpy.record dtype),
+ # represent it using the rec.array function. Since rec.array
+ # converts dtype to a numpy.record for us, use only dtype.descr,
+ # not repr(dtype).
+ lf = '\n'+' '*len("rec.array(")
+ return ('rec.array(%s, %sdtype=%s)' %
+ (lst, lf, repr(self.dtype.descr)))
+ else:
+ # otherwise represent it using np.array plus a view
+ # (There is currently (v1.10) no other easy way to create it)
+ lf = '\n'+' '*len("array(")
+ return ('array(%s, %sdtype=%s).view(numpy.recarray)' %
+ (lst, lf, str(self.dtype)))
def field(self, attr, val=None):
if isinstance(attr, int):
@@ -477,8 +521,6 @@ class recarray(ndarray):
obj = self.getfield(*res)
if obj.dtype.fields:
return obj
- if obj.dtype.char in 'SU':
- return obj.view(chararray)
return obj.view(ndarray)
else:
return self.setfield(val, *res)
@@ -589,7 +631,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
>>> r.col1
array([456, 2])
>>> r.col2
- chararray(['dbe', 'de'],
+ array(['dbe', 'de'],
dtype='|S3')
>>> import pickle
>>> print pickle.loads(pickle.dumps(r))
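
(Illustration, not part of the patch: on this branch, string fields come back as plain ndarrays rather than chararrays, and record scalars keep the numpy.record type.)

    >>> import numpy as np
    >>> r = np.rec.fromrecords([(456, 'dbe'), (2, 'de')], names='col1,col2')
    >>> type(r.col2) is np.ndarray
    True
    >>> isinstance(r[0], np.record)
    True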
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 75d64d81b..7f0649158 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -630,15 +630,9 @@ def configuration(parent_package='',top_path=None):
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
- 'include/numpy/fenv/fenv.c',
- 'include/numpy/fenv/fenv.h',
join(codegen_dir, 'genapi.py'),
]
- # Don't install fenv unless we need them.
- if sys.platform == 'cygwin':
- config.add_data_dir('include/numpy/fenv')
-
#######################################################################
# dummy module #
#######################################################################
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 0b18bc6c6..6abbe5ff2 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -107,6 +107,7 @@ OPTIONAL_HEADERS = [
# sse headers only enabled automatically on amd64/x32 builds
"xmmintrin.h", # SSE
"emmintrin.h", # SSE2
+ "features.h", # for glibc version linux
]
# optional gcc compiler builtins and their call arguments and optional a
@@ -138,23 +139,29 @@ OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
-OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot",
- "copysign", "ftello", "fseeko"]
+OPTIONAL_STDFUNCS_MAYBE = [
+ "expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
+ "ftello", "fseeko"
+ ]
# C99 functions: float and long double versions
-C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor",
- "ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp",
- "expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh",
- "hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp',
- "exp2", "log2", "copysign", "nextafter", "cbrt"]
-
+C99_FUNCS = [
+ "sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor", "ceil",
+ "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp", "expm1",
+ "asin", "acos", "atan", "asinh", "acosh", "atanh", "hypot", "atan2",
+ "pow", "fmod", "modf", 'frexp', 'ldexp', "exp2", "log2", "copysign",
+ "nextafter", "cbrt"
+ ]
C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
-
-C99_COMPLEX_TYPES = ['complex double', 'complex float', 'complex long double']
-
-C99_COMPLEX_FUNCS = ['creal', 'cimag', 'cabs', 'carg', 'cexp', 'csqrt', 'clog',
- 'ccos', 'csin', 'cpow']
+C99_COMPLEX_TYPES = [
+ 'complex double', 'complex float', 'complex long double'
+ ]
+C99_COMPLEX_FUNCS = [
+ "cabs", "cacos", "cacosh", "carg", "casin", "casinh", "catan",
+ "catanh", "ccos", "ccosh", "cexp", "cimag", "clog", "conj", "cpow",
+ "cproj", "creal", "csin", "csinh", "csqrt", "ctan", "ctanh"
+ ]
def fname2def(name):
return "HAVE_%s" % name.upper()
diff --git a/numpy/core/src/multiarray/cblasfuncs.c b/numpy/core/src/multiarray/cblasfuncs.c
index f8f508ed0..9c7c26725 100644
--- a/numpy/core/src/multiarray/cblasfuncs.c
+++ b/numpy/core/src/multiarray/cblasfuncs.c
@@ -685,7 +685,7 @@ cblas_innerproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2)
int j, l, lda, ldb;
int nd;
double prior1, prior2;
- PyArrayObject *ret;
+ PyArrayObject *ret = NULL;
npy_intp dimensions[NPY_MAXDIMS];
PyTypeObject *subtype;
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 9cf66020d..0993190b7 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -3159,6 +3159,8 @@ arraydescr_struct_dict_str(PyArray_Descr *dtype, int includealignedflag)
static PyObject *
arraydescr_struct_str(PyArray_Descr *dtype, int includealignflag)
{
+ PyObject *sub;
+
/*
* The list str representation can't include the 'align=' flag,
* so if it is requested and the struct has the aligned flag set,
@@ -3166,10 +3168,52 @@ arraydescr_struct_str(PyArray_Descr *dtype, int includealignflag)
*/
if (!(includealignflag && (dtype->flags&NPY_ALIGNED_STRUCT)) &&
is_dtype_struct_simple_unaligned_layout(dtype)) {
- return arraydescr_struct_list_str(dtype);
+ sub = arraydescr_struct_list_str(dtype);
+ }
+ else {
+ sub = arraydescr_struct_dict_str(dtype, includealignflag);
+ }
+
+ /* If the data type has a non-void (subclassed) type, show it */
+ if (dtype->type_num == NPY_VOID && dtype->typeobj != &PyVoidArrType_Type) {
+ /*
+ * Note: We cannot get the type name from dtype->typeobj->tp_name
+ * because its value depends on whether the type is dynamically or
+ * statically allocated. Instead use __name__ and __module__.
+ * See https://docs.python.org/2/c-api/typeobj.html.
+ */
+
+ PyObject *str_name, *namestr, *str_module, *modulestr, *ret;
+
+ str_name = PyUString_FromString("__name__");
+ namestr = PyObject_GetAttr((PyObject*)(dtype->typeobj), str_name);
+ Py_DECREF(str_name);
+
+ if (namestr == NULL) {
+ /* this should never happen since types always have __name__ */
+ PyErr_Format(PyExc_RuntimeError,
+ "dtype does not have a __name__ attribute");
+ return NULL;
+ }
+
+ str_module = PyUString_FromString("__module__");
+ modulestr = PyObject_GetAttr((PyObject*)(dtype->typeobj), str_module);
+ Py_DECREF(str_module);
+
+ ret = PyUString_FromString("(");
+ if (modulestr != NULL) {
+ /* Note: if modulestr == NULL, the type is unpicklable */
+ PyUString_ConcatAndDel(&ret, modulestr);
+ PyUString_ConcatAndDel(&ret, PyUString_FromString("."));
+ }
+ PyUString_ConcatAndDel(&ret, namestr);
+ PyUString_ConcatAndDel(&ret, PyUString_FromString(", "));
+ PyUString_ConcatAndDel(&ret, sub);
+ PyUString_ConcatAndDel(&ret, PyUString_FromString(")"));
+ return ret;
}
else {
- return arraydescr_struct_dict_str(dtype, includealignflag);
+ return sub;
}
}
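
(Illustration, not part of the patch: with this change the struct dtype string includes the non-void scalar type; on a little-endian build of this branch the output looks roughly as follows.)

    >>> import numpy as np
    >>> r = np.rec.array([(1, 2.0)], dtype=[('a', '<i4'), ('b', '<f8')])
    >>> print(r.dtype)
    (numpy.record, [('a', '<i4'), ('b', '<f8')])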
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 4099326f1..252e5b726 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -114,12 +114,17 @@ gentype_alloc(PyTypeObject *type, Py_ssize_t nitems)
const size_t size = _PyObject_VAR_SIZE(type, nitems + 1);
obj = (PyObject *)PyObject_Malloc(size);
+ /*
+ * Fixme. Need to check for no memory.
+ * If we don't need to zero memory, we could use
+ * PyObject_{New, NewVar} for this whole function.
+ */
memset(obj, 0, size);
if (type->tp_itemsize == 0) {
- PyObject_INIT(obj, type);
+ PyObject_Init(obj, type);
}
else {
- (void) PyObject_INIT_VAR((PyVarObject *)obj, type, nitems);
+ (void) PyObject_InitVar((PyVarObject *)obj, type, nitems);
}
return obj;
}
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index f1e81ff6b..03bfc6a7a 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -948,7 +948,7 @@ PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order)
/* For KEEPORDER, check if we can make a flattened view */
else {
npy_stride_sort_item strideperm[NPY_MAXDIMS];
- npy_intp stride, base_stride = NPY_MIN_INTP;
+ npy_intp stride = 0, base_stride = NPY_MIN_INTP;
int i, ndim = PyArray_NDIM(arr);
PyArray_CreateSortedStridePerm(PyArray_NDIM(arr),
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index b8f6889e1..0370ea6c7 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -615,13 +615,7 @@ void npy_set_floatstatus_invalid(void)
#elif defined(__GLIBC__) || defined(__APPLE__) || \
defined(__CYGWIN__) || defined(__MINGW32__) || \
(defined(__FreeBSD__) && (__FreeBSD_version >= 502114))
-
-# if defined(__GLIBC__) || defined(__APPLE__) || \
- defined(__MINGW32__) || defined(__FreeBSD__)
# include <fenv.h>
-# elif defined(__CYGWIN__)
-# include "numpy/fenv/fenv.h"
-# endif
int npy_get_floatstatus(void)
{
diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src
index 9cbfd32ae..94cb7e29d 100644
--- a/numpy/core/src/npymath/npy_math_complex.c.src
+++ b/numpy/core/src/npymath/npy_math_complex.c.src
@@ -1,10 +1,13 @@
/*
+ * vim: syntax=c
+ *
* Implement some C99-compatible complex math functions
*
- * Most of the code is taken from the msun library in FreeBSD (HEAD @ 30th June
- * 2009), under the following license:
+ * Most of the code is taken from the msun library in FreeBSD (HEAD @ 4th
+ * October 2013), under the following license:
*
- * Copyright (c) 2007 David Schultz <das@FreeBSD.ORG>
+ * Copyright (c) 2007, 2011 David Schultz <das@FreeBSD.ORG>
+ * Copyright (c) 2012 Stephen Montgomery-Smith <stephen@FreeBSD.ORG>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -31,9 +34,12 @@
#include "npy_math_common.h"
#include "npy_math_private.h"
-/*==========================================================
- * Custom implementation of missing complex C99 functions
- *=========================================================*/
+
+#define raise_inexact() do { volatile npy_float junk = 1 + tiny; } while(0)
+
+
+static npy_float tiny = 3.9443045e-31f;
+
/**begin repeat
* #type = npy_float, npy_double, npy_longdouble#
@@ -41,23 +47,171 @@
* #c = f, , l#
* #C = F, , L#
* #TMAX = FLT_MAX, DBL_MAX, LDBL_MAX#
+ * #TMIN = FLT_MIN, DBL_MIN, LDBL_MIN#
+ * #TMANT_DIG = FLT_MANT_DIG, DBL_MANT_DIG, LDBL_MANT_DIG#
+ * #TEPS = FLT_EPSILON, DBL_EPSILON, LDBL_EPSILON#
+ * #precision = 1, 2, 3#
*/
+
+/*==========================================================
+ * Constants
+ *=========================================================*/
+static const @ctype@ c_1@c@ = {1.0@C@, 0.0};
+static const @ctype@ c_half@c@ = {0.5@C@, 0.0};
+static const @ctype@ c_i@c@ = {0.0, 1.0@C@};
+static const @ctype@ c_ihalf@c@ = {0.0, 0.5@C@};
+
+/*==========================================================
+ * Helper functions
+ *
+ * These are necessary because we do not count on using a
+ * C99 compiler.
+ *=========================================================*/
+static NPY_INLINE
+@ctype@
+cadd@c@(@ctype@ a, @ctype@ b)
+{
+ return npy_cpack@c@(npy_creal@c@(a) + npy_creal@c@(b),
+ npy_cimag@c@(a) + npy_cimag@c@(b));
+}
+
+static NPY_INLINE
+@ctype@
+csub@c@(@ctype@ a, @ctype@ b)
+{
+ return npy_cpack@c@(npy_creal@c@(a) - npy_creal@c@(b),
+ npy_cimag@c@(a) - npy_cimag@c@(b));
+}
+
+static NPY_INLINE
+@ctype@
+cmul@c@(@ctype@ a, @ctype@ b)
+{
+ @type@ ar, ai, br, bi;
+ ar = npy_creal@c@(a);
+ ai = npy_cimag@c@(a);
+ br = npy_creal@c@(b);
+ bi = npy_cimag@c@(b);
+ return npy_cpack@c@(ar*br - ai*bi, ar*bi + ai*br);
+}
+
+static NPY_INLINE
+@ctype@
+cdiv@c@(@ctype@ a, @ctype@ b)
+{
+ @type@ ar, ai, br, bi, abs_br, abs_bi;
+ ar = npy_creal@c@(a);
+ ai = npy_cimag@c@(a);
+ br = npy_creal@c@(b);
+ bi = npy_cimag@c@(b);
+ abs_br = npy_fabs@c@(br);
+ abs_bi = npy_fabs@c@(bi);
+
+ if (abs_br >= abs_bi) {
+ if (abs_br == 0 && abs_bi == 0) {
+ /* divide by zeros should yield a complex inf or nan */
+ return npy_cpack@c@(ar/abs_br, ai/abs_bi);
+ }
+ else {
+ @type@ rat = bi/br;
+ @type@ scl = 1.0@C@/(br+bi*rat);
+ return npy_cpack@c@((ar + ai*rat)*scl, (ai - ar*rat)*scl);
+ }
+ }
+ else {
+ @type@ rat = br/bi;
+ @type@ scl = 1.0@C@/(bi + br*rat);
+ return npy_cpack@c@((ar*rat + ai)*scl, (ai*rat - ar)*scl);
+ }
+}
+
+static NPY_INLINE
+@ctype@
+cneg@c@(@ctype@ a)
+{
+ return npy_cpack@c@(-npy_creal@c@(a), -npy_cimag@c@(a));
+}
+
+static NPY_INLINE
+@ctype@
+cmuli@c@(@ctype@ a)
+{
+ return npy_cpack@c@(-npy_cimag@c@(a), npy_creal@c@(a));
+}
+
+/*==========================================================
+ * Custom implementation of missing complex C99 functions
+ *=========================================================*/
+
#ifndef HAVE_CABS@C@
-@type@ npy_cabs@c@(@ctype@ z)
+@type@
+npy_cabs@c@(@ctype@ z)
{
return npy_hypot@c@(npy_creal@c@(z), npy_cimag@c@(z));
}
#endif
#ifndef HAVE_CARG@C@
-@type@ npy_carg@c@(@ctype@ z)
+@type@
+npy_carg@c@(@ctype@ z)
{
return npy_atan2@c@(npy_cimag@c@(z), npy_creal@c@(z));
}
#endif
+/*
+ * cexp and (ccos, csin)h functions need to calculate exp scaled by another
+ * number. This can be difficult if exp(x) overflows. By doing it this way, we
+ * don't risk overflowing exp. This likely raises floating-point exceptions,
+ * if we decide that we care.
+ *
+ * This is only useful over a limited range (see below), and expects that the
+ * input values are in this range.
+ *
+ * This is based on the technique used in FreeBSD's __frexp_exp and
+ * __ldexp_(c)exp functions by David Schultz.
+ *
+ * SCALED_CEXP_LOWER = log(FLT_MAX)
+ * SCALED_CEXP_UPPER = log(2) + log(FLT_MAX) - log(FLT_TRUE_MIN),
+ * where FLT_TRUE_MIN is the smallest possible subnormal number.
+ */
+
+#define SCALED_CEXP_LOWERF 88.722839f
+#define SCALED_CEXP_UPPERF 192.69492f
+#define SCALED_CEXP_LOWER 710.47586007394386
+#define SCALED_CEXP_UPPER 1454.9159319953251
+#define SCALED_CEXP_LOWERL 11357.216553474703895L
+#define SCALED_CEXP_UPPERL 22756.021937783004509L
+
+static
+@ctype@
+_npy_scaled_cexp@c@(@type@ x, @type@ y, npy_int expt)
+{
+#if @precision@ == 1
+ const npy_int k = 235;
+#endif
+#if @precision@ == 2
+ const npy_int k = 1799;
+#endif
+#if @precision@ == 3
+ const npy_int k = 19547;
+#endif
+ const @type@ kln2 = k * NPY_LOGE2@c@;
+ @type@ mant, mantcos, mantsin;
+ npy_int ex, excos, exsin;
+
+ mant = npy_frexp@c@(npy_exp@c@(x - kln2), &ex);
+ mantcos = npy_frexp@c@(npy_cos@c@(y), &excos);
+ mantsin = npy_frexp@c@(npy_sin@c@(y), &exsin);
+
+ expt += ex + k;
+ return npy_cpack@c@( npy_ldexp@c@(mant * mantcos, expt + excos),
+ npy_ldexp@c@(mant * mantsin, expt + exsin));
+}
+
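
(Sketch, not part of the patch: the frexp/ldexp trick above transcribed to plain Python for doubles; scaled_exp_cos is a hypothetical helper. math.exp(710.0) alone would overflow, but exp(710)*cos(y) can still be representable.)

    import math

    def scaled_exp_cos(x, y, k=1799):
        # compute exp(x) * cos(y) for large x without evaluating exp(x) directly,
        # mirroring _npy_scaled_cexp for doubles (k = 1799, expt = 0)
        kln2 = k * math.log(2.0)
        mant, ex = math.frexp(math.exp(x - kln2))   # exp(x) == mant * 2**(ex + k)
        mantcos, excos = math.frexp(math.cos(y))
        return math.ldexp(mant * mantcos, ex + k + excos)

    print(scaled_exp_cos(710.0, math.pi / 2))       # ~1.4e292, no OverflowError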
#ifndef HAVE_CEXP@C@
-@ctype@ npy_cexp@c@(@ctype@ z)
+@ctype@
+npy_cexp@c@(@ctype@ z)
{
@type@ x, c, s;
@type@ r, i;
@@ -67,46 +221,60 @@
i = npy_cimag@c@(z);
if (npy_isfinite(r)) {
- x = npy_exp@c@(r);
+ if (r >= SCALED_CEXP_LOWER@C@ && r <= SCALED_CEXP_UPPER@C@) {
+ ret = _npy_scaled_cexp@c@(r, i, 0);
+ }
+ else {
+ x = npy_exp@c@(r);
- c = npy_cos@c@(i);
- s = npy_sin@c@(i);
+ c = npy_cos@c@(i);
+ s = npy_sin@c@(i);
- if (npy_isfinite(i)) {
- ret = npy_cpack@c@(x * c, x * s);
- } else {
- ret = npy_cpack@c@(NPY_NAN, npy_copysign@c@(NPY_NAN, i));
+ if (npy_isfinite(i)) {
+ ret = npy_cpack@c@(x * c, x * s);
+ }
+ else {
+ ret = npy_cpack@c@(NPY_NAN@C@, npy_copysign@c@(NPY_NAN@C@, i));
+ }
}
- } else if (npy_isnan(r)) {
+ }
+ else if (npy_isnan(r)) {
/* r is nan */
if (i == 0) {
- ret = npy_cpack@c@(r, 0);
- } else {
- ret = npy_cpack@c@(r, npy_copysign@c@(NPY_NAN, i));
+ ret = z;
+ }
+ else {
+ ret = npy_cpack@c@(r, npy_copysign@c@(NPY_NAN@C@, i));
}
- } else {
+ }
+ else {
/* r is +- inf */
if (r > 0) {
if (i == 0) {
ret = npy_cpack@c@(r, i);
- } else if (npy_isfinite(i)) {
+ }
+ else if (npy_isfinite(i)) {
c = npy_cos@c@(i);
s = npy_sin@c@(i);
ret = npy_cpack@c@(r * c, r * s);
- } else {
+ }
+ else {
/* x = +inf, y = +-inf | nan */
- ret = npy_cpack@c@(r, NPY_NAN);
+ npy_set_floatstatus_invalid();
+ ret = npy_cpack@c@(r, NPY_NAN@C@);
}
- } else {
+ }
+ else {
if (npy_isfinite(i)) {
x = npy_exp@c@(r);
c = npy_cos@c@(i);
s = npy_sin@c@(i);
ret = npy_cpack@c@(x * c, x * s);
- } else {
+ }
+ else {
/* x = -inf, y = nan | +i inf */
ret = npy_cpack@c@(0, 0);
}
@@ -118,9 +286,72 @@
#endif
#ifndef HAVE_CLOG@C@
-@ctype@ npy_clog@c@(@ctype@ z)
+/* algorithm from cpython, rev. d86f5686cef9
+ *
+ * The usual formula for the real part is log(hypot(z.real, z.imag)).
+ * There are four situations where this formula is potentially
+ * problematic:
+ *
+ * (1) the absolute value of z is subnormal. Then hypot is subnormal,
+ * so has fewer than the usual number of bits of accuracy, hence may
+ * have large relative error. This then gives a large absolute error
+ * in the log. This can be solved by rescaling z by a suitable power
+ * of 2.
+ *
+ * (2) the absolute value of z is greater than DBL_MAX (e.g. when both
+ * z.real and z.imag are within a factor of 1/sqrt(2) of DBL_MAX)
+ * Again, rescaling solves this.
+ *
+ * (3) the absolute value of z is close to 1. In this case it's
+ * difficult to achieve good accuracy, at least in part because a
+ * change of 1ulp in the real or imaginary part of z can result in a
+ * change of billions of ulps in the correctly rounded answer.
+ *
+ * (4) z = 0. The simplest thing to do here is to call the
+ * floating-point log with an argument of 0, and let its behaviour
+ * (returning -infinity, signaling a floating-point exception, setting
+ * errno, or whatever) determine that of c_log. So the usual formula
+ * is fine here.
+*/
+@ctype@
+npy_clog@c@(@ctype@ z)
{
- return npy_cpack@c@(npy_log@c@ (npy_cabs@c@ (z)), npy_carg@c@ (z));
+ @type@ ax = npy_fabs@c@(npy_creal@c@(z));
+ @type@ ay = npy_fabs@c@(npy_cimag@c@(z));
+ @type@ rr, ri;
+
+ if (ax > @TMAX@/4 || ay > @TMAX@/4) {
+ rr = npy_log@c@(npy_hypot@c@(ax/2, ay/2)) + NPY_LOGE2@c@;
+ }
+ else if (ax < @TMIN@ && ay < @TMIN@) {
+ if (ax > 0 || ay > 0) {
+ /* catch cases where hypot(ax, ay) is subnormal */
+ rr = npy_log@c@(npy_hypot@c@(npy_ldexp@c@(ax, @TMANT_DIG@),
+ npy_ldexp@c@(ay, @TMANT_DIG@))) - @TMANT_DIG@*NPY_LOGE2@c@;
+ }
+ else {
+ /* log(+/-0 +/- 0i) */
+ /* raise divide-by-zero floating point exception */
+ rr = -1.0@c@ / npy_creal@c@(z);
+ rr = npy_copysign@c@(rr, -1);
+ ri = npy_carg@c@(z);
+ return npy_cpack@c@(rr, ri);
+ }
+ }
+ else {
+ @type@ h = npy_hypot@c@(ax, ay);
+ if (0.71 <= h && h <= 1.73) {
+ @type@ am = ax > ay ? ax : ay; /* max(ax, ay) */
+ @type@ an = ax > ay ? ay : ax; /* min(ax, ay) */
+ rr = npy_log1p@c@((am-1)*(am+1)+an*an)/2;
+ }
+ else {
+ rr = npy_log@c@(h);
+ }
+ }
+ ri = npy_carg@c@(z);
+
+ return npy_cpack@c@(rr, ri);
}
#endif
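
(Sketch, not part of the patch: case (3) above in plain Python; clog_real_near_one is a hypothetical helper reproducing the log1p branch for 0.71 <= |z| <= 1.73, and it agrees with cmath.log, which uses the same CPython algorithm.)

    import math, cmath

    def clog_real_near_one(ax, ay):
        # real part of log(ax + 1j*ay), accurate when hypot(ax, ay) is close to 1
        am, an = max(ax, ay), min(ax, ay)
        return math.log1p((am - 1.0) * (am + 1.0) + an * an) / 2.0

    z = 0.6 + 0.8000000001j
    print(clog_real_near_one(abs(z.real), abs(z.imag)))   # ~8.0e-11
    print(cmath.log(z).real)                              # essentially the same value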
@@ -129,7 +360,8 @@
/* We risk spurious overflow for components >= DBL_MAX / (1 + sqrt(2)). */
#define THRESH (@TMAX@ / (1 + NPY_SQRT2@c@))
-@ctype@ npy_csqrt@c@(@ctype@ z)
+@ctype@
+npy_csqrt@c@(@ctype@ z)
{
@ctype@ result;
@type@ a, b;
@@ -140,10 +372,12 @@
b = npy_cimag@c@(z);
/* Handle special cases. */
- if (a == 0 && b == 0)
+ if (a == 0 && b == 0) {
return (npy_cpack@c@(0, b));
- if (npy_isinf(b))
- return (npy_cpack@c@(NPY_INFINITY, b));
+ }
+ if (npy_isinf(b)) {
+ return (npy_cpack@c@(NPY_INFINITY@C@, b));
+ }
if (npy_isnan(a)) {
t = (b - b) / (b - b); /* raise invalid if b is not a NaN */
return (npy_cpack@c@(a, t)); /* return NaN + NaN i */
@@ -155,10 +389,12 @@
* csqrt(-inf + NaN i) = NaN +- inf i
* csqrt(-inf + y i) = 0 + inf i
*/
- if (npy_signbit(a))
+ if (npy_signbit(a)) {
return (npy_cpack@c@(npy_fabs@c@(b - b), npy_copysign@c@(a, b)));
- else
+ }
+ else {
return (npy_cpack@c@(a, npy_copysign@c@(b - b, b)));
+ }
}
/*
* The remaining special case (b is NaN) is handled just fine by
@@ -170,61 +406,1333 @@
a *= 0.25;
b *= 0.25;
scale = 1;
- } else {
+ }
+ else {
scale = 0;
}
/* Algorithm 312, CACM vol 10, Oct 1967. */
if (a >= 0) {
- t = npy_sqrt@c@((a + npy_hypot@c@(a, b)) * 0.5);
+ t = npy_sqrt@c@((a + npy_hypot@c@(a, b)) * 0.5@c@);
result = npy_cpack@c@(t, b / (2 * t));
- } else {
- t = npy_sqrt@c@((-a + npy_hypot@c@(a, b)) * 0.5);
+ }
+ else {
+ t = npy_sqrt@c@((-a + npy_hypot@c@(a, b)) * 0.5@c@);
result = npy_cpack@c@(npy_fabs@c@(b) / (2 * t), npy_copysign@c@(t, b));
}
/* Rescale. */
- if (scale)
+ if (scale) {
return (npy_cpack@c@(npy_creal@c@(result) * 2, npy_cimag@c@(result)));
- else
+ }
+ else {
return (result);
+ }
}
#undef THRESH
#endif
-#ifndef HAVE_CPOW@C@
-@ctype@ npy_cpow@c@ (@ctype@ x, @ctype@ y)
+/*
+ * Always use this function because of the multiplication for small
+ * integer powers, but in the body use cpow if it is available.
+ */
+
+/* private function for use in npy_pow{f, ,l} */
+#ifdef HAVE_CPOW@C@
+static @ctype@
+sys_cpow@c@(@ctype@ x, @ctype@ y)
{
- @ctype@ b;
- @type@ br, bi, yr, yi;
+ __@ctype@_to_c99_cast xcast;
+ __@ctype@_to_c99_cast ycast;
+ __@ctype@_to_c99_cast ret;
+ xcast.npy_z = x;
+ ycast.npy_z = y;
+ ret.c99_z = cpow@c@(xcast.c99_z, ycast.c99_z);
+ return ret.npy_z;
+}
+#endif
- yr = npy_creal@c@(y);
- yi = npy_cimag@c@(y);
- b = npy_clog@c@(x);
- br = npy_creal@c@(b);
- bi = npy_cimag@c@(b);
- return npy_cexp@c@(npy_cpack@c@(br * yr - bi * yi, br * yi + bi * yr));
-}
+@ctype@
+npy_cpow@c@ (@ctype@ a, @ctype@ b)
+{
+ npy_intp n;
+ @type@ ar = npy_creal@c@(a);
+ @type@ br = npy_creal@c@(b);
+ @type@ ai = npy_cimag@c@(a);
+ @type@ bi = npy_cimag@c@(b);
+ @ctype@ r;
+
+ if (br == 0. && bi == 0.) {
+ return npy_cpack@c@(1., 0.);
+ }
+ if (ar == 0. && ai == 0.) {
+ if (br > 0 && bi == 0) {
+ return npy_cpack@c@(0., 0.);
+ }
+ else {
+ volatile @type@ tmp = NPY_INFINITY@C@;
+ /*
+ * NB: there are four complex zeros; c0 = (+-0, +-0), so that
+ * unlike for reals, c0**p, with `p` negative is in general
+ * ill-defined.
+ *
+ * c0**z with z complex is also ill-defined.
+ */
+ r = npy_cpack@c@(NPY_NAN@C@, NPY_NAN@C@);
+
+ /* Raise invalid */
+ tmp -= NPY_INFINITY@C@;
+ ar = tmp;
+ return r;
+ }
+ }
+ if (bi == 0 && (n=(npy_intp)br) == br) {
+ if (n == 1) {
+ /* unroll: handle inf better */
+ return npy_cpack@c@(ar, ai);
+ }
+ else if (n == 2) {
+ /* unroll: handle inf better */
+ return cmul@c@(a, a);
+ }
+ else if (n == 3) {
+ /* unroll: handle inf better */
+ return cmul@c@(a, cmul@c@(a, a));
+ }
+ else if (n > -100 && n < 100) {
+ @ctype@ p, aa;
+ npy_intp mask = 1;
+ if (n < 0) {
+ n = -n;
+ }
+ aa = c_1@c@;
+ p = npy_cpack@c@(ar, ai);
+ while (1) {
+ if (n & mask) {
+ aa = cmul@c@(aa,p);
+ }
+ mask <<= 1;
+ if (n < mask || mask <= 0) {
+ break;
+ }
+ p = cmul@c@(p,p);
+ }
+ r = npy_cpack@c@(npy_creal@c@(aa), npy_cimag@c@(aa));
+ if (br < 0) {
+ r = cdiv@c@(c_1@c@, r);
+ }
+ return r;
+ }
+ }
+
+#ifdef HAVE_CPOW@C@
+ return sys_cpow@c@(a, b);
+
+#else
+ {
+ @ctype@ loga = npy_clog@c@(a);
+
+ ar = npy_creal@c@(loga);
+ ai = npy_cimag@c@(loga);
+ return npy_cexp@c@(npy_cpack@c@(ar*br - ai*bi, ar*bi + ai*br));
+ }
+
#endif
+}
+
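
(Sketch, not part of the patch: the |n| < 100 square-and-multiply loop above transcribed to Python; cpow_int is a hypothetical helper.)

    def cpow_int(a, n):
        # complex a raised to a small integer power by repeated squaring,
        # mirroring the mask/shift loop in npy_cpow above
        if n < 0:
            return 1.0 / cpow_int(a, -n)
        result, p, mask = 1.0 + 0.0j, a, 1
        while True:
            if n & mask:
                result *= p
            mask <<= 1
            if n < mask:
                break
            p *= p
        return result

    print(cpow_int(1 + 2j, 5), (1 + 2j) ** 5)   # both print (41-38j)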
#ifndef HAVE_CCOS@C@
-@ctype@ npy_ccos@c@(@ctype@ z)
+@ctype@
+npy_ccos@c@(@ctype@ z)
{
- @type@ x, y;
+ /* ccos(z) = ccosh(I * z) */
+ return npy_ccosh@c@(npy_cpack@c@(-npy_cimag@c@(z), npy_creal@c@(z)));
+}
+#endif
+
+#ifndef HAVE_CSIN@C@
+@ctype@
+npy_csin@c@(@ctype@ z)
+{
+ /* csin(z) = -I * csinh(I * z) */
+ z = npy_csinh@c@(npy_cpack@c@(-npy_cimag@c@(z), npy_creal@c@(z)));
+ return npy_cpack@c@(npy_cimag@c@(z), -npy_creal@c@(z));
+}
+#endif
+
+#ifndef HAVE_CTAN@C@
+@ctype@
+npy_ctan@c@(@ctype@ z)
+{
+ /* ctan(z) = -I * ctanh(I * z) */
+ z = npy_ctanh@c@(npy_cpack@c@(-npy_cimag@c@(z), npy_creal@c@(z)));
+ return (npy_cpack@c@(npy_cimag@c@(z), -npy_creal@c@(z)));
+}
+#endif
+
+#ifndef HAVE_CCOSH@C@
+/*
+ * Taken from the msun library in FreeBSD, rev 226599.
+ *
+ * Hyperbolic cosine of a complex argument z = x + i y.
+ *
+ * cosh(z) = cosh(x+iy)
+ * = cosh(x) cos(y) + i sinh(x) sin(y).
+ *
+ * Exceptional values are noted in the comments within the source code.
+ * These values and the return value were taken from n1124.pdf.
+ *
+ * CCOSH_BIG is chosen such that
+ * spacing(0.5 * exp(CCOSH_BIG)) > 0.5*exp(-CCOSH_BIG)
+ * although the exact value assigned to CCOSH_BIG is not so important
+ */
+@ctype@
+npy_ccosh@c@(@ctype@ z)
+{
+#if @precision@ == 1
+ const npy_float CCOSH_BIG = 9.0f;
+ const npy_float CCOSH_HUGE = 1.70141183e+38f;
+#endif
+#if @precision@ == 2
+ const npy_double CCOSH_BIG = 22.0;
+ const npy_double CCOSH_HUGE = 8.9884656743115795e+307;
+#endif
+#if @precision@ >= 3
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ const npy_longdouble CCOSH_BIG = 22.0L;
+ const npy_longdouble CCOSH_HUGE = 8.9884656743115795e+307L;
+#else
+ const npy_longdouble CCOSH_BIG = 24.0L;
+ const npy_longdouble CCOSH_HUGE = 5.94865747678615882543e+4931L;
+#endif
+#endif
+
+ @type@ x, y, h, absx;
+ npy_int xfinite, yfinite;
+
x = npy_creal@c@(z);
y = npy_cimag@c@(z);
- return npy_cpack@c@(npy_cos@c@(x) * npy_cosh@c@(y), -(npy_sin@c@(x) * npy_sinh@c@(y)));
+
+ xfinite = npy_isfinite(x);
+ yfinite = npy_isfinite(y);
+
+ /* Handle the nearly-non-exceptional cases where x and y are finite. */
+ if (xfinite && yfinite) {
+ if (y == 0) {
+ return npy_cpack@c@(npy_cosh@c@(x), x * y);
+ }
+ absx = npy_fabs@c@(x);
+ if (absx < CCOSH_BIG) {
+ /* small x: normal case */
+ return npy_cpack@c@(npy_cosh@c@(x) * npy_cos@c@(y),
+ npy_sinh@c@(x) * npy_sin@c@(y));
+ }
+
+ /* |x| >= 22, so cosh(x) ~= exp(|x|) */
+ if (absx < SCALED_CEXP_LOWER@C@) {
+ /* x < 710: exp(|x|) won't overflow */
+ h = npy_exp@c@(absx) * 0.5@c@;
+ return npy_cpack@c@(h * npy_cos@c@(y),
+ npy_copysign@c@(h, x) * npy_sin@c@(y));
+ }
+ else if (absx < SCALED_CEXP_UPPER@C@) {
+ /* x < 1455: scale to avoid overflow */
+ z = _npy_scaled_cexp@c@(absx, y, -1);
+ return npy_cpack@c@(npy_creal@c@(z),
+ npy_cimag@c@(z) * npy_copysign@c@(1, x));
+ }
+ else {
+ /* x >= 1455: the result always overflows */
+ h = CCOSH_HUGE * x;
+ return npy_cpack@c@(h * h * npy_cos@c@(y), h * npy_sin@c@(y));
+ }
+ }
+
+ /*
+ * cosh(+-0 +- I Inf) = dNaN + I sign(d(+-0, dNaN))0.
+ * The sign of 0 in the result is unspecified. Choice = normally
+ * the same as dNaN. Raise the invalid floating-point exception.
+ *
+ * cosh(+-0 +- I NaN) = d(NaN) + I sign(d(+-0, NaN))0.
+ * The sign of 0 in the result is unspecified. Choice = normally
+ * the same as d(NaN).
+ */
+ if (x == 0 && !yfinite) {
+ return npy_cpack@c@(y - y, npy_copysign@c@(0, x * (y - y)));
+ }
+
+ /*
+ * cosh(+-Inf +- I 0) = +Inf + I (+-)(+-)0.
+ *
+ * cosh(NaN +- I 0) = d(NaN) + I sign(d(NaN, +-0))0.
+ * The sign of 0 in the result is unspecified.
+ */
+ if (y == 0 && !xfinite) {
+ return npy_cpack@c@(x * x, npy_copysign@c@(0, x) * y);
+ }
+
+ /*
+ * cosh(x +- I Inf) = dNaN + I dNaN.
+ * Raise the invalid floating-point exception for finite nonzero x.
+ *
+ * cosh(x + I NaN) = d(NaN) + I d(NaN).
+ * Optionally raises the invalid floating-point exception for finite
+ * nonzero x. Choice = don't raise (except for signaling NaNs).
+ */
+ if (xfinite && !yfinite) {
+ return npy_cpack@c@(y - y, x * (y - y));
+ }
+
+ /*
+ * cosh(+-Inf + I NaN) = +Inf + I d(NaN).
+ *
+ * cosh(+-Inf +- I Inf) = +Inf + I dNaN.
+ * The sign of Inf in the result is unspecified. Choice = always +.
+ * Raise the invalid floating-point exception.
+ *
+ * cosh(+-Inf + I y) = +Inf cos(y) +- I Inf sin(y)
+ */
+ if (npy_isinf(x)) {
+ if (!yfinite) {
+ return npy_cpack@c@(x * x, x * (y - y));
+ }
+ return npy_cpack@c@((x * x) * npy_cos@c@(y), x * npy_sin@c@(y));
+ }
+
+ /*
+ * cosh(NaN + I NaN) = d(NaN) + I d(NaN).
+ *
+ * cosh(NaN +- I Inf) = d(NaN) + I d(NaN).
+ * Optionally raises the invalid floating-point exception.
+ * Choice = raise.
+ *
+ * cosh(NaN + I y) = d(NaN) + I d(NaN).
+ * Optionally raises the invalid floating-point exception for finite
+ * nonzero y. Choice = don't raise (except for signaling NaNs).
+ */
+ return npy_cpack@c@((x * x) * (y - y), (x + x) * (y - y));
}
+#undef CCOSH_BIG
+#undef CCOSH_HUGE
#endif
-#ifndef HAVE_CSIN@C@
-@ctype@ npy_csin@c@(@ctype@ z)
+#ifndef HAVE_CSINH@C@
+/*
+ * Taken from the msun library in FreeBSD, rev 226599.
+ *
+ * Hyperbolic sine of a complex argument z = x + i y.
+ *
+ * sinh(z) = sinh(x+iy)
+ * = sinh(x) cos(y) + i cosh(x) sin(y).
+ *
+ * Exceptional values are noted in the comments within the source code.
+ * These values and the return value were taken from n1124.pdf.
+ */
+@ctype@
+npy_csinh@c@(@ctype@ z)
+{
+#if @precision@ == 1
+ const npy_float CSINH_BIG = 9.0f;
+ const npy_float CSINH_HUGE = 1.70141183e+38f;
+#endif
+#if @precision@ == 2
+ const npy_double CSINH_BIG = 22.0;
+ const npy_double CSINH_HUGE = 8.9884656743115795e+307;
+#endif
+#if @precision@ >= 3
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ const npy_longdouble CSINH_BIG = 22.0L;
+ const npy_longdouble CSINH_HUGE = 8.9884656743115795e+307L;
+#else
+ const npy_longdouble CSINH_BIG = 24.0L;
+ const npy_longdouble CSINH_HUGE = 5.94865747678615882543e+4931L;
+#endif
+#endif
+
+ @type@ x, y, h, absx;
+ npy_int xfinite, yfinite;
+
+ x = npy_creal@c@(z);
+ y = npy_cimag@c@(z);
+
+ xfinite = npy_isfinite(x);
+ yfinite = npy_isfinite(y);
+
+ /* Handle the nearly-non-exceptional cases where x and y are finite. */
+ if (xfinite && yfinite) {
+ if (y == 0) {
+ return npy_cpack@c@(npy_sinh@c@(x), y);
+ }
+ absx = npy_fabs@c@(x);
+ if (absx < CSINH_BIG) {
+ /* small x: normal case */
+ return npy_cpack@c@(npy_sinh@c@(x) * npy_cos@c@(y),
+ npy_cosh@c@(x) * npy_sin@c@(y));
+ }
+
+ /* |x| >= 22, so cosh(x) ~= exp(|x|) */
+ if (absx < SCALED_CEXP_LOWER@C@) {
+ /* x < 710: exp(|x|) won't overflow */
+ h = npy_exp@c@(npy_fabs@c@(x)) * 0.5@c@;
+ return npy_cpack@c@(npy_copysign@c@(h, x) * npy_cos@c@(y),
+ h * npy_sin@c@(y));
+ }
+ else if (x < SCALED_CEXP_UPPER@C@) {
+ /* x < 1455: scale to avoid overflow */
+ z = _npy_scaled_cexp@c@(absx, y, -1);
+ return npy_cpack@c@(npy_creal@c@(z) * npy_copysign@c@(1, x),
+ npy_cimag@c@(z));
+ }
+ else {
+ /* x >= 1455: the result always overflows */
+ h = CSINH_HUGE * x;
+ return npy_cpack@c@(h * npy_cos@c@(y), h * h * npy_sin@c@(y));
+ }
+ }
+
+ /*
+ * sinh(+-0 +- I Inf) = sign(d(+-0, dNaN))0 + I dNaN.
+ * The sign of 0 in the result is unspecified. Choice = normally
+ * the same as dNaN. Raise the invalid floating-point exception.
+ *
+ * sinh(+-0 +- I NaN) = sign(d(+-0, NaN))0 + I d(NaN).
+ * The sign of 0 in the result is unspecified. Choice = normally
+ * the same as d(NaN).
+ */
+ if (x == 0 && !yfinite) {
+ return npy_cpack@c@(npy_copysign@c@(0, x * (y - y)), y - y);
+ }
+
+ /*
+ * sinh(+-Inf +- I 0) = +-Inf + I +-0.
+ *
+ * sinh(NaN +- I 0) = d(NaN) + I +-0.
+ */
+ if (y == 0 && !xfinite) {
+ if (npy_isnan(x)) {
+ return z;
+ }
+ return npy_cpack@c@(x, npy_copysign@c@(0, y));
+ }
+
+ /*
+ * sinh(x +- I Inf) = dNaN + I dNaN.
+ * Raise the invalid floating-point exception for finite nonzero x.
+ *
+ * sinh(x + I NaN) = d(NaN) + I d(NaN).
+ * Optionally raises the invalid floating-point exception for finite
+ * nonzero x. Choice = don't raise (except for signaling NaNs).
+ */
+ if (xfinite && !yfinite) {
+ return npy_cpack@c@(y - y, x * (y - y));
+ }
+
+ /*
+ * sinh(+-Inf + I NaN) = +-Inf + I d(NaN).
+ * The sign of Inf in the result is unspecified. Choice = normally
+ * the same as d(NaN).
+ *
+ * sinh(+-Inf +- I Inf) = +Inf + I dNaN.
+ * The sign of Inf in the result is unspecified. Choice = always +.
+ * Raise the invalid floating-point exception.
+ *
+ * sinh(+-Inf + I y) = +-Inf cos(y) + I Inf sin(y)
+ */
+ if (!xfinite && !npy_isnan(x)) {
+ if (!yfinite) {
+ return npy_cpack@c@(x * x, x * (y - y));
+ }
+ return npy_cpack@c@(x * npy_cos@c@(y),
+ NPY_INFINITY@C@ * npy_sin@c@(y));
+ }
+
+ /*
+ * sinh(NaN + I NaN) = d(NaN) + I d(NaN).
+ *
+ * sinh(NaN +- I Inf) = d(NaN) + I d(NaN).
+ * Optionally raises the invalid floating-point exception.
+ * Choice = raise.
+ *
+ * sinh(NaN + I y) = d(NaN) + I d(NaN).
+ * Optionally raises the invalid floating-point exception for finite
+ * nonzero y. Choice = don't raise (except for signaling NaNs).
+ */
+ return npy_cpack@c@((x * x) * (y - y), (x + x) * (y - y));
+}
+#undef CSINH_BIG
+#undef CSINH_HUGE
+#endif
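
The sinh(x)cos(y) / cosh(x)sin(y) split quoted in the comment above is easy to sanity-check numerically. A minimal Python sketch (standard library only, illustrative and not part of the patch):

    import cmath, math

    x, y = 0.5, 1.25
    lhs = cmath.sinh(complex(x, y))
    rhs = complex(math.sinh(x) * math.cos(y),   # real part: sinh(x) cos(y)
                  math.cosh(x) * math.sin(y))   # imag part: cosh(x) sin(y)
    assert abs(lhs - rhs) < 1e-12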
+
+#ifndef HAVE_CTANH@C@
+/*
+ * Taken from the msun library in FreeBSD, rev 226600.
+ *
+ * Hyperbolic tangent of a complex argument z = x + i y.
+ *
+ * The algorithm is from:
+ *
+ * W. Kahan. Branch Cuts for Complex Elementary Functions or Much
+ * Ado About Nothing's Sign Bit. In The State of the Art in
+ * Numerical Analysis, pp. 165 ff. Iserles and Powell, eds., 1987.
+ *
+ * Method:
+ *
+ * Let t = tan(x)
+ * beta = 1/cos^2(y)
+ * s = sinh(x)
+ * rho = cosh(x)
+ *
+ * We have:
+ *
+ * tanh(z) = sinh(z) / cosh(z)
+ *
+ * sinh(x) cos(y) + i cosh(x) sin(y)
+ * = ---------------------------------
+ * cosh(x) cos(y) + i sinh(x) sin(y)
+ *
+ * cosh(x) sinh(x) / cos^2(y) + i tan(y)
+ * = -------------------------------------
+ * 1 + sinh^2(x) / cos^2(y)
+ *
+ * beta rho s + i t
+ * = ----------------
+ * 1 + beta s^2
+ *
+ * Modifications:
+ *
+ * I omitted the original algorithm's handling of overflow in tan(x) after
+ * verifying with nearpi.c that this can't happen in IEEE single or double
+ * precision. I also handle large x differently.
+ */
+
+#define TANH_HUGE 22.0
+#define TANHF_HUGE 11.0F
+#define TANHL_HUGE 42.0L
+
+@ctype@
+npy_ctanh@c@(@ctype@ z)
{
@type@ x, y;
+ @type@ t, beta, s, rho, denom;
+
+ x = npy_creal@c@(z);
+ y = npy_cimag@c@(z);
+
+ /*
+ * ctanh(NaN + i 0) = NaN + i 0
+ *
+ * ctanh(NaN + i y) = NaN + i NaN for y != 0
+ *
+ * The imaginary part has the sign of x*sin(2*y), but there's no
+ * special effort to get this right.
+ *
+     * ctanh(+-Inf +- i Inf) = +-1 +- i 0
+ *
+ * ctanh(+-Inf + i y) = +-1 + 0 sin(2y) for y finite
+ *
+ * The imaginary part of the sign is unspecified. This special
+ * case is only needed to avoid a spurious invalid exception when
+ * y is infinite.
+ */
+ if (!npy_isfinite(x)) {
+ if (npy_isnan(x)) {
+ return npy_cpack@c@(x, (y == 0 ? y : x * y));
+ }
+ return npy_cpack@c@(npy_copysign@c@(1,x),
+ npy_copysign@c@(0,
+ npy_isinf(y) ?
+ y : npy_sin@c@(y) * npy_cos@c@(y)));
+ }
+
+ /*
+ * ctanh(x + i NAN) = NaN + i NaN
+ * ctanh(x +- i Inf) = NaN + i NaN
+ */
+ if (!npy_isfinite(y)) {
+ return (npy_cpack@c@(y - y, y - y));
+ }
+
+ /*
+ * ctanh(+-huge + i +-y) ~= +-1 +- i 2sin(2y)/exp(2x), using the
+ * approximation sinh^2(huge) ~= exp(2*huge) / 4.
+ * We use a modified formula to avoid spurious overflow.
+ */
+ if (npy_fabs@c@(x) >= TANH@C@_HUGE) {
+ @type@ exp_mx = npy_exp@c@(-npy_fabs@c@(x));
+ return npy_cpack@c@(npy_copysign@c@(1, x),
+ 4 * npy_sin@c@(y) * npy_cos@c@(y) *
+ exp_mx * exp_mx);
+ }
+
+ /* Kahan's algorithm */
+ t = npy_tan@c@(y);
+ beta = 1 + t * t; /* = 1 / cos^2(y) */
+ s = npy_sinh@c@(x);
+ rho = npy_sqrt@c@(1 + s * s); /* = cosh(x) */
+ denom = 1 + beta * s * s;
+ return (npy_cpack@c@((beta * rho * s) / denom, t / denom));
+}
+#undef TANH_HUGE
+#undef TANHF_HUGE
+#undef TANHL_HUGE
+#endif
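
Kahan's t/beta/s/rho formulation documented above can be cross-checked against a library tanh. A rough Python sketch of the normal path only (no overflow or special-value handling; the test point is arbitrary):

    import cmath, math

    def ctanh_kahan(z):
        # t, beta, s, rho and the final quotient exactly as in the comment above
        x, y = z.real, z.imag
        t = math.tan(y)
        beta = 1.0 + t * t              # 1 / cos^2(y)
        s = math.sinh(x)
        rho = math.sqrt(1.0 + s * s)    # cosh(x)
        denom = 1.0 + beta * s * s
        return complex(beta * rho * s / denom, t / denom)

    z = 0.7 + 0.3j
    assert abs(ctanh_kahan(z) - cmath.tanh(z)) < 1e-12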
+
+#if !defined (HAVE_CACOS@C@) || !defined (HAVE_CASINH@C@)
+/*
+ * Complex inverse trig functions taken from the msun library in FreeBSD
+ * revision 251404
+ *
+ * The algorithm is very close to that in "Implementing the complex arcsine
+ * and arccosine functions using exception handling" by T. E. Hull, Thomas F.
+ * Fairgrieve, and Ping Tak Peter Tang, published in ACM Transactions on
+ * Mathematical Software, Volume 23 Issue 3, 1997, Pages 299-335,
+ * http://dl.acm.org/citation.cfm?id=275324.
+ *
+ * Throughout we use the convention z = x + I*y.
+ *
+ * casinh(z) = sign(x)*log(A+sqrt(A*A-1)) + I*asin(B)
+ * where
+ * A = (|z+I| + |z-I|) / 2
+ * B = (|z+I| - |z-I|) / 2 = y/A
+ *
+ * These formulas become numerically unstable:
+ * (a) for Re(casinh(z)) when z is close to the line segment [-I, I] (that
+ * is, Re(casinh(z)) is close to 0);
+ * (b) for Im(casinh(z)) when z is close to either of the intervals
+ * [I, I*infinity) or (-I*infinity, -I] (that is, |Im(casinh(z))| is
+ * close to PI/2).
+ *
+ * These numerical problems are overcome by defining
+ * f(a, b) = (hypot(a, b) - b) / 2 = a*a / (hypot(a, b) + b) / 2
+ * Then if A < A_crossover, we use
+ * log(A + sqrt(A*A-1)) = log1p((A-1) + sqrt((A-1)*(A+1)))
+ * A-1 = f(x, 1+y) + f(x, 1-y)
+ * and if B > B_crossover, we use
+ * asin(B) = atan2(y, sqrt(A*A - y*y)) = atan2(y, sqrt((A+y)*(A-y)))
+ * A-y = f(x, y+1) + f(x, y-1)
+ * where without loss of generality we have assumed that x and y are
+ * non-negative.
+ *
+ * Much of the difficulty comes because the intermediate computations may
+ * produce overflows or underflows. This is dealt with in the paper by Hull
+ * et al by using exception handling. We do this by detecting when
+ * computations risk underflow or overflow. The hardest part is handling the
+ * underflows when computing f(a, b).
+ *
+ * Note that the function f(a, b) does not appear explicitly in the paper by
+ * Hull et al, but the idea may be found on pages 308 and 309. Introducing the
+ * function f(a, b) allows us to concentrate many of the clever tricks in this
+ * paper into one function.
+ */
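
The casinh formulas above translate directly into a few lines of Python; the sketch below applies them verbatim, with none of the crossover or underflow handling that the rest of this block exists to provide, and only for a point well away from the problem regions:

    import cmath, math

    def casinh_naive(z):
        x, y = z.real, z.imag
        A = (abs(complex(x, y + 1)) + abs(complex(x, y - 1))) / 2.0   # (|z+I| + |z-I|) / 2
        B = y / A
        re = math.copysign(math.log(A + math.sqrt(A * A - 1.0)), x)   # sign(x)*log(A+sqrt(A*A-1))
        return complex(re, math.asin(B))                              # + I*asin(B)

    z = 1.0 + 1.0j
    assert abs(casinh_naive(z) - cmath.asinh(z)) < 1e-9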
+
+/*
+ * Function f(a, b, hypot_a_b) = (hypot(a, b) - b) / 2.
+ * Pass hypot(a, b) as the third argument.
+ */
+static NPY_INLINE @type@
+_f@c@(@type@ a, @type@ b, @type@ hypot_a_b)
+{
+ if (b < 0) {
+ return ((hypot_a_b - b) / 2);
+ }
+ if (b == 0) {
+ return (a / 2);
+ }
+ return (a * a / (hypot_a_b + b) / 2);
+}
+
+/*
+ * All the hard work is contained in this function.
+ * x and y are assumed positive or zero, and less than RECIP_EPSILON.
+ * Upon return:
+ * rx = Re(casinh(z)) = -Im(cacos(y + I*x)).
+ * B_is_usable is set to 1 if the value of B is usable.
+ * If B_is_usable is set to 0, sqrt_A2my2 = sqrt(A*A - y*y), and new_y = y.
+ * If returning sqrt_A2my2 has potential to result in an underflow, it is
+ * rescaled, and new_y is similarly rescaled.
+ */
+static NPY_INLINE void
+_do_hard_work@c@(@type@ x, @type@ y, @type@ *rx,
+ npy_int *B_is_usable, @type@ *B, @type@ *sqrt_A2my2, @type@ *new_y)
+{
+#if @precision@ == 1
+ const npy_float A_crossover = 10.0f;
+ const npy_float B_crossover = 0.6417f;
+ const npy_float FOUR_SQRT_MIN = 4.3368086899420177e-19f;
+#endif
+#if @precision@ == 2
+ const npy_double A_crossover = 10.0;
+ const npy_double B_crossover = 0.6417;
+ const npy_double FOUR_SQRT_MIN = 5.9666725849601654e-154;
+#endif
+#if @precision@ == 3
+ const npy_longdouble A_crossover = 10.0l;
+ const npy_longdouble B_crossover = 0.6417l;
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ const npy_longdouble FOUR_SQRT_MIN = 5.9666725849601654e-154;
+#else
+ const npy_longdouble FOUR_SQRT_MIN = 7.3344154702193886625e-2466l;
+#endif
+#endif
+ @type@ R, S, A; /* A, B, R, and S are as in Hull et al. */
+ @type@ Am1, Amy; /* A-1, A-y. */
+
+ R = npy_hypot@c@(x, y + 1); /* |z+I| */
+ S = npy_hypot@c@(x, y - 1); /* |z-I| */
+
+ /* A = (|z+I| + |z-I|) / 2 */
+ A = (R + S) / 2;
+ /*
+ * Mathematically A >= 1. There is a small chance that this will not
+ * be so because of rounding errors. So we will make certain it is
+ * so.
+ */
+ if (A < 1) {
+ A = 1;
+ }
+
+ if (A < A_crossover) {
+ /*
+ * Am1 = fp + fm, where fp = f(x, 1+y), and fm = f(x, 1-y).
+ * rx = log1p(Am1 + sqrt(Am1*(A+1)))
+ */
+ if (y == 1 && x < @TEPS@ * @TEPS@ / 128) {
+ /*
+ * fp is of order x^2, and fm = x/2.
+ * A = 1 (inexactly).
+ */
+ *rx = npy_sqrt@c@(x);
+ }
+ else if (x >= @TEPS@ * npy_fabs@c@(y - 1)) {
+ /*
+ * Underflow will not occur because
+ * x >= DBL_EPSILON^2/128 >= FOUR_SQRT_MIN
+ */
+ Am1 = _f@c@(x, 1 + y, R) + _f@c@(x, 1 - y, S);
+ *rx = npy_log1p@c@(Am1 + npy_sqrt@c@(Am1 * (A + 1)));
+ }
+ else if (y < 1) {
+ /*
+ * fp = x*x/(1+y)/4, fm = x*x/(1-y)/4, and
+ * A = 1 (inexactly).
+ */
+ *rx = x / npy_sqrt@c@((1 - y) * (1 + y));
+ }
+ else { /* if (y > 1) */
+ /*
+ * A-1 = y-1 (inexactly).
+ */
+ *rx = npy_log1p@c@((y - 1) + npy_sqrt@c@((y - 1) * (y + 1)));
+ }
+ }
+ else {
+ *rx = npy_log@c@(A + npy_sqrt@c@(A * A - 1));
+ }
+
+ *new_y = y;
+
+ if (y < FOUR_SQRT_MIN) {
+ /*
+ * Avoid a possible underflow caused by y/A. For casinh this
+ * would be legitimate, but will be picked up by invoking atan2
+ * later on. For cacos this would not be legitimate.
+ */
+ *B_is_usable = 0;
+ *sqrt_A2my2 = A * (2 / @TEPS@);
+ *new_y = y * (2 / @TEPS@);
+ return;
+ }
+
+ /* B = (|z+I| - |z-I|) / 2 = y/A */
+ *B = y / A;
+ *B_is_usable = 1;
+
+ if (*B > B_crossover) {
+ *B_is_usable = 0;
+ /*
+ * Amy = fp + fm, where fp = f(x, y+1), and fm = f(x, y-1).
+ * sqrt_A2my2 = sqrt(Amy*(A+y))
+ */
+ if (y == 1 && x < @TEPS@ / 128) {
+ /*
+ * fp is of order x^2, and fm = x/2.
+ * A = 1 (inexactly).
+ */
+ *sqrt_A2my2 = npy_sqrt@c@(x) * npy_sqrt@c@((A + y) / 2);
+ }
+ else if (x >= @TEPS@ * npy_fabs@c@(y - 1)) {
+ /*
+ * Underflow will not occur because
+ * x >= DBL_EPSILON/128 >= FOUR_SQRT_MIN
+ * and
+ * x >= DBL_EPSILON^2 >= FOUR_SQRT_MIN
+ */
+ Amy = _f@c@(x, y + 1, R) + _f@c@(x, y - 1, S);
+ *sqrt_A2my2 = npy_sqrt@c@(Amy * (A + y));
+ }
+ else if (y > 1) {
+ /*
+ * fp = x*x/(y+1)/4, fm = x*x/(y-1)/4, and
+ * A = y (inexactly).
+ *
+ * y < RECIP_EPSILON. So the following
+ * scaling should avoid any underflow problems.
+ */
+ *sqrt_A2my2 = x * (4 / @TEPS@ / @TEPS@) * y /
+ npy_sqrt@c@((y + 1) * (y - 1));
+ *new_y = y * (4 / @TEPS@ / @TEPS@);
+ }
+ else { /* if (y < 1) */
+ /*
+ * fm = 1-y >= DBL_EPSILON, fp is of order x^2, and
+ * A = 1 (inexactly).
+ */
+ *sqrt_A2my2 = npy_sqrt@c@((1 - y) * (1 + y));
+ }
+ }
+}
+
+/*
+ * Optimized version of clog() for |z| finite and larger than ~RECIP_EPSILON.
+ */
+static NPY_INLINE void
+_clog_for_large_values@c@(@type@ x, @type@ y,
+ @type@ *rr, @type@ *ri)
+{
+#if @precision@ == 1
+ const npy_float QUARTER_SQRT_MAX = 4.611685743549481e+18f;
+ const npy_float SQRT_MIN = 1.0842021724855044e-19f;
+#endif
+#if @precision@ == 2
+ const npy_double QUARTER_SQRT_MAX = 3.3519519824856489e+153;
+ const npy_double SQRT_MIN = 1.4916681462400413e-154;
+#endif
+#if @precision@ == 3
+#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE
+ const npy_longdouble QUARTER_SQRT_MAX = 3.3519519824856489e+153;
+ const npy_longdouble SQRT_MIN = 1.4916681462400413e-154;
+#else
+ const npy_longdouble QUARTER_SQRT_MAX = 2.7268703390485398235e+2465l;
+ const npy_longdouble SQRT_MIN = 1.8336038675548471656e-2466l;
+#endif
+#endif
+ @type@ ax, ay, t;
+
+ ax = npy_fabs@c@(x);
+ ay = npy_fabs@c@(y);
+ if (ax < ay) {
+ t = ax;
+ ax = ay;
+ ay = t;
+ }
+
+ /*
+ * Avoid overflow in hypot() when x and y are both very large.
+ * Divide x and y by E, and then add 1 to the logarithm. This depends
+ * on E being larger than sqrt(2).
+ * Dividing by E causes an insignificant loss of accuracy; however
+     * this method is still poor since it is unnecessarily slow.
+ */
+ if (ax > @TMAX@ / 2) {
+ *rr = npy_log@c@(npy_hypot@c@(x / NPY_E@c@, y / NPY_E@c@)) + 1;
+ }
+ /*
+ * Avoid overflow when x or y is large. Avoid underflow when x or
+ * y is small.
+ */
+ else if (ax > QUARTER_SQRT_MAX || ay < SQRT_MIN) {
+ *rr = npy_log@c@(npy_hypot@c@(x, y));
+ }
+ else {
+ *rr = npy_log@c@(ax * ax + ay * ay) / 2;
+ }
+ *ri = npy_atan2@c@(y, x);
+}
+#endif
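
The divide-by-E trick described in the comment can be seen with NumPy doubles near the overflow threshold (illustrative values only):

    import numpy as np

    x = y = 1.4e308                                       # hypot(x, y) alone overflows
    with np.errstate(over='ignore'):
        naive = np.log(np.hypot(x, y))                    # inf
    scaled = np.log(np.hypot(x / np.e, y / np.e)) + 1.0   # finite, about 709.88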
+
+#ifndef HAVE_CACOS@C@
+@ctype@
+npy_cacos@c@(@ctype@ z)
+{
+#if @precision@ == 1
+ /* this is sqrt(6*EPS) */
+ const npy_float SQRT_6_EPSILON = 8.4572793338e-4f;
+ /* chosen such that pio2_hi + pio2_lo == pio2_hi but causes FE_INEXACT. */
+ const volatile npy_float pio2_lo = 7.5497899549e-9f;
+#endif
+#if @precision@ == 2
+ const npy_double SQRT_6_EPSILON = 3.65002414998885671e-08;
+ const volatile npy_double pio2_lo = 6.1232339957367659e-17;
+#endif
+#if @precision@ == 3
+ const npy_longdouble SQRT_6_EPSILON = 8.0654900873493277169e-10l;
+ const volatile npy_longdouble pio2_lo = 2.710505431213761085e-20l;
+#endif
+ const @type@ RECIP_EPSILON = 1.0@c@ / @TEPS@;
+ const @type@ pio2_hi = NPY_PI_2@c@;
+ @type@ x, y, ax, ay, wx, wy, rx, ry, B, sqrt_A2mx2, new_x;
+ npy_int sx, sy;
+ npy_int B_is_usable;
+
x = npy_creal@c@(z);
y = npy_cimag@c@(z);
- return npy_cpack@c@(npy_sin@c@(x) * npy_cosh@c@(y), npy_cos@c@(x) * npy_sinh@c@(y));
+ sx = npy_signbit(x);
+ sy = npy_signbit(y);
+ ax = npy_fabs@c@(x);
+ ay = npy_fabs@c@(y);
+
+ if (npy_isnan(x) || npy_isnan(y)) {
+ /* cacos(+-Inf + I*NaN) = NaN + I*opt(-)Inf */
+ if (npy_isinf(x)) {
+ return npy_cpack@c@(y + y, -NPY_INFINITY@C@);
+ }
+ /* cacos(NaN + I*+-Inf) = NaN + I*-+Inf */
+ if (npy_isinf(y)) {
+ return npy_cpack@c@(x + x, -y);
+ }
+ /* cacos(0 + I*NaN) = PI/2 + I*NaN with inexact */
+ if (x == 0) {
+ return npy_cpack@c@(pio2_hi + pio2_lo, y + y);
+ }
+ /*
+ * All other cases involving NaN return NaN + I*NaN.
+ * C99 leaves it optional whether to raise invalid if one of
+ * the arguments is not NaN, so we opt not to raise it.
+ */
+ return npy_cpack@c@(NPY_NAN@C@, NPY_NAN@C@);
+ }
+
+ if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) {
+ /* clog...() will raise inexact unless x or y is infinite. */
+ _clog_for_large_values@c@(x, y, &wx, &wy);
+ rx = npy_fabs@c@(wy);
+ ry = wx + NPY_LOGE2@c@;
+ if (sy == 0) {
+ ry = -ry;
+ }
+ return npy_cpack@c@(rx, ry);
+ }
+
+ /* Avoid spuriously raising inexact for z = 1. */
+ if (x == 1 && y == 0) {
+ return npy_cpack@c@(0, -y);
+ }
+
+ /* All remaining cases are inexact. */
+ raise_inexact();
+
+ if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) {
+ return npy_cpack@c@(pio2_hi - (x - pio2_lo), -y);
+ }
+
+ _do_hard_work@c@(ay, ax, &ry, &B_is_usable, &B, &sqrt_A2mx2, &new_x);
+ if (B_is_usable) {
+ if (sx == 0) {
+ rx = npy_acos@c@(B);
+ }
+ else {
+ rx = npy_acos@c@(-B);
+ }
+ }
+ else {
+ if (sx == 0) {
+ rx = npy_atan2@c@(sqrt_A2mx2, new_x);
+ }
+ else {
+ rx = npy_atan2@c@(sqrt_A2mx2, -new_x);
+ }
+ }
+ if (sy == 0) {
+ ry = -ry;
+ }
+ return npy_cpack@c@(rx, ry);
+}
+#endif
+
+#ifndef HAVE_CASIN@C@
+@ctype@
+npy_casin@c@(@ctype@ z)
+{
+ /* casin(z) = I * conj( casinh(I * conj(z)) ) */
+ z = npy_casinh@c@(npy_cpack@c@(npy_cimag@c@(z), npy_creal@c@(z)));
+ return npy_cpack@c@(npy_cimag@c@(z), npy_creal@c@(z));
+}
+#endif
+
+#ifndef HAVE_CATAN@C@
+@ctype@
+npy_catan@c@(@ctype@ z)
+{
+ /* catan(z) = I * conj( catanh(I * conj(z)) ) */
+ z = npy_catanh@c@(npy_cpack@c@(npy_cimag@c@(z), npy_creal@c@(z)));
+ return npy_cpack@c@(npy_cimag@c@(z), npy_creal@c@(z));
+}
+#endif
+
+#ifndef HAVE_CACOSH@C@
+@ctype@
+npy_cacosh@c@(@ctype@ z)
+{
+ /*
+ * cacosh(z) = I*cacos(z) or -I*cacos(z)
+ * where the sign is chosen so Re(cacosh(z)) >= 0.
+ */
+ @ctype@ w;
+ @type@ rx, ry;
+
+ w = npy_cacos@c@(z);
+ rx = npy_creal@c@(w);
+ ry = npy_cimag@c@(w);
+ /* cacosh(NaN + I*NaN) = NaN + I*NaN */
+ if (npy_isnan(rx) && npy_isnan(ry)) {
+ return npy_cpack@c@(ry, rx);
+ }
+ /* cacosh(NaN + I*+-Inf) = +Inf + I*NaN */
+ /* cacosh(+-Inf + I*NaN) = +Inf + I*NaN */
+ if (npy_isnan(rx)) {
+ return npy_cpack@c@(npy_fabs@c@(ry), rx);
+ }
+ /* cacosh(0 + I*NaN) = NaN + I*NaN */
+ if (npy_isnan(ry)) {
+ return npy_cpack@c@(ry, ry);
+ }
+ return npy_cpack@c@(npy_fabs@c@(ry), npy_copysign@c@(rx, npy_cimag@c@(z)));
+}
+#endif
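
The conjugation identities used by npy_casin and npy_catan above, and the Re >= 0 sign choice made by npy_cacosh, can be spot-checked with cmath for a point off the branch cuts (a sketch, relying on cmath's principal branches):

    import cmath

    def rotate(f, z):
        # w -> I * conj(f(I * conj(z))), as in the casin/catan wrappers above
        return 1j * f(1j * z.conjugate()).conjugate()

    z = 0.3 + 0.4j
    assert abs(rotate(cmath.asinh, z) - cmath.asin(z)) < 1e-12
    assert abs(rotate(cmath.atanh, z) - cmath.atan(z)) < 1e-12

    w = 1j * cmath.acos(z)          # cacosh keeps whichever of +-I*cacos(z) has Re >= 0
    if w.real < 0:
        w = -w
    assert abs(w - cmath.acosh(z)) < 1e-12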
+
+#ifndef HAVE_CASINH@C@
+/*
+ * casinh(z) = z + O(z^3) as z -> 0
+ *
+ * casinh(z) = sign(x)*clog(sign(x)*z) + O(1/z^2) as z -> infinity
+ * The above formula works for the imaginary part as well, because
+ * Im(casinh(z)) = sign(x)*atan2(sign(x)*y, fabs(x)) + O(y/z^3)
+ * as z -> infinity, uniformly in y
+ */
+@ctype@
+npy_casinh@c@(@ctype@ z)
+{
+#if @precision@ == 1
+ /* this is sqrt(6*EPS) */
+ const npy_float SQRT_6_EPSILON = 8.4572793338e-4f;
+ /* chosen such that pio2_hi + pio2_lo == pio2_hi but causes FE_INEXACT. */
+ const volatile npy_float pio2_lo = 7.5497899549e-9f;
+#endif
+#if @precision@ == 2
+ const npy_double SQRT_6_EPSILON = 3.65002414998885671e-08;
+ const volatile npy_double pio2_lo = 6.1232339957367659e-17;
+#endif
+#if @precision@ == 3
+ const npy_longdouble SQRT_6_EPSILON = 8.0654900873493277169e-10l;
+ const volatile npy_longdouble pio2_lo = 2.710505431213761085e-20l;
+#endif
+ const @type@ RECIP_EPSILON = 1.0@c@ / @TEPS@;
+ const @type@ pio2_hi = NPY_PI_2@c@;
+ @type@ x, y, ax, ay, wx, wy, rx, ry, B, sqrt_A2my2, new_y;
+ npy_int B_is_usable;
+
+ x = npy_creal@c@(z);
+ y = npy_cimag@c@(z);
+ ax = npy_fabs@c@(x);
+ ay = npy_fabs@c@(y);
+
+ if (npy_isnan(x) || npy_isnan(y)) {
+ /* casinh(+-Inf + I*NaN) = +-Inf + I*NaN */
+ if (npy_isinf(x)) {
+ return npy_cpack@c@(x, y + y);
+ }
+ /* casinh(NaN + I*+-Inf) = opt(+-)Inf + I*NaN */
+ if (npy_isinf(y)) {
+ return npy_cpack@c@(y, x + x);
+ }
+ /* casinh(NaN + I*0) = NaN + I*0 */
+ if (y == 0) {
+ return npy_cpack@c@(x + x, y);
+ }
+ /*
+ * All other cases involving NaN return NaN + I*NaN.
+ * C99 leaves it optional whether to raise invalid if one of
+ * the arguments is not NaN, so we opt not to raise it.
+ */
+ return npy_cpack@c@(NPY_NAN@C@, NPY_NAN@C@);
+ }
+
+ if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) {
+ /* clog...() will raise inexact unless x or y is infinite. */
+ if (npy_signbit(x) == 0) {
+ _clog_for_large_values@c@(x, y, &wx, &wy);
+ wx += NPY_LOGE2@c@;
+ }
+ else {
+ _clog_for_large_values@c@(-x, -y, &wx, &wy);
+ wx += NPY_LOGE2@c@;
+ }
+ return npy_cpack@c@(npy_copysign@c@(wx, x), npy_copysign@c@(wy, y));
+ }
+
+ /* Avoid spuriously raising inexact for z = 0. */
+ if (x == 0 && y == 0) {
+ return (z);
+ }
+
+ /* All remaining cases are inexact. */
+ raise_inexact();
+
+ if (ax < SQRT_6_EPSILON / 4 && ay < SQRT_6_EPSILON / 4) {
+ return (z);
+ }
+
+ _do_hard_work@c@(ax, ay, &rx, &B_is_usable, &B, &sqrt_A2my2, &new_y);
+ if (B_is_usable) {
+ ry = npy_asin@c@(B);
+ }
+ else {
+ ry = npy_atan2@c@(new_y, sqrt_A2my2);
+ }
+ return npy_cpack@c@(npy_copysign@c@(rx, x), npy_copysign@c@(ry, y));
+}
+#endif
+
+#ifndef HAVE_CATANH@C@
+/*
+ * sum_squares(x,y) = x*x + y*y (or just x*x if y*y would underflow).
+ * Assumes x*x and y*y will not overflow.
+ * Assumes x and y are finite.
+ * Assumes y is non-negative.
+ * Assumes fabs(x) >= DBL_EPSILON.
+ */
+static NPY_INLINE @type@
+_sum_squares@c@(@type@ x, @type@ y)
+{
+#if @precision@ == 1
+    const npy_float SQRT_MIN = 1.0842022e-19f;
+#endif
+#if @precision@ == 2
+    const npy_double SQRT_MIN = 1.4916681462400413e-154; /* sqrt(DBL_MIN) */
+#endif
+#if @precision@ == 3
+    /* this is correct for 80 bit long doubles */
+    const npy_longdouble SQRT_MIN = 1.8336038675548471656e-2466l;
+#endif
+ /* Avoid underflow when y is small. */
+ if (y < SQRT_MIN) {
+ return (x * x);
+ }
+
+ return (x * x + y * y);
+}
+
+/*
+ * real_part_reciprocal(x, y) = Re(1/(x+I*y)) = x/(x*x + y*y).
+ * Assumes x and y are not NaN, and one of x and y is larger than
+ * RECIP_EPSILON. We avoid unwarranted underflow. It is important to not use
+ * the code creal(1/z), because the imaginary part may produce an unwanted
+ * underflow.
+ * This is only called in a context where inexact is always raised before
+ * the call, so no effort is made to avoid or force inexact.
+ */
+#if @precision@ == 1
+#define BIAS (FLT_MAX_EXP - 1)
+#define CUTOFF (FLT_MANT_DIG / 2 + 1)
+static NPY_INLINE npy_float
+_real_part_reciprocalf(npy_float x, npy_float y)
+{
+ npy_float scale;
+ npy_uint32 hx, hy;
+ npy_int32 ix, iy;
+
+ GET_FLOAT_WORD(hx, x);
+ ix = hx & 0x7f800000;
+ GET_FLOAT_WORD(hy, y);
+ iy = hy & 0x7f800000;
+ if (ix - iy >= CUTOFF << 23 || npy_isinf(x)) {
+ return (1 / x);
+ }
+ if (iy - ix >= CUTOFF << 23) {
+ return (x / y / y);
+ }
+ if (ix <= (BIAS + FLT_MAX_EXP / 2 - CUTOFF) << 23) {
+ return (x / (x * x + y * y));
+ }
+ SET_FLOAT_WORD(scale, 0x7f800000 - ix);
+ x *= scale;
+ y *= scale;
+ return (x / (x * x + y * y) * scale);
+}
+#undef BIAS
+#undef CUTOFF
+#endif
+#if @precision@ == 2
+#define BIAS (DBL_MAX_EXP - 1)
+/* more guard digits are useful iff there is extra precision. */
+#define CUTOFF (DBL_MANT_DIG / 2 + 1) /* just half or 1 guard digit */
+static NPY_INLINE npy_double
+_real_part_reciprocal(npy_double x, npy_double y)
+{
+ npy_double scale;
+ npy_uint32 hx, hy;
+ npy_int32 ix, iy;
+
+ /*
+ * This code is inspired by the C99 document n1124.pdf, Section G.5.1,
+ * example 2.
+ */
+ GET_HIGH_WORD(hx, x);
+ ix = hx & 0x7ff00000;
+ GET_HIGH_WORD(hy, y);
+ iy = hy & 0x7ff00000;
+ if (ix - iy >= CUTOFF << 20 || npy_isinf(x)) {
+ /* +-Inf -> +-0 is special */
+ return (1 / x);
+ }
+ if (iy - ix >= CUTOFF << 20) {
+ /* should avoid double div, but hard */
+ return (x / y / y);
+ }
+ if (ix <= (BIAS + DBL_MAX_EXP / 2 - CUTOFF) << 20) {
+ return (x / (x * x + y * y));
+ }
+ scale = 1;
+ SET_HIGH_WORD(scale, 0x7ff00000 - ix); /* 2**(1-ilogb(x)) */
+ x *= scale;
+ y *= scale;
+ return (x / (x * x + y * y) * scale);
+}
+#undef BIAS
+#undef CUTOFF
+#endif
+#if @precision@ == 3
+#ifndef HAVE_LDOUBLE_DOUBLE_DOUBLE_BE
+#define BIAS (LDBL_MAX_EXP - 1)
+#define CUTOFF (LDBL_MANT_DIG / 2 + 1)
+static NPY_INLINE npy_longdouble
+_real_part_reciprocall(npy_longdouble x,
+ npy_longdouble y)
+{
+ npy_longdouble scale;
+ union IEEEl2bitsrep ux, uy, us;
+ npy_int32 ix, iy;
+
+ ux.e = x;
+ ix = GET_LDOUBLE_EXP(ux);
+ uy.e = y;
+ iy = GET_LDOUBLE_EXP(uy);
+ if (ix - iy >= CUTOFF || npy_isinf(x)) {
+ return (1/x);
+ }
+ if (iy - ix >= CUTOFF) {
+ return (x/y/y);
+ }
+ if (ix <= BIAS + LDBL_MAX_EXP / 2 - CUTOFF) {
+ return (x/(x*x + y*y));
+ }
+ us.e = 1;
+ SET_LDOUBLE_EXP(us, 0x7fff - ix);
+ scale = us.e;
+ x *= scale;
+ y *= scale;
+ return (x/(x*x + y*y) * scale);
+}
+#undef BIAS
+#undef CUTOFF
+#else
+static NPY_INLINE npy_longdouble
+_real_part_reciprocall(npy_longdouble x,
+ npy_longdouble y)
+{
+ return x/(x*x + y*y);
+}
+#endif
+#endif
+
+@ctype@
+npy_catanh@c@(@ctype@ z)
+{
+#if @precision@ == 1
+ /* this is sqrt(3*EPS) */
+ const npy_float SQRT_3_EPSILON = 5.9801995673e-4f;
+ /* chosen such that pio2_hi + pio2_lo == pio2_hi but causes FE_INEXACT. */
+ const volatile npy_float pio2_lo = 7.5497899549e-9f;
+#endif
+#if @precision@ == 2
+ const npy_double SQRT_3_EPSILON = 2.5809568279517849e-8;
+ const volatile npy_double pio2_lo = 6.1232339957367659e-17;
+#endif
+#if @precision@ == 3
+ const npy_longdouble SQRT_3_EPSILON = 5.70316273435758915310e-10l;
+ const volatile npy_longdouble pio2_lo = 2.710505431213761085e-20l;
+#endif
+ const @type@ RECIP_EPSILON = 1.0@c@ / @TEPS@;
+ const @type@ pio2_hi = NPY_PI_2@c@;
+ @type@ x, y, ax, ay, rx, ry;
+
+ x = npy_creal@c@(z);
+ y = npy_cimag@c@(z);
+ ax = npy_fabs@c@(x);
+ ay = npy_fabs@c@(y);
+
+ /* This helps handle many cases. */
+ if (y == 0 && ax <= 1) {
+ return npy_cpack@c@(npy_atanh@c@(x), y);
+ }
+
+ /* To ensure the same accuracy as atan(), and to filter out z = 0. */
+ if (x == 0) {
+ return npy_cpack@c@(x, npy_atan@c@(y));
+ }
+
+ if (npy_isnan(x) || npy_isnan(y)) {
+ /* catanh(+-Inf + I*NaN) = +-0 + I*NaN */
+ if (npy_isinf(x)) {
+ return npy_cpack@c@(npy_copysign@c@(0, x), y + y);
+ }
+ /* catanh(NaN + I*+-Inf) = sign(NaN)0 + I*+-PI/2 */
+ if (npy_isinf(y)) {
+ return npy_cpack@c@(npy_copysign@c@(0, x),
+ npy_copysign@c@(pio2_hi + pio2_lo, y));
+ }
+ /*
+ * All other cases involving NaN return NaN + I*NaN.
+ * C99 leaves it optional whether to raise invalid if one of
+ * the arguments is not NaN, so we opt not to raise it.
+ */
+ return npy_cpack@c@(NPY_NAN@C@, NPY_NAN@C@);
+ }
+
+ if (ax > RECIP_EPSILON || ay > RECIP_EPSILON) {
+ return npy_cpack@c@(_real_part_reciprocal@c@(x, y),
+ npy_copysign@c@(pio2_hi + pio2_lo, y));
+ }
+
+ if (ax < SQRT_3_EPSILON / 2 && ay < SQRT_3_EPSILON / 2) {
+ /*
+ * z = 0 was filtered out above. All other cases must raise
+         * inexact, but this is the only one that needs to do it
+ * explicitly.
+ */
+ raise_inexact();
+ return (z);
+ }
+
+ if (ax == 1 && ay < @TEPS@) {
+ rx = (NPY_LOGE2@c@ - npy_log@c@(ay)) / 2;
+ }
+ else {
+ rx = npy_log1p@c@(4 * ax / _sum_squares@c@(ax - 1, ay)) / 4;
+ }
+
+ if (ax == 1) {
+ ry = npy_atan2@c@(2, -ay) / 2;
+ }
+ else if (ay < @TEPS@) {
+ ry = npy_atan2@c@(2 * ay, (1 - ax) * (1 + ax)) / 2;
+ }
+ else {
+ ry = npy_atan2@c@(2 * ay, (1 - ax) * (1 + ax) - ay * ay) / 2;
+ }
+
+ return npy_cpack@c@(npy_copysign@c@(rx, x), npy_copysign@c@(ry, y));
}
#endif
/**end repeat**/
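
The log1p expression used for the real part of npy_catanh is algebraically the familiar 0.25*log(((1+x)^2 + y^2) / ((1-x)^2 + y^2)); a quick Python check at an arbitrary point away from the special cases:

    import cmath, math

    x, y = 0.3, 0.4
    rx = 0.25 * math.log1p(4 * x / ((1 - x) ** 2 + y ** 2))   # the form used in npy_catanh
    assert abs(rx - cmath.atanh(complex(x, y)).real) < 1e-12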
@@ -245,7 +1753,8 @@
* #KIND = CABS,CARG#
*/
#ifdef HAVE_@KIND@@C@
-@type@ npy_@kind@@c@(@ctype@ z)
+@type@
+npy_@kind@@c@(@ctype@ z)
{
__@ctype@_to_c99_cast z1;
z1.npy_z = z;
@@ -255,11 +1764,14 @@
/**end repeat1**/
/**begin repeat1
- * #kind = cexp,clog,csqrt,ccos,csin#
- * #KIND = CEXP,CLOG,CSQRT,CCOS,CSIN#
+ * #kind = cexp,clog,csqrt,ccos,csin,ctan,ccosh,csinh,ctanh,
+ * cacos,casin,catan,cacosh,casinh,catanh#
+ * #KIND = CEXP,CLOG,CSQRT,CCOS,CSIN,CTAN,CCOSH,CSINH,CTANH,
+ * CACOS,CASIN,CATAN,CACOSH,CASINH,CATANH#
*/
#ifdef HAVE_@KIND@@C@
-@ctype@ npy_@kind@@c@(@ctype@ z)
+@ctype@
+npy_@kind@@c@(@ctype@ z)
{
__@ctype@_to_c99_cast z1;
__@ctype@_to_c99_cast ret;
@@ -270,22 +1782,5 @@
#endif
/**end repeat1**/
-/**begin repeat1
- * #kind = cpow#
- * #KIND = CPOW#
- */
-#ifdef HAVE_@KIND@@C@
-@ctype@ npy_@kind@@c@(@ctype@ x, @ctype@ y)
-{
- __@ctype@_to_c99_cast xcast;
- __@ctype@_to_c99_cast ycast;
- __@ctype@_to_c99_cast ret;
- xcast.npy_z = x;
- ycast.npy_z = y;
- ret.c99_z = @kind@@c@(xcast.c99_z, ycast.c99_z);
- return ret.npy_z;
-}
-#endif
-/**end repeat1**/
/**end repeat**/
diff --git a/numpy/core/src/private/npy_config.h b/numpy/core/src/private/npy_config.h
index 882913e2f..6e98dc7e9 100644
--- a/numpy/core/src/private/npy_config.h
+++ b/numpy/core/src/private/npy_config.h
@@ -4,12 +4,6 @@
#include "config.h"
#include "numpy/numpyconfig.h"
-/* Disable broken MS math functions */
-#if defined(_MSC_VER) || defined(__MINGW32_VERSION)
-#undef HAVE_ATAN2
-#undef HAVE_HYPOT
-#endif
-
/*
* largest alignment the copy loops might require
* required as string, void and complex types might get copied using larger
@@ -21,9 +15,56 @@
*/
#define NPY_MAX_COPY_ALIGNMENT 16
+/* blacklist */
+
/* Disable broken Sun Workshop Pro math functions */
#ifdef __SUNPRO_C
+
+#undef HAVE_ATAN2
+#undef HAVE_ATAN2F
+#undef HAVE_ATAN2L
+
+#endif
+
+/* Disable broken MS math functions */
+#if defined(_MSC_VER) || defined(__MINGW32_VERSION)
+
#undef HAVE_ATAN2
+#undef HAVE_ATAN2F
+#undef HAVE_ATAN2L
+
+#undef HAVE_HYPOT
+#undef HAVE_HYPOTF
+#undef HAVE_HYPOTL
+
#endif
+/* Disable broken gnu trig functions on linux */
+#if defined(__linux__) && defined(__GNUC__)
+
+#if defined(HAVE_FEATURES_H)
+#include <features.h>
+#define TRIG_OK __GLIBC_PREREQ(2, 16)
+#else
+#define TRIG_OK 0
+#endif
+
+#if !TRIG_OK
+#undef HAVE_CASIN
+#undef HAVE_CASINF
+#undef HAVE_CASINL
+#undef HAVE_CASINH
+#undef HAVE_CASINHF
+#undef HAVE_CASINHL
+#undef HAVE_CATAN
+#undef HAVE_CATANF
+#undef HAVE_CATANL
+#undef HAVE_CATANH
+#undef HAVE_CATANHF
+#undef HAVE_CATANHL
+#endif
+#undef TRIG_OK
+
+#endif /* defined(__linux__) && defined(__GNUC__) */
+
#endif
diff --git a/numpy/core/src/umath/funcs.inc.src b/numpy/core/src/umath/funcs.inc.src
index 3aad44c9f..f0fa3e9dd 100644
--- a/numpy/core/src/umath/funcs.inc.src
+++ b/numpy/core/src/umath/funcs.inc.src
@@ -177,49 +177,8 @@ npy_ObjectLogicalNot(PyObject *i1)
* #ctype = npy_cfloat, npy_cdouble, npy_clongdouble#
* #ftype = npy_float, npy_double, npy_longdouble#
* #c = f, ,l#
- * #C = F, ,L#
- * #precision = 1,2,4#
*/
-/*
- * Perform the operation result := 1 + coef * x * result,
- * with real coefficient `coef`.
- */
-#define SERIES_HORNER_TERM@c@(result, x, coef) \
- do { \
- nc_prod@c@((result), (x), (result)); \
- (result)->real *= (coef); \
- (result)->imag *= (coef); \
- nc_sum@c@((result), &nc_1@c@, (result)); \
- } while(0)
-
-/* constants */
-static @ctype@ nc_1@c@ = {1., 0.};
-static @ctype@ nc_half@c@ = {0.5, 0.};
-static @ctype@ nc_i@c@ = {0., 1.};
-static @ctype@ nc_i2@c@ = {0., 0.5};
-/*
- * static @ctype@ nc_mi@c@ = {0.0@c@, -1.0@c@};
- * static @ctype@ nc_pi2@c@ = {NPY_PI_2@c@., 0.0@c@};
- */
-
-
-static void
-nc_sum@c@(@ctype@ *a, @ctype@ *b, @ctype@ *r)
-{
- r->real = a->real + b->real;
- r->imag = a->imag + b->imag;
- return;
-}
-
-static void
-nc_diff@c@(@ctype@ *a, @ctype@ *b, @ctype@ *r)
-{
- r->real = a->real - b->real;
- r->imag = a->imag - b->imag;
- return;
-}
-
static void
nc_neg@c@(@ctype@ *a, @ctype@ *r)
{
@@ -229,26 +188,6 @@ nc_neg@c@(@ctype@ *a, @ctype@ *r)
}
static void
-nc_prod@c@(@ctype@ *a, @ctype@ *b, @ctype@ *r)
-{
- @ftype@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag;
- r->real = ar*br - ai*bi;
- r->imag = ar*bi + ai*br;
- return;
-}
-
-static void
-nc_quot@c@(@ctype@ *a, @ctype@ *b, @ctype@ *r)
-{
-
- @ftype@ ar=a->real, br=b->real, ai=a->imag, bi=b->imag;
- @ftype@ d = br*br + bi*bi;
- r->real = (ar*br + ai*bi)/d;
- r->imag = (ai*br - ar*bi)/d;
- return;
-}
-
-static void
nc_sqrt@c@(@ctype@ *x, @ctype@ *r)
{
*r = npy_csqrt@c@(*x);
@@ -307,164 +246,28 @@ nc_expm1@c@(@ctype@ *x, @ctype@ *r)
static void
nc_pow@c@(@ctype@ *a, @ctype@ *b, @ctype@ *r)
{
- npy_intp n;
- @ftype@ ar = npy_creal@c@(*a);
- @ftype@ br = npy_creal@c@(*b);
- @ftype@ ai = npy_cimag@c@(*a);
- @ftype@ bi = npy_cimag@c@(*b);
-
- if (br == 0. && bi == 0.) {
- *r = npy_cpack@c@(1., 0.);
- return;
- }
- if (ar == 0. && ai == 0.) {
- if (br > 0 && bi == 0) {
- *r = npy_cpack@c@(0., 0.);
- }
- else {
- volatile @ftype@ tmp = NPY_INFINITY;
- /* NB: there are four complex zeros; c0 = (+-0, +-0), so that unlike
- * for reals, c0**p, with `p` negative is in general
- * ill-defined.
- *
- * c0**z with z complex is also ill-defined.
- */
- *r = npy_cpack@c@(NPY_NAN, NPY_NAN);
-
- /* Raise invalid */
- tmp -= NPY_INFINITY;
- ar = tmp;
- }
- return;
- }
- if (bi == 0 && (n=(npy_intp)br) == br) {
- if (n == 1) {
- /* unroll: handle inf better */
- *r = npy_cpack@c@(ar, ai);
- return;
- }
- else if (n == 2) {
- /* unroll: handle inf better */
- nc_prod@c@(a, a, r);
- return;
- }
- else if (n == 3) {
- /* unroll: handle inf better */
- nc_prod@c@(a, a, r);
- nc_prod@c@(a, r, r);
- return;
- }
- else if (n > -100 && n < 100) {
- @ctype@ p, aa;
- npy_intp mask = 1;
- if (n < 0) n = -n;
- aa = nc_1@c@;
- p = npy_cpack@c@(ar, ai);
- while (1) {
- if (n & mask)
- nc_prod@c@(&aa,&p,&aa);
- mask <<= 1;
- if (n < mask || mask <= 0) break;
- nc_prod@c@(&p,&p,&p);
- }
- *r = npy_cpack@c@(npy_creal@c@(aa), npy_cimag@c@(aa));
- if (br < 0) nc_quot@c@(&nc_1@c@, r, r);
- return;
- }
- }
-
- *r = npy_cpow@c@(*a, *b);
+ *r = npy_cpow@c@(*a, *b);
return;
}
-
-static void
-nc_prodi@c@(@ctype@ *x, @ctype@ *r)
-{
- @ftype@ xr = x->real;
- r->real = -x->imag;
- r->imag = xr;
- return;
-}
-
-
static void
nc_acos@c@(@ctype@ *x, @ctype@ *r)
{
- /*
- * return nc_neg(nc_prodi(nc_log(nc_sum(x,nc_prod(nc_i,
- * nc_sqrt(nc_diff(nc_1,nc_prod(x,x))))))));
- */
- nc_prod@c@(x,x,r);
- nc_diff@c@(&nc_1@c@, r, r);
- nc_sqrt@c@(r, r);
- nc_prodi@c@(r, r);
- nc_sum@c@(x, r, r);
- nc_log@c@(r, r);
- nc_prodi@c@(r, r);
- nc_neg@c@(r, r);
+ *r = npy_cacos@c@(*x);
return;
}
static void
nc_acosh@c@(@ctype@ *x, @ctype@ *r)
{
- /*
- * return nc_log(nc_sum(x,
- * nc_prod(nc_sqrt(nc_sum(x,nc_1)), nc_sqrt(nc_diff(x,nc_1)))));
- */
- @ctype@ t;
-
- nc_sum@c@(x, &nc_1@c@, &t);
- nc_sqrt@c@(&t, &t);
- nc_diff@c@(x, &nc_1@c@, r);
- nc_sqrt@c@(r, r);
- nc_prod@c@(&t, r, r);
- nc_sum@c@(x, r, r);
- nc_log@c@(r, r);
+ *r = npy_cacosh@c@(*x);
return;
}
static void
nc_asin@c@(@ctype@ *x, @ctype@ *r)
{
- /*
- * return nc_neg(nc_prodi(nc_log(nc_sum(nc_prod(nc_i,x),
- * nc_sqrt(nc_diff(nc_1,nc_prod(x,x)))))));
- */
- if (fabs(x->real) > 1e-3 || fabs(x->imag) > 1e-3) {
- @ctype@ a, *pa=&a;
- nc_prod@c@(x, x, r);
- nc_diff@c@(&nc_1@c@, r, r);
- nc_sqrt@c@(r, r);
- nc_prodi@c@(x, pa);
- nc_sum@c@(pa, r, r);
- nc_log@c@(r, r);
- nc_prodi@c@(r, r);
- nc_neg@c@(r, r);
- }
- else {
- /*
- * Small arguments: series expansion, to avoid loss of precision
- * asin(x) = x [1 + (1/6) x^2 [1 + (9/20) x^2 [1 + ...]]]
- *
- * |x| < 1e-3 => |rel. error| < 1e-18 (f), 1e-24, 1e-36 (l)
- */
- @ctype@ x2;
- nc_prod@c@(x, x, &x2);
-
- *r = nc_1@c@;
-#if @precision@ >= 3
- SERIES_HORNER_TERM@c@(r, &x2, 81.0@C@/110);
- SERIES_HORNER_TERM@c@(r, &x2, 49.0@C@/72);
-#endif
-#if @precision@ >= 2
- SERIES_HORNER_TERM@c@(r, &x2, 25.0@C@/42);
-#endif
- SERIES_HORNER_TERM@c@(r, &x2, 9.0@C@/20);
- SERIES_HORNER_TERM@c@(r, &x2, 1.0@C@/6);
- nc_prod@c@(r, x, r);
- }
+ *r = npy_casin@c@(*x);
return;
}
@@ -472,134 +275,35 @@ nc_asin@c@(@ctype@ *x, @ctype@ *r)
static void
nc_asinh@c@(@ctype@ *x, @ctype@ *r)
{
- /*
- * return nc_log(nc_sum(nc_sqrt(nc_sum(nc_1,nc_prod(x,x))),x));
- */
- if (fabs(x->real) > 1e-3 || fabs(x->imag) > 1e-3) {
- nc_prod@c@(x, x, r);
- nc_sum@c@(&nc_1@c@, r, r);
- nc_sqrt@c@(r, r);
- nc_sum@c@(r, x, r);
- nc_log@c@(r, r);
- }
- else {
- /*
- * Small arguments: series expansion, to avoid loss of precision
- * asinh(x) = x [1 - (1/6) x^2 [1 - (9/20) x^2 [1 - ...]]]
- *
- * |x| < 1e-3 => |rel. error| < 1e-18 (f), 1e-24, 1e-36 (l)
- */
- @ctype@ x2;
- nc_prod@c@(x, x, &x2);
-
- *r = nc_1@c@;
-#if @precision@ >= 3
- SERIES_HORNER_TERM@c@(r, &x2, -81.0@C@/110);
- SERIES_HORNER_TERM@c@(r, &x2, -49.0@C@/72);
-#endif
-#if @precision@ >= 2
- SERIES_HORNER_TERM@c@(r, &x2, -25.0@C@/42);
-#endif
- SERIES_HORNER_TERM@c@(r, &x2, -9.0@C@/20);
- SERIES_HORNER_TERM@c@(r, &x2, -1.0@C@/6);
- nc_prod@c@(r, x, r);
- }
+ *r = npy_casinh@c@(*x);
return;
}
static void
nc_atan@c@(@ctype@ *x, @ctype@ *r)
{
- /*
- * return nc_prod(nc_i2,nc_log(nc_quot(nc_sum(nc_i,x),nc_diff(nc_i,x))));
- */
- if (fabs(x->real) > 1e-3 || fabs(x->imag) > 1e-3) {
- @ctype@ a, *pa=&a;
- nc_diff@c@(&nc_i@c@, x, pa);
- nc_sum@c@(&nc_i@c@, x, r);
- nc_quot@c@(r, pa, r);
- nc_log@c@(r,r);
- nc_prod@c@(&nc_i2@c@, r, r);
- }
- else {
- /*
- * Small arguments: series expansion, to avoid loss of precision
- * atan(x) = x [1 - (1/3) x^2 [1 - (3/5) x^2 [1 - ...]]]
- *
- * |x| < 1e-3 => |rel. error| < 1e-18 (f), 1e-24, 1e-36 (l)
- */
- @ctype@ x2;
- nc_prod@c@(x, x, &x2);
-
- *r = nc_1@c@;
-#if @precision@ >= 3
- SERIES_HORNER_TERM@c@(r, &x2, -9.0@C@/11);
- SERIES_HORNER_TERM@c@(r, &x2, -7.0@C@/9);
-#endif
-#if @precision@ >= 2
- SERIES_HORNER_TERM@c@(r, &x2, -5.0@C@/7);
-#endif
- SERIES_HORNER_TERM@c@(r, &x2, -3.0@C@/5);
- SERIES_HORNER_TERM@c@(r, &x2, -1.0@C@/3);
- nc_prod@c@(r, x, r);
- }
+ *r = npy_catan@c@(*x);
return;
}
static void
nc_atanh@c@(@ctype@ *x, @ctype@ *r)
{
- /*
- * return nc_prod(nc_half,nc_log(nc_quot(nc_sum(nc_1,x),nc_diff(nc_1,x))));
- */
- if (fabs(x->real) > 1e-3 || fabs(x->imag) > 1e-3) {
- @ctype@ a, *pa=&a;
- nc_diff@c@(&nc_1@c@, x, r);
- nc_sum@c@(&nc_1@c@, x, pa);
- nc_quot@c@(pa, r, r);
- nc_log@c@(r, r);
- nc_prod@c@(&nc_half@c@, r, r);
- }
- else {
- /*
- * Small arguments: series expansion, to avoid loss of precision
- * atan(x) = x [1 + (1/3) x^2 [1 + (3/5) x^2 [1 + ...]]]
- *
- * |x| < 1e-3 => |rel. error| < 1e-18 (f), 1e-24, 1e-36 (l)
- */
- @ctype@ x2;
- nc_prod@c@(x, x, &x2);
-
- *r = nc_1@c@;
-#if @precision@ >= 3
- SERIES_HORNER_TERM@c@(r, &x2, 9.0@C@/11);
- SERIES_HORNER_TERM@c@(r, &x2, 7.0@C@/9);
-#endif
-#if @precision@ >= 2
- SERIES_HORNER_TERM@c@(r, &x2, 5.0@C@/7);
-#endif
- SERIES_HORNER_TERM@c@(r, &x2, 3.0@C@/5);
- SERIES_HORNER_TERM@c@(r, &x2, 1.0@C@/3);
- nc_prod@c@(r, x, r);
- }
+ *r = npy_catanh@c@(*x);
return;
}
static void
nc_cos@c@(@ctype@ *x, @ctype@ *r)
{
- @ftype@ xr=x->real, xi=x->imag;
- r->real = npy_cos@c@(xr)*npy_cosh@c@(xi);
- r->imag = -npy_sin@c@(xr)*npy_sinh@c@(xi);
+ *r = npy_ccos@c@(*x);
return;
}
static void
nc_cosh@c@(@ctype@ *x, @ctype@ *r)
{
- @ftype@ xr=x->real, xi=x->imag;
- r->real = npy_cos@c@(xi)*npy_cosh@c@(xr);
- r->imag = npy_sin@c@(xi)*npy_sinh@c@(xr);
+ *r = npy_ccosh@c@(*x);
return;
}
@@ -624,63 +328,29 @@ nc_log2@c@(@ctype@ *x, @ctype@ *r)
static void
nc_sin@c@(@ctype@ *x, @ctype@ *r)
{
- @ftype@ xr=x->real, xi=x->imag;
- r->real = npy_sin@c@(xr)*npy_cosh@c@(xi);
- r->imag = npy_cos@c@(xr)*npy_sinh@c@(xi);
+ *r = npy_csin@c@(*x);
return;
}
static void
nc_sinh@c@(@ctype@ *x, @ctype@ *r)
{
- @ftype@ xr=x->real, xi=x->imag;
- r->real = npy_cos@c@(xi)*npy_sinh@c@(xr);
- r->imag = npy_sin@c@(xi)*npy_cosh@c@(xr);
+ *r = npy_csinh@c@(*x);
return;
}
static void
nc_tan@c@(@ctype@ *x, @ctype@ *r)
{
- @ftype@ sr,cr,shi,chi;
- @ftype@ rs,is,rc,ic;
- @ftype@ d;
- @ftype@ xr=x->real, xi=x->imag;
- sr = npy_sin@c@(xr);
- cr = npy_cos@c@(xr);
- shi = npy_sinh@c@(xi);
- chi = npy_cosh@c@(xi);
- rs = sr*chi;
- is = cr*shi;
- rc = cr*chi;
- ic = -sr*shi;
- d = rc*rc + ic*ic;
- r->real = (rs*rc+is*ic)/d;
- r->imag = (is*rc-rs*ic)/d;
- return;
+ *r = npy_ctan@c@(*x);
+ return;
}
static void
nc_tanh@c@(@ctype@ *x, @ctype@ *r)
{
- @ftype@ si,ci,shr,chr;
- @ftype@ rs,is,rc,ic;
- @ftype@ d;
- @ftype@ xr=x->real, xi=x->imag;
- si = npy_sin@c@(xi);
- ci = npy_cos@c@(xi);
- shr = npy_sinh@c@(xr);
- chr = npy_cosh@c@(xr);
- rs = ci*shr;
- is = si*chr;
- rc = ci*chr;
- ic = si*shr;
- d = rc*rc + ic*ic;
- r->real = (rs*rc+is*ic)/d;
- r->imag = (is*rc-rs*ic)/d;
+ *r = npy_ctanh@c@(*x);
return;
}
-#undef SERIES_HORNER_TERM@c@
-
/**end repeat**/
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index ec28bb9e4..fe2e8cac3 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1594,7 +1594,6 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
else if (op[i] != NULL &&
PyArray_DESCR(op[i])->type_num == type_nums[i]) {
out_dtypes[i] = ensure_dtype_nbo(PyArray_DESCR(op[i]));
- Py_XINCREF(out_dtypes[i]);
}
/*
* For outputs, copy the dtype from op[0] if the type_num
@@ -1603,7 +1602,6 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
else if (i >= nin && op[0] != NULL &&
PyArray_DESCR(op[0])->type_num == type_nums[i]) {
out_dtypes[i] = ensure_dtype_nbo(PyArray_DESCR(op[0]));
- Py_XINCREF(out_dtypes[i]);
}
/* Otherwise create a plain descr from the type number */
else {
diff --git a/numpy/core/src/umath/umath_tests.c.src b/numpy/core/src/umath/umath_tests.c.src
index 33d9846bd..509415711 100644
--- a/numpy/core/src/umath/umath_tests.c.src
+++ b/numpy/core/src/umath/umath_tests.c.src
@@ -202,12 +202,11 @@ static void
INIT_OUTER_LOOP_2
npy_intp len_n = *dimensions++;
npy_intp len_d = *dimensions++;
- npy_intp len_p = *dimensions;
npy_intp stride_n = *steps++;
npy_intp stride_d = *steps++;
npy_intp stride_p = *steps;
- assert(len_n * (len_n - 1) / 2 == len_p);
+ assert(len_n * (len_n - 1) / 2 == *dimensions);
BEGIN_OUTER_LOOP_2
const char *data_this = (const char *)args[0];
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index b482bf230..9f0fb47e5 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1574,6 +1574,11 @@ class TestAllclose(object):
assert_(allclose(a, a))
+ def test_equalnan(self):
+ x = np.array([1.0, np.nan])
+ assert_(allclose(x, x, equal_nan=True))
+
+
class TestIsclose(object):
rtol = 1e-5
atol = 1e-8
@@ -1664,17 +1669,25 @@ class TestIsclose(object):
assert_array_equal(isclose(arr, arr, equal_nan=True), [True, True])
def test_masked_arrays(self):
+ # Make sure to test the output type when arguments are interchanged.
+
x = np.ma.masked_where([True, True, False], np.arange(3))
assert_(type(x) is type(isclose(2, x)))
+ assert_(type(x) is type(isclose(x, 2)))
x = np.ma.masked_where([True, True, False], [nan, inf, nan])
assert_(type(x) is type(isclose(inf, x)))
+ assert_(type(x) is type(isclose(x, inf)))
x = np.ma.masked_where([True, True, False], [nan, nan, nan])
y = isclose(nan, x, equal_nan=True)
assert_(type(x) is type(y))
# Ensure that the mask isn't modified...
assert_array_equal([True, True, False], y.mask)
+ y = isclose(x, nan, equal_nan=True)
+ assert_(type(x) is type(y))
+ # Ensure that the mask isn't modified...
+ assert_array_equal([True, True, False], y.mask)
x = np.ma.masked_where([True, True, False], [nan, nan, nan])
y = isclose(x, x, equal_nan=True)
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 355e5480a..a7895a30a 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -73,10 +73,27 @@ class TestFromrecords(TestCase):
assert_((mine.data2[i] == 0.0))
def test_recarray_from_repr(self):
- x = np.rec.array([ (1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
- y = eval("np." + repr(x))
- assert_(isinstance(y, np.recarray))
- assert_equal(y, x)
+ a = np.array([(1,'ABC'), (2, "DEF")],
+ dtype=[('foo', int), ('bar', 'S4')])
+ recordarr = np.rec.array(a)
+ recarr = a.view(np.recarray)
+ recordview = a.view(np.dtype((np.record, a.dtype)))
+
+ recordarr_r = eval("numpy." + repr(recordarr), {'numpy': np})
+ recarr_r = eval("numpy." + repr(recarr), {'numpy': np})
+ recordview_r = eval("numpy." + repr(recordview), {'numpy': np})
+
+ assert_equal(type(recordarr_r), np.recarray)
+ assert_equal(recordarr_r.dtype.type, np.record)
+ assert_equal(recordarr, recordarr_r)
+
+ assert_equal(type(recarr_r), np.recarray)
+ assert_equal(recarr_r.dtype.type, np.void)
+ assert_equal(recarr, recarr_r)
+
+ assert_equal(type(recordview_r), np.ndarray)
+ assert_equal(recordview.dtype.type, np.record)
+ assert_equal(recordview, recordview_r)
def test_recarray_from_names(self):
ra = np.rec.array([
@@ -124,6 +141,28 @@ class TestFromrecords(TestCase):
assert_equal(a.b, ['a', 'bbb'])
assert_equal(a[-1].b, 'bbb')
+ def test_recarray_stringtypes(self):
+ # Issue #3993
+ a = np.array([('abc ', 1), ('abc', 2)],
+ dtype=[('foo', 'S4'), ('bar', int)])
+ a = a.view(np.recarray)
+ assert_equal(a.foo[0] == a.foo[1], False)
+
+ def test_recarray_returntypes(self):
+ a = np.rec.array([('abc ', (1,1), 1), ('abc', (2,3), 1)],
+ dtype=[('foo', 'S4'),
+ ('bar', [('A', int), ('B', int)]),
+ ('baz', int)])
+ assert_equal(type(a.foo), np.ndarray)
+ assert_equal(type(a['foo']), np.ndarray)
+ assert_equal(type(a.bar), np.recarray)
+ assert_equal(type(a['bar']), np.recarray)
+ assert_equal(a.bar.dtype.type, np.record)
+ assert_equal(type(a.baz), np.ndarray)
+ assert_equal(type(a['baz']), np.ndarray)
+ assert_equal(type(a[0].bar), np.record)
+ assert_equal(a[0].bar.A, 1)
+
class TestRecord(TestCase):
def setUp(self):
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index c71b7b658..092953872 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1363,45 +1363,53 @@ class TestComplexFunctions(object):
def test_branch_cuts(self):
# check branch cuts and continuity on them
- yield _check_branch_cut, np.log, -0.5, 1j, 1, -1
- yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1
- yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1
- yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1
- yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1
+ yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
+ yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
+ yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
+ yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
+ yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1
- yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1
- yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1
+ yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True
+ yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True
+ yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1 ], -1, 1, True
- yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1
- yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1
- yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1
+ yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True
+ yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
+ yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True
# check against bogus branch cuts: assert continuity between quadrants
- yield _check_branch_cut, np.arcsin, [-2j, 2j], [ 1, 1], 1, 1
- yield _check_branch_cut, np.arccos, [-2j, 2j], [ 1, 1], 1, 1
+ yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1
+ yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1
yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1
- yield _check_branch_cut, np.arccosh, [-2j, 2j, 2], [1, 1, 1j], 1, 1
- yield _check_branch_cut, np.arctanh, [-2j, 2j, 0], [1, 1, 1j], 1, 1
+ yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1
+ yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1
- @dec.knownfailureif(True, "These branch cuts are known to fail")
- def test_branch_cuts_failing(self):
- # XXX: signed zero not OK with ICC on 64-bit platform for log, see
- # http://permalink.gmane.org/gmane.comp.python.numeric.general/25335
- yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
- # XXX: signed zeros are not OK for sqrt or for the arc* functions
- yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, -1j], 1, -1, True
- yield _check_branch_cut, np.arccos, [ -2, 2], [1j, -1j], 1, -1, True
- yield _check_branch_cut, np.arctan, [-2j, 2j], [1, -1 ], -1, 1, True
- yield _check_branch_cut, np.arcsinh, [-2j, 2j], [-1, 1], -1, 1, True
- yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
- yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, -1j], 1, -1, True
+ def test_branch_cuts_complex64(self):
+ # check branch cuts and continuity on them
+ yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True, np.complex64
+ yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True, np.complex64
+ yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64
+ yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64
+ yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True, np.complex64
+
+ yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
+ yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
+ yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1 ], -1, 1, True, np.complex64
+
+ yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
+ yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64
+ yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
+
+ # check against bogus branch cuts: assert continuity between quadrants
+ yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
+ yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
+ yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64
+
+ yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1 ], 1, 1, False, np.complex64
+ yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64
+ yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64
def test_against_cmath(self):
import cmath, sys
@@ -1466,7 +1474,7 @@ class TestComplexFunctions(object):
# So, give more leeway for long complex tests here:
check(x_series, 50*eps)
else:
- check(x_series, 2*eps)
+ check(x_series, 2.1*eps)
check(x_basic, 2*eps/1e-3)
# Check a few points
@@ -1565,8 +1573,12 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
x0 = np.atleast_1d(x0).astype(dtype)
dx = np.atleast_1d(dx).astype(dtype)
- scale = np.finfo(dtype).eps * 1e3
- atol = 1e-4
+ if np.dtype(dtype).char == 'F':
+ scale = np.finfo(dtype).eps * 1e2
+ atol = np.float32(1e-2)
+ else:
+ scale = np.finfo(dtype).eps * 1e3
+ atol = 1e-4
y0 = f(x0)
yp = f(x0 + dx*scale*np.absolute(x0)/np.absolute(dx))
@@ -1581,16 +1593,20 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
# check that signed zeros also work as a displacement
jr = (x0.real == 0) & (dx.real != 0)
ji = (x0.imag == 0) & (dx.imag != 0)
-
- x = -x0
- x.real[jr] = 0.*dx.real
- x.imag[ji] = 0.*dx.imag
- x = -x
- ym = f(x)
- ym = ym[jr | ji]
- y0 = y0[jr | ji]
- assert_(np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym))
- assert_(np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym))
+ if np.any(jr):
+ x = x0[jr]
+ x.real = np.NZERO
+ ym = f(x)
+ assert_(np.all(np.absolute(y0[jr].real - ym.real*re_sign) < atol), (y0[jr], ym))
+ assert_(np.all(np.absolute(y0[jr].imag - ym.imag*im_sign) < atol), (y0[jr], ym))
+
+
+ if np.any(ji):
+ x = x0[ji]
+ x.imag = np.NZERO
+ ym = f(x)
+ assert_(np.all(np.absolute(y0[ji].real - ym.real*re_sign) < atol), (y0[ji], ym))
+ assert_(np.all(np.absolute(y0[ji].imag - ym.imag*im_sign) < atol), (y0[ji], ym))
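
What the signed-zero displacement probes can be seen directly; the sketch below assumes a platform whose libm (or the npy_math fallback) honours the sign of zero, which is what these tests now require:

    import numpy as np

    above = np.log(np.complex128(complex(-0.5, 0.0)))    # imag ~ +pi
    below = np.log(np.complex128(complex(-0.5, -0.0)))   # imag ~ -pi
    assert above.imag > 0 and below.imag < 0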
def test_copysign():
assert_(np.copysign(1, -1) == -1)
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index a80e525e3..f76174c7a 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -152,7 +152,7 @@ class IntelVisualFCompiler(BaseIntelFCompiler):
module_include_switch = '/I'
def get_flags(self):
- opt = ['/nologo', '/MD', '/nbs', '/Qlowercase', '/us']
+ opt = ['/nologo', '/MD', '/nbs', '/names:lowercase', '/assume:underscore']
return opt
def get_flags_free(self):
diff --git a/numpy/distutils/lib2def.py b/numpy/distutils/lib2def.py
index 7316547a3..0a5364566 100644
--- a/numpy/distutils/lib2def.py
+++ b/numpy/distutils/lib2def.py
@@ -66,7 +66,7 @@ def getnm(nm_cmd = ['nm', '-Cs', 'python%s.lib' % py_ver]):
"""Returns the output of nm_cmd via a pipe.
nm_output = getnam(nm_cmd = 'nm -Cs py_lib')"""
- f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE)
+ f = subprocess.Popen(nm_cmd, shell=True, stdout=subprocess.PIPE, universal_newlines=True)
nm_output = f.stdout.read()
f.stdout.close()
return nm_output
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index f2329827e..73bf3b317 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -268,6 +268,10 @@ array if the field has a structured type but as a plain ndarray otherwise. ::
>>> type(recordarr.bar)
<class 'numpy.core.records.recarray'>
+Note that if a field has the same name as an ndarray attribute, the ndarray
+attribute takes precedence. Such fields will be inaccessible by attribute but
+may still be accessed by index.
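
For example, a field deliberately given the (hypothetical) name 'shape' is shadowed by the ndarray attribute but still reachable by index; a short sketch:

    import numpy as np

    a = np.array([(1, 2.0)], dtype=[('shape', 'i4'), ('y', 'f8')]).view(np.recarray)
    a.shape       # (1,)  <- the ndarray attribute wins
    a['shape']    # array([1], dtype=int32)  <- the field, via indexing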
+
Partial Attribute Access
------------------------
@@ -288,7 +292,7 @@ such a view do not have field attributes::
To use the np.record dtype only, convert the dtype using the (base_class,
dtype) form described in numpy.dtype. This type of view is rarely used. ::
- >>> arr_records = arr.view(dtype(np.record, arr.dtype))
+ >>> arr_records = arr.view(dtype((np.record, arr.dtype)))
In documentation, the term 'structured array' will refer to objects of type
np.ndarray with structured dtype, 'record array' will refer to structured
diff --git a/numpy/fft/fftpack.c b/numpy/fft/fftpack.c
index 355076a30..277f49f07 100644
--- a/numpy/fft/fftpack.c
+++ b/numpy/fft/fftpack.c
@@ -1,18 +1,15 @@
/*
-fftpack.c : A set of FFT routines in C.
-Algorithmically based on Fortran-77 FFTPACK by Paul N. Swarztrauber (Version 4, 1985).
-
+ * fftpack.c : A set of FFT routines in C.
+ * Algorithmically based on Fortran-77 FFTPACK by Paul N. Swarztrauber (Version 4, 1985).
*/
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-/* isign is +1 for backward and -1 for forward transforms */
-
+#include <Python.h>
#include <math.h>
#include <stdio.h>
-#include <Python.h>
#include <numpy/ndarraytypes.h>
#define DOUBLE
-
#ifdef DOUBLE
#define Treal double
#else
diff --git a/numpy/fft/fftpack_litemodule.c b/numpy/fft/fftpack_litemodule.c
index 7b37ce9b7..e895d0efe 100644
--- a/numpy/fft/fftpack_litemodule.c
+++ b/numpy/fft/fftpack_litemodule.c
@@ -149,7 +149,7 @@ fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
PyObject *op1, *op2;
PyArrayObject *data, *ret;
PyArray_Descr *descr;
- double *wsave, *dptr, *rptr;
+ double *wsave = NULL, *dptr, *rptr;
npy_intp nsave;
int npts, nrepeats, i, rstep;
@@ -166,6 +166,9 @@ fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
PyArray_DIMS(data)[PyArray_NDIM(data) - 1] = npts/2 + 1;
ret = (PyArrayObject *)PyArray_Zeros(PyArray_NDIM(data),
PyArray_DIMS(data), PyArray_DescrFromType(NPY_CDOUBLE), 0);
+ if (ret == NULL) {
+ goto fail;
+ }
PyArray_DIMS(data)[PyArray_NDIM(data) - 1] = npts;
rstep = PyArray_DIM(ret, PyArray_NDIM(ret) - 1)*2;
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index f2adcda10..316704b42 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -320,12 +320,13 @@ class NameValidator(object):
# Process the case option .....
if (case_sensitive is None) or (case_sensitive is True):
self.case_converter = lambda x: x
- elif (case_sensitive is False) or ('u' in case_sensitive):
+ elif (case_sensitive is False) or case_sensitive.startswith('u'):
self.case_converter = lambda x: x.upper()
- elif 'l' in case_sensitive:
+ elif case_sensitive.startswith('l'):
self.case_converter = lambda x: x.lower()
else:
- self.case_converter = lambda x: x
+ msg = 'unrecognized case_sensitive value %s.' % case_sensitive
+ raise ValueError(msg)
#
self.replace_space = replace_space
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index d3b6119f4..cb24eb24e 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -392,12 +392,13 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
else:
bool_ar = (sar[1:] == sar[:-1])
flag = np.concatenate((bool_ar, [invert]))
- indx = order.argsort(kind='mergesort')[:len(ar1)]
+ ret = np.empty(ar.shape, dtype=bool)
+ ret[order] = flag
if assume_unique:
- return flag[indx]
+ return ret[:len(ar1)]
else:
- return flag[indx][rev_idx]
+ return ret[rev_idx]
def union1d(ar1, ar2):
"""
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index baff8b0b6..a7e4e60b6 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -208,11 +208,11 @@ def pmt(rate, nper, pv, fv=0, when='end'):
"""
when = _convert_when(when)
(rate, nper, pv, fv, when) = map(np.asarray, [rate, nper, pv, fv, when])
- temp = (1+rate)**nper
- miter = np.broadcast(rate, nper, pv, fv, when)
- zer = np.zeros(miter.shape)
- fact = np.where(rate == zer, nper + zer,
- (1 + rate*when)*(temp - 1)/rate + zer)
+ temp = (1 + rate)**nper
+ mask = (rate == 0.0)
+ np.copyto(rate, 1.0, where=mask)
+ z = np.zeros(np.broadcast(rate, nper, pv, fv, when).shape)
+ fact = np.where(mask != z, nper + z, (1 + rate*when)*(temp - 1)/rate + z)
return -(fv + pv*temp) / fact
def nper(rate, pmt, pv, fv=0, when='end'):
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 9d539d6ac..0632ba1f8 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -621,6 +621,13 @@ def _savez(file, args, kwds, compress):
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
+
+ def floatconv(x):
+        x = x.lower()
+ if b'0x' in x:
+ return float.fromhex(asstr(x))
+ return float(x)
+
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
@@ -631,7 +638,7 @@ def _getconv(dtype):
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
- return float
+ return floatconv
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
@@ -706,6 +713,10 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
+ .. versionadded:: 1.10.0
+ The strings produced by the Python float.hex method can be used as
+ input for floats.
+
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
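
The hex-float support referred to in the versionadded note above rests on Python's own float.hex/float.fromhex pair (used by the new floatconv helper); for reference, nothing NumPy-specific:

    >>> (3.0).hex()
    '0x1.8000000000000p+1'
    >>> float.fromhex('0x1.8000000000000p+1')
    3.0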
@@ -1204,7 +1215,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
- unpack=None, usemask=False, loose=True, invalid_raise=True):
+ unpack=None, usemask=False, loose=True, invalid_raise=True,
+ max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
@@ -1285,6 +1297,12 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
+ max_rows : int, optional
+ The maximum number of rows to read. Must not be used together with
+ skip_footer. If given, the value must be at least 1. Default is to
+ read the entire file.
+
+ .. versionadded:: 1.10.0
Returns
-------
@@ -1353,6 +1371,14 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
+ if max_rows is not None:
+ if skip_footer:
+ raise ValueError(
+ "The keywords 'skip_footer' and 'max_rows' can not be "
+ "specified at the same time.")
+ if max_rows < 1:
+ raise ValueError("'max_rows' must be at least 1.")
+
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
@@ -1451,7 +1477,11 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
names = validate_names(names)
# Get the dtype
if dtype is not None:
- dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
+ dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
+ excludelist=excludelist,
+ deletechars=deletechars,
+ case_sensitive=case_sensitive,
+ replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
@@ -1647,8 +1677,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
# Skip an empty line
if nbvalues == 0:
continue
- # Select only the columns we need
if usecols:
+ # Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
@@ -1661,7 +1691,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
- for (v, m) in zip(values, missing_values)]))
+ for (v, m) in zip(values,
+ missing_values)]))
+ if len(rows) == max_rows:
+ break
if own_fhd:
fhd.close()
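
A minimal usage sketch (not part of the patch) for the new max_rows keyword, mirroring the test added further down: the first call consumes only the requested rows, so a second call on the same file-like object picks up the remainder.

    import numpy as np
    from io import StringIO

    txt = StringIO("1 2\n3 4\n5 6\n7 8\n9 10\n")

    first = np.genfromtxt(txt, max_rows=3)   # [[1. 2.] [3. 4.] [5. 6.]]
    rest = np.genfromtxt(txt)                # [[7. 8.] [9. 10.]]
    print(first.shape, rest.shape)           # (3, 2) (2, 2)
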
diff --git a/numpy/lib/setup.py b/numpy/lib/setup.py
new file mode 100644
index 000000000..d342410b8
--- /dev/null
+++ b/numpy/lib/setup.py
@@ -0,0 +1,12 @@
+from __future__ import division, print_function
+
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+
+ config = Configuration('lib', parent_package, top_path)
+ config.add_data_dir('tests')
+ return config
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 70fa3ab03..2d18c5bc8 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -711,7 +711,7 @@ def kron(a, b):
Notes
-----
- The function assumes that the number of dimenensions of `a` and `b`
+ The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
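
A one-line check (not part of the patch) of the shape rule stated in the kron docstring above, using throwaway arrays of ones.

    import numpy as np

    a = np.ones((2, 3, 4))
    b = np.ones((5, 6, 7))
    print(np.kron(a, b).shape)   # (10, 18, 28) == (2*5, 3*6, 4*7)
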
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 92ca1c973..060f815d5 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -7,7 +7,8 @@ from datetime import date
import numpy as np
from numpy.compat import asbytes, asbytes_nested
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_equal, assert_allclose
+ run_module_suite, TestCase, assert_, assert_equal, assert_allclose,
+ assert_raises
)
from numpy.lib._iotools import (
LineSplitter, NameValidator, StringConverter,
@@ -93,6 +94,9 @@ class TestNameValidator(TestCase):
test = NameValidator(case_sensitive='lower').validate(names)
assert_equal(test, ['a', 'a_1', 'b', 'c'])
+ # check exceptions
+ assert_raises(ValueError, NameValidator, case_sensitive='foobar')
+
def test_excludelist(self):
"Test excludelist"
names = ['dates', 'data', 'Other Data', 'mask']
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index a4b9cfe2e..baa785424 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -2,7 +2,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
- run_module_suite, TestCase, assert_, assert_almost_equal
+ run_module_suite, TestCase, assert_, assert_almost_equal,
+ assert_allclose
)
@@ -13,35 +14,37 @@ class TestFinancial(TestCase):
def test_irr(self):
v = [-150000, 15000, 25000, 35000, 45000, 60000]
- assert_almost_equal(np.irr(v),
- 0.0524, 2)
+ assert_almost_equal(np.irr(v), 0.0524, 2)
v = [-100, 0, 0, 74]
- assert_almost_equal(np.irr(v),
- -0.0955, 2)
+ assert_almost_equal(np.irr(v), -0.0955, 2)
v = [-100, 39, 59, 55, 20]
- assert_almost_equal(np.irr(v),
- 0.28095, 2)
+ assert_almost_equal(np.irr(v), 0.28095, 2)
v = [-100, 100, 0, -7]
- assert_almost_equal(np.irr(v),
- -0.0833, 2)
+ assert_almost_equal(np.irr(v), -0.0833, 2)
v = [-100, 100, 0, 7]
- assert_almost_equal(np.irr(v),
- 0.06206, 2)
+ assert_almost_equal(np.irr(v), 0.06206, 2)
v = [-5, 10.5, 1, -8, 1]
- assert_almost_equal(np.irr(v),
- 0.0886, 2)
+ assert_almost_equal(np.irr(v), 0.0886, 2)
def test_pv(self):
- assert_almost_equal(np.pv(0.07, 20, 12000, 0),
- -127128.17, 2)
+ assert_almost_equal(np.pv(0.07, 20, 12000, 0), -127128.17, 2)
def test_fv(self):
- assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0),
- 86609.36, 2)
+ assert_almost_equal(np.fv(0.075, 20, -2000, 0, 0), 86609.36, 2)
def test_pmt(self):
- assert_almost_equal(np.pmt(0.08/12, 5*12, 15000),
- -304.146, 3)
+ res = np.pmt(0.08/12, 5*12, 15000)
+ tgt = -304.145914
+ assert_allclose(res, tgt)
+ # Test the edge case where rate == 0.0
+ res = np.pmt(0.0, 5*12, 15000)
+ tgt = -250.0
+ assert_allclose(res, tgt)
+ # Test the case where we use broadcast and
+ # the arguments passed in are arrays.
+ res = np.pmt([[0.0, 0.8],[0.3, 0.8]],[12, 3],[2000, 20000])
+ tgt = np.array([[-166.66667, -19311.258],[-626.90814, -19311.258]])
+ assert_allclose(res, tgt)
def test_ppmt(self):
np.round(np.ppmt(0.1/12, 1, 60, 55000), 2) == 710.25
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 81bddfadd..7054ab1fe 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -694,6 +694,19 @@ class TestLoadTxt(TestCase):
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
+ def test_from_float_hex(self):
+ # IEEE doubles and floats only, otherwise the float32
+ # conversion may fail.
+ tgt = np.logspace(-10, 10, 5).astype(np.float32)
+ tgt = np.hstack((tgt, -tgt)).astype(np.float)
+ inp = '\n'.join(map(float.hex, tgt))
+ c = TextIO()
+ c.write(inp)
+ for dt in [np.float, np.float32]:
+ c.seek(0)
+ res = np.loadtxt(c, dtype=dt)
+ assert_equal(res, tgt, err_msg="%s" % dt)
+
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
@@ -1107,13 +1120,13 @@ M 33 21.99
def test_dtype_with_converters_and_usecols(self):
dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
- dtyp = [('E1','i4'),('E2','i4'),('E3','i2'),('N', 'i1')]
+ dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
names=None, converters=conv)
control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp)
assert_equal(test, control)
- dtyp = [('E1','i4'),('E2','i4'),('N', 'i1')]
+ dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
usecols=(0,1,3), names=None, converters=conv)
control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp)
@@ -1514,6 +1527,30 @@ M 33 21.99
ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
assert_equal(test, ctrl)
+ def test_replace_space_known_dtype(self):
+ "Test the 'replace_space' (and related) options when dtype != None"
+ txt = "A.A, B (B), C:C\n1, 2, 3"
+ # Test default: replace ' ' by '_' and delete non-alphanum chars
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int)
+ ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no replace, no delete
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int,
+ replace_space='', deletechars='')
+ ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+ # Test: no delete (spaces are replaced by _)
+ test = np.genfromtxt(TextIO(txt),
+ delimiter=",", names=True, dtype=int,
+ deletechars='')
+ ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
+ ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
+ assert_equal(test, ctrl)
+
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
@@ -1641,6 +1678,60 @@ M 33 21.99
self.assertTrue(isinstance(test, np.recarray))
assert_equal(test, control)
+ def test_max_rows(self):
+ # Test the `max_rows` keyword argument.
+ data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
+ txt = TextIO(data)
+ a1 = np.genfromtxt(txt, max_rows=3)
+ a2 = np.genfromtxt(txt)
+ assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
+ assert_equal(a2, [[7, 8], [9, 10]])
+
+ # max_rows must be at least 1.
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
+
+ # An input with several invalid rows.
+ data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
+
+ test = np.genfromtxt(TextIO(data), max_rows=2)
+ control = np.array([[1., 1.], [2., 2.]])
+ assert_equal(test, control)
+
+ # Test keywords conflict
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
+ max_rows=4)
+
+ # Test that an invalid row within max_rows raises by default
+ assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
+
+ # Test invalid rows with invalid_raise=False (should not raise)
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore")
+
+ test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
+ control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+ assert_equal(test, control)
+
+ test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
+ control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
+ assert_equal(test, control)
+
+ # Structured array with field names.
+ data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
+
+ # Test with header, names and comments
+ txt = TextIO(data)
+ test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
+ control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
+ dtype=[('c', '<f8'), ('d', '<f8')])
+ assert_equal(test, control)
+ # To continue reading the same "file", don't use skip_header or
+ # names, and use the previously determined dtype.
+ test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
+ control = np.array([(4.0, 4.0), (5.0, 5.0)],
+ dtype=[('c', '<f8'), ('d', '<f8')])
+ assert_equal(test, control)
+
def test_gft_using_filename(self):
# Test that we can load data from a filename as well as a file object
wanted = np.arange(6).reshape((2, 3))
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 739061a5d..b65a8df97 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -265,6 +265,37 @@ class TestHistogram2d(TestCase):
a, edge1, edge2 = histogram2d([], [], bins=4)
assert_array_max_ulp(a, np.zeros((4, 4)))
+ def test_binparameter_combination(self):
+ x = array(
+ [0, 0.09207008, 0.64575234, 0.12875982, 0.47390599,
+ 0.59944483, 1])
+ y = array(
+ [0, 0.14344267, 0.48988575, 0.30558665, 0.44700682,
+ 0.15886423, 1])
+ edges = (0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
+ H, xe, ye = histogram2d(x, y, (edges, 4))
+ answer = array(
+ [[ 2., 0., 0., 0.],
+ [ 0., 1., 0., 0.],
+ [ 0., 0., 0., 0.],
+ [ 0., 0., 0., 0.],
+ [ 0., 1., 0., 0.],
+ [ 1., 0., 0., 0.],
+ [ 0., 1., 0., 0.],
+ [ 0., 0., 0., 0.],
+ [ 0., 0., 0., 0.],
+ [ 0., 0., 0., 1.]])
+ assert_array_equal(H, answer)
+ assert_array_equal(ye, array([0., 0.25, 0.5, 0.75, 1]))
+ H, xe, ye = histogram2d(x, y, (4, edges))
+ answer = array(
+ [[ 1., 1., 0., 1., 0., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [ 0., 1., 0., 0., 1., 0., 0., 0., 0., 0.],
+ [ 0., 0., 0., 0., 0., 0., 0., 0., 0., 1.]])
+ assert_array_equal(H, answer)
+ assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
+
class TestTri(TestCase):
def test_dtype(self):
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 24dc3cced..464ffd914 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -589,16 +589,18 @@ def histogram2d(x, y, bins=10, range=None, normed=False, weights=None):
y : array_like, shape (N,)
An array containing the y coordinates of the points to be
histogrammed.
- bins : int or [int, int] or array_like or [array, array], optional
+ bins : int or array_like or [int, int] or [array, array], optional
The bin specification:
* If int, the number of bins for the two dimensions (nx=ny=bins).
- * If [int, int], the number of bins in each dimension
- (nx, ny = bins).
* If array_like, the bin edges for the two dimensions
(x_edges=y_edges=bins).
+ * If [int, int], the number of bins in each dimension
+ (nx, ny = bins).
* If [array, array], the bin edges in each dimension
(x_edges, y_edges = bins).
+ * A combination [int, array] or [array, int], where int
+ is the number of bins and array is the bin edges.
range : array_like, shape(2,2), optional
The leftmost and rightmost edges of the bins along each dimension
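
A small sketch (not part of the patch) of the mixed [int, array] bin specification documented above; the sample points and edge values are arbitrary.

    import numpy as np

    x = np.random.rand(100)
    y = np.random.rand(100)
    y_edges = np.linspace(0.0, 1.0, 11)      # explicit edges for y

    # 4 equal-width bins along x, the explicit edges along y.
    H, xe, ye = np.histogram2d(x, y, bins=(4, y_edges))
    print(H.shape, xe.size, ye.size)         # (4, 10) 5 11
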
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 5d878363d..78f5fbc4d 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3802,7 +3802,10 @@ class MaskedArray(ndarray):
else:
if m is not nomask:
self._mask += m
- ndarray.__iadd__(self._data, np.where(self._mask, 0, getdata(other)))
+ ndarray.__iadd__(
+ self._data,
+ np.where(self._mask, self.dtype.type(0), getdata(other))
+ )
return self
#....
def __isub__(self, other):
@@ -3814,7 +3817,10 @@ class MaskedArray(ndarray):
self._mask += m
elif m is not nomask:
self._mask += m
- ndarray.__isub__(self._data, np.where(self._mask, 0, getdata(other)))
+ ndarray.__isub__(
+ self._data,
+ np.where(self._mask, self.dtype.type(0), getdata(other))
+ )
return self
#....
def __imul__(self, other):
@@ -3826,7 +3832,10 @@ class MaskedArray(ndarray):
self._mask += m
elif m is not nomask:
self._mask += m
- ndarray.__imul__(self._data, np.where(self._mask, 1, getdata(other)))
+ ndarray.__imul__(
+ self._data,
+ np.where(self._mask, self.dtype.type(1), getdata(other))
+ )
return self
#....
def __idiv__(self, other):
@@ -3841,7 +3850,10 @@ class MaskedArray(ndarray):
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
- ndarray.__idiv__(self._data, np.where(self._mask, 1, other_data))
+ ndarray.__idiv__(
+ self._data,
+ np.where(self._mask, self.dtype.type(1), other_data)
+ )
return self
#....
def __ifloordiv__(self, other):
@@ -3856,7 +3868,10 @@ class MaskedArray(ndarray):
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
- ndarray.__ifloordiv__(self._data, np.where(self._mask, 1, other_data))
+ ndarray.__ifloordiv__(
+ self._data,
+ np.where(self._mask, self.dtype.type(1), other_data)
+ )
return self
#....
def __itruediv__(self, other):
@@ -3871,7 +3886,10 @@ class MaskedArray(ndarray):
other_data = np.where(dom_mask, fval, other_data)
# self._mask = mask_or(self._mask, new_mask)
self._mask |= new_mask
- ndarray.__itruediv__(self._data, np.where(self._mask, 1, other_data))
+ ndarray.__itruediv__(
+ self._data,
+ np.where(self._mask, self.dtype.type(1), other_data)
+ )
return self
#...
def __ipow__(self, other):
@@ -3879,7 +3897,10 @@ class MaskedArray(ndarray):
other_data = getdata(other)
other_mask = getmask(other)
with np.errstate(divide='ignore', invalid='ignore'):
- ndarray.__ipow__(self._data, np.where(self._mask, 1, other_data))
+ ndarray.__ipow__(
+ self._data,
+ np.where(self._mask, self.dtype.type(1), other_data)
+ )
invalid = np.logical_not(np.isfinite(self._data))
if invalid.any():
if self._mask is not nomask:
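
For context, a minimal sketch (not part of the patch) of what dtype.type(0) buys in the in-place operators above: the fill value handed to np.where is constructed in the array's own dtype, so the temporary used for the in-place update cannot introduce a different type. The uint8 array is illustrative only.

    import numpy as np

    data = np.arange(4, dtype=np.uint8)
    mask = np.array([False, True, False, True])

    zero = data.dtype.type(0)        # a numpy.uint8 scalar, not a Python int
    filled = np.where(mask, zero, data)
    print(zero.dtype, filled.dtype)  # uint8 uint8
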
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 9adb9d368..b6082180a 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -497,8 +497,9 @@ def average(a, axis=None, weights=None, returned=False):
The average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `np.float64`
- if `a` is of integer type, otherwise it is of the same type as `a`.
- If returned, `sum_of_weights` is of the same type as `average`.
+ if `a` is of integer type or a float smaller than `float64`; otherwise
+ it is the same as the input data-type. If returned, `sum_of_weights`
+ is always `float64`.
Examples
--------
@@ -1920,12 +1921,12 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
m = mask_or(m, getmask(w))
if m is not nomask:
+ not_m = ~m
if w is not None:
- w = ~m*w
- else:
- w = ~m
-
- return np.polyfit(x, y, deg, rcond, full, w, cov)
+ w = w[not_m]
+ return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
+ else:
+ return np.polyfit(x, y, deg, rcond, full, w, cov)
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
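
A minimal sketch (not part of the patch) of why dropping masked points matters here: a NaN that sits behind the mask never reaches np.polyfit, which is what the new test_polyfit_with_masked_NaNs below exercises. The data values are illustrative.

    import numpy as np
    import numpy.ma as ma

    x = ma.array([np.nan, 1.0, 2.0, 3.0, 4.0, 5.0], mask=[1, 0, 0, 0, 0, 0])
    y = 2.0 * x + 1.0

    # The masked NaN is removed before np.polyfit is called, so the fit is clean.
    coeffs = ma.polyfit(x, y, 1)
    print(coeffs)   # approximately [ 2.  1.]
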
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 975b7aceb..1d4462306 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -1723,6 +1723,13 @@ class TestMaskedArrayInPlaceArithmetics(TestCase):
xm[2] = masked
self.intdata = (x, y, xm)
self.floatdata = (x.astype(float), y.astype(float), xm.astype(float))
+ self.othertypes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
+ self.othertypes = [np.dtype(_).type for _ in self.othertypes]
+ self.uint8data = (
+ x.astype(np.uint8),
+ y.astype(np.uint8),
+ xm.astype(np.uint8)
+ )
def test_inplace_addition_scalar(self):
# Test of inplace additions
@@ -1990,6 +1997,226 @@ class TestMaskedArrayInPlaceArithmetics(TestCase):
assert_equal(a, [[1, 1], [3, 3]])
assert_equal(a.mask, [[0, 1], [0, 1]])
+ def test_inplace_addition_scalar_type(self):
+ # Test of inplace additions
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ xm[2] = masked
+ x += t(1)
+ assert_equal(x, y + t(1))
+ xm += t(1)
+ assert_equal(xm, y + t(1))
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_addition_array_type(self):
+ # Test of inplace additions
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+ x += a
+ xm += a
+ assert_equal(x, y + a)
+ assert_equal(xm, y + a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_subtraction_scalar_type(self):
+ # Test of inplace subtractions
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ x -= t(1)
+ assert_equal(x, y - t(1))
+ xm -= t(1)
+ assert_equal(xm, y - t(1))
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+
+ def test_inplace_subtraction_array_type(self):
+ # Test of inplace subtractions
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+ x -= a
+ xm -= a
+ assert_equal(x, y - a)
+ assert_equal(xm, y - a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_multiplication_scalar_type(self):
+ # Test of inplace multiplication
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ x *= t(2)
+ assert_equal(x, y * t(2))
+ xm *= t(2)
+ assert_equal(xm, y * t(2))
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_multiplication_array_type(self):
+ # Test of inplace multiplication
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+ x *= a
+ xm *= a
+ assert_equal(x, y * a)
+ assert_equal(xm, y * a)
+ assert_equal(xm.mask, mask_or(m, a.mask))
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_floor_division_scalar_type(self):
+ # Test of inplace division
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ x = arange(10, dtype=t) * t(2)
+ xm = arange(10, dtype=t) * t(2)
+ xm[2] = masked
+ x //= t(2)
+ xm //= t(2)
+ assert_equal(x, y)
+ assert_equal(xm, y)
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_floor_division_array_type(self):
+ # Test of inplace division
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+ x //= a
+ xm //= a
+ assert_equal(x, y // a)
+ assert_equal(xm, y // a)
+ assert_equal(
+ xm.mask,
+ mask_or(mask_or(m, a.mask), (a == t(0)))
+ )
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_division_scalar_type(self):
+ # Test of inplace division
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ x = arange(10, dtype=t) * t(2)
+ xm = arange(10, dtype=t) * t(2)
+ xm[2] = masked
+
+ # May get a DeprecationWarning or a TypeError.
+ #
+ # This is a consequence of the fact that this is true divide
+ # and will require casting to float for calculation and
+ # casting back to the original type. This will only be raised
+ # with integers. Whether it is an error or warning is only
+ # dependent on how stringent the casting rules are.
+ #
+ # Will handle the same way.
+ try:
+ x /= t(2)
+ assert_equal(x, y)
+ except (DeprecationWarning, TypeError) as e:
+ warnings.warn(str(e))
+ try:
+ xm /= t(2)
+ assert_equal(xm, y)
+ except (DeprecationWarning, TypeError) as e:
+ warnings.warn(str(e))
+
+ if issubclass(t, np.integer):
+ assert_equal(len(w), 2, "Failed on type=%s." % t)
+ else:
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_division_array_type(self):
+ # Test of inplace division
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ (x, y, xm) = (_.astype(t) for _ in self.uint8data)
+ m = xm.mask
+ a = arange(10, dtype=t)
+ a[-1] = masked
+
+ # May get a DeprecationWarning or a TypeError.
+ #
+ # This is a consequence of the fact that this is true divide
+ # and will require casting to float for calculation and
+ # casting back to the original type. This will only be raised
+ # with integers. Whether it is an error or warning is only
+ # dependent on how stringent the casting rules are.
+ #
+ # Will handle the same way.
+ try:
+ x /= a
+ assert_equal(x, y / a)
+ except (DeprecationWarning, TypeError) as e:
+ warnings.warn(str(e))
+ try:
+ xm /= a
+ assert_equal(xm, y / a)
+ assert_equal(
+ xm.mask,
+ mask_or(mask_or(m, a.mask), (a == t(0)))
+ )
+ except (DeprecationWarning, TypeError) as e:
+ warnings.warn(str(e))
+
+ if issubclass(t, np.integer):
+ assert_equal(len(w), 2, "Failed on type=%s." % t)
+ else:
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
+ def test_inplace_pow_type(self):
+ # Test keeping data w/ (inplace) power
+ for t in self.othertypes:
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings("always")
+ # Test pow on scalar
+ x = array([1, 2, 3], mask=[0, 0, 1], dtype=t)
+ xx = x ** t(2)
+ xx_r = array([1, 2 ** 2, 3], mask=[0, 0, 1], dtype=t)
+ assert_equal(xx.data, xx_r.data)
+ assert_equal(xx.mask, xx_r.mask)
+ # Test ipow on scalar
+ x **= t(2)
+ assert_equal(x.data, xx_r.data)
+ assert_equal(x.mask, xx_r.mask)
+
+ assert_equal(len(w), 0, "Failed on type=%s." % t)
+
#------------------------------------------------------------------------------
class TestMaskedArrayMethods(TestCase):
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 8902d67dc..9e1e8ba38 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -734,6 +734,21 @@ class TestPolynomial(TestCase):
for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
assert_almost_equal(a, a_)
+ def test_polyfit_with_masked_NaNs(self):
+ x = np.random.rand(10)
+ y = np.random.rand(20).reshape(-1, 2)
+
+ x[0] = np.nan
+ y[-1,-1] = np.nan
+ x = x.view(MaskedArray)
+ y = y.view(MaskedArray)
+ x[0] = masked
+ y[-1,-1] = masked
+
+ (C, R, K, S, D) = polyfit(x, y, 3, full=True)
+ (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
+ for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+ assert_almost_equal(a, a_)
class TestArraySetOps(TestCase):
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index ed582767a..703e9ec28 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -3324,10 +3324,10 @@ cdef class RandomState:
.. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}
- The Rayleigh distribution arises if the wind speed and wind direction are
- both gaussian variables, then the vector wind velocity forms a Rayleigh
- distribution. The Rayleigh distribution is used to model the expected
- output from wind turbines.
+ The Rayleigh distribution would arise, for example, if the East
+ and North components of the wind velocity were independent and had
+ identical zero-mean Gaussian distributions. Then the wind speed would
+ have a Rayleigh distribution.
References
----------
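
A small numerical check (not part of the patch) of the rewritten Rayleigh note above: the speed built from two independent zero-mean Gaussian components matches draws from np.random.rayleigh with the same scale. The sample size and scale value are arbitrary.

    import numpy as np

    scale = 2.0
    n = 200000

    east = np.random.normal(0.0, scale, n)
    north = np.random.normal(0.0, scale, n)
    speed = np.hypot(east, north)            # Rayleigh-distributed by construction

    direct = np.random.rayleigh(scale, n)
    # Both means approach scale * sqrt(pi/2), about 2.507 for scale = 2.
    print(speed.mean(), direct.mean())
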
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index b9bdaeb87..1051288c2 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -1244,7 +1244,7 @@ def _assert_valid_refcount(op):
assert_(sys.getrefcount(i) >= rc)
-def assert_allclose(actual, desired, rtol=1e-7, atol=0,
+def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=False,
err_msg='', verbose=True):
"""
Raises an AssertionError if two objects are not equal up to desired
@@ -1266,6 +1266,8 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0,
Relative tolerance.
atol : float, optional
Absolute tolerance.
+ equal_nan : bool, optional
+ If True, NaNs will compare equal.
err_msg : str, optional
The error message to be printed in case of failure.
verbose : bool, optional
@@ -1289,7 +1291,8 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0,
"""
import numpy as np
def compare(x, y):
- return np.core.numeric._allclose_points(x, y, rtol=rtol, atol=atol)
+ return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
+ equal_nan=equal_nan)
actual, desired = np.asanyarray(actual), np.asanyarray(desired)
header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
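
A minimal usage sketch (not part of the patch) of the new equal_nan keyword on assert_allclose.

    import numpy as np
    from numpy.testing import assert_allclose

    a = np.array([1.0, np.nan])
    b = np.array([1.0, np.nan])

    assert_allclose(a, b, equal_nan=True)    # passes: NaNs are treated as equal
    try:
        assert_allclose(a, b, equal_nan=False)
    except AssertionError:
        print("NaNs differ when equal_nan=False")
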