summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/add_newdocs.py370
-rw-r--r--numpy/core/SConscript58
-rw-r--r--numpy/core/arrayprint.py15
-rw-r--r--numpy/core/code_generators/generate_numpy_api.py6
-rw-r--r--numpy/core/code_generators/generate_umath.py5
-rw-r--r--numpy/core/code_generators/numpy_api_order.txt5
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py350
-rw-r--r--numpy/core/defmatrix.py23
-rw-r--r--numpy/core/fromnumeric.py104
-rw-r--r--numpy/core/include/numpy/_neighborhood_iterator_imp.h90
-rw-r--r--numpy/core/include/numpy/ndarrayobject.h88
-rw-r--r--numpy/core/include/numpy/npy_cpu.h22
-rw-r--r--numpy/core/include/numpy/npy_endian.h36
-rw-r--r--numpy/core/include/numpy/npy_math.h79
-rw-r--r--numpy/core/include/numpy/numpyconfig.h.in4
-rw-r--r--numpy/core/memmap.py4
-rw-r--r--numpy/core/mlib.ini.in12
-rw-r--r--numpy/core/npymath.ini.in19
-rw-r--r--numpy/core/numeric.py307
-rw-r--r--numpy/core/numerictypes.py33
-rw-r--r--numpy/core/setup.py39
-rw-r--r--numpy/core/setup_common.py12
-rw-r--r--numpy/core/setupscons.py13
-rw-r--r--numpy/core/src/_sortmodule.c.src692
-rw-r--r--numpy/core/src/multiarray/arrayobject.c127
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src193
-rw-r--r--numpy/core/src/multiarray/ctors.c32
-rw-r--r--numpy/core/src/multiarray/descriptor.c108
-rw-r--r--numpy/core/src/multiarray/flagsobject.c103
-rw-r--r--numpy/core/src/multiarray/global.c3
-rw-r--r--numpy/core/src/multiarray/iterators.c629
-rw-r--r--numpy/core/src/multiarray/mapping.c113
-rw-r--r--numpy/core/src/multiarray/methods.c45
-rw-r--r--numpy/core/src/multiarray/multiarray_tests.c.src390
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c25
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src642
-rw-r--r--numpy/core/src/npymath/_signbit.c (renamed from numpy/core/src/_signbit.c)0
-rw-r--r--numpy/core/src/npymath/npy_math.c.src (renamed from numpy/core/src/npy_math.c.src)262
-rw-r--r--numpy/core/src/npymath/npy_math_private.h121
-rw-r--r--numpy/core/src/py3k_notes.txt197
-rw-r--r--numpy/core/src/umath/loops.c.src38
-rw-r--r--numpy/core/src/umath/loops.h9
-rw-r--r--numpy/core/src/umath/ufunc_object.c300
-rw-r--r--numpy/core/tests/test_defmatrix.py10
-rw-r--r--numpy/core/tests/test_multiarray.py324
-rw-r--r--numpy/core/tests/test_numeric.py43
-rw-r--r--numpy/core/tests/test_numerictypes.py4
-rw-r--r--numpy/core/tests/test_umath.py190
-rw-r--r--numpy/distutils/__init__.py1
-rw-r--r--numpy/distutils/command/__init__.py3
-rw-r--r--numpy/distutils/command/build_clib.py13
-rw-r--r--numpy/distutils/command/build_ext.py16
-rw-r--r--numpy/distutils/command/build_src.py79
-rw-r--r--numpy/distutils/command/config.py29
-rw-r--r--numpy/distutils/command/install.py3
-rw-r--r--numpy/distutils/command/install_clib.py37
-rw-r--r--numpy/distutils/command/scons.py58
-rw-r--r--numpy/distutils/conv_template.py14
-rw-r--r--numpy/distutils/core.py4
-rw-r--r--numpy/distutils/fcompiler/gnu.py34
-rw-r--r--numpy/distutils/fcompiler/intel.py2
-rw-r--r--numpy/distutils/misc_util.py562
-rw-r--r--numpy/distutils/npy_pkg_config.py306
-rw-r--r--numpy/distutils/numpy_distribution.py4
-rw-r--r--numpy/distutils/tests/test_npy_pkg_config.py96
-rw-r--r--numpy/doc/constants.py10
-rw-r--r--numpy/doc/creation.py2
-rw-r--r--numpy/doc/subclassing.py557
-rwxr-xr-xnumpy/f2py/crackfortran.py4
-rw-r--r--numpy/fft/fftpack.py2
-rw-r--r--numpy/lib/arraysetops.py305
-rw-r--r--numpy/lib/financial.py198
-rw-r--r--numpy/lib/function_base.py258
-rw-r--r--numpy/lib/index_tricks.py389
-rw-r--r--numpy/lib/info.py7
-rw-r--r--numpy/lib/io.py135
-rw-r--r--numpy/lib/scimath.py83
-rw-r--r--numpy/lib/shape_base.py49
-rw-r--r--numpy/lib/tests/test_arraysetops.py89
-rw-r--r--numpy/lib/tests/test_financial.py17
-rw-r--r--numpy/lib/tests/test_index_tricks.py60
-rw-r--r--numpy/lib/tests/test_io.py26
-rw-r--r--numpy/lib/tests/test_regression.py4
-rw-r--r--numpy/lib/tests/test_twodim_base.py146
-rw-r--r--numpy/lib/twodim_base.py281
-rw-r--r--numpy/lib/type_check.py28
-rw-r--r--numpy/lib/ufunclike.py6
-rw-r--r--numpy/lib/utils.py138
-rw-r--r--numpy/linalg/linalg.py4
-rw-r--r--numpy/ma/core.py228
-rw-r--r--numpy/ma/extras.py295
-rw-r--r--numpy/ma/tests/test_core.py38
-rw-r--r--numpy/ma/tests/test_extras.py144
-rw-r--r--numpy/testing/decorators.py113
-rw-r--r--numpy/testing/nosetester.py17
-rw-r--r--numpy/testing/tests/test_utils.py141
-rw-r--r--numpy/testing/utils.py201
97 files changed, 8990 insertions, 2540 deletions
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 121b8088f..d588cbba0 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -747,12 +747,19 @@ add_newdoc('numpy.core.multiarray', 'set_string_function',
Parameters
----------
- f : Python function
+ f : function or None
Function to be used to pretty print arrays. The function should expect
a single array argument and return a string of the representation of
- the array.
- repr : int
- Unknown.
+ the array. If None, the function is reset to the default NumPy function
+ to print arrays.
+ repr : bool, optional
+ If True (default), the function for pretty printing (``__repr__``)
+ is set, if False the function that returns the default string
+ representation (``__str__``) is set.
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
Examples
--------
@@ -766,6 +773,24 @@ add_newdoc('numpy.core.multiarray', 'set_string_function',
>>> print a
[0 1 2 3 4 5 6 7 8 9]
+ We can reset the function to the default:
+
+ >>> np.set_string_function(None)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 'l')
+
+ `repr` affects either pretty printing or normal string representation.
+ Note that ``__repr__`` is still affected by setting ``__str__``
+ because the width of each array element in the returned string becomes
+ equal to the length of the result of ``__str__()``.
+
+ >>> x = np.arange(4)
+ >>> np.set_string_function(lambda x:'random', repr=False)
+ >>> x.__str__()
+ 'random'
+ >>> x.__repr__()
+ 'array([ 0, 1, 2, 3])'
+
""")
add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
@@ -973,12 +998,37 @@ add_newdoc('numpy.core.multiarray','newbuffer',
""")
-add_newdoc('numpy.core.multiarray','getbuffer',
- """getbuffer(obj [,offset[, size]])
+add_newdoc('numpy.core.multiarray', 'getbuffer',
+ """
+ getbuffer(obj [,offset[, size]])
Create a buffer object from the given object referencing a slice of
- length size starting at offset. Default is the entire buffer. A
- read-write buffer is attempted followed by a read-only buffer.
+ length size starting at offset.
+
+ Default is the entire buffer. A read-write buffer is attempted followed
+ by a read-only buffer.
+
+ Parameters
+ ----------
+ obj : object
+
+ offset : int, optional
+
+ size : int, optional
+
+ Returns
+ -------
+ buffer_obj : buffer
+
+ Examples
+ --------
+ >>> buf = np.getbuffer(np.ones(5), 1, 3)
+ >>> len(buf)
+ 3
+ >>> buf[0]
+ '\\x00'
+ >>> buf
+ <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
""")
@@ -1595,8 +1645,14 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array__',
"""))
+add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_prepare__',
+ """a.__array_prepare__(obj) -> Object of same type as ndarray object obj.
+
+ """))
+
+
add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_wrap__',
- """a.__array_wrap__(obj) -> Object of same type as a from ndarray obj.
+ """a.__array_wrap__(obj) -> Object of same type as ndarray object a.
"""))
@@ -2668,6 +2724,21 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
type : python type
Type of the returned view, e.g. ndarray or matrix.
+
+ Notes
+ -----
+
+ `a.view()` is used two different ways.
+
+ `a.view(some_dtype)` or `a.view(dtype=some_dtype)` constructs a view of
+ the array's memory with a different dtype. This can cause a
+ reinterpretation of the bytes of memory.
+
+ `a.view(ndarray_subclass)`, or `a.view(type=ndarray_subclass)`, just
+ returns an instance of ndarray_subclass that looks at the same array (same
+ shape, dtype, etc.). This does not cause a reinterpretation of the memory.
+
+
Examples
--------
>>> x = np.array([(1, 2)], dtype=[('a', np.int8), ('b', np.int8)])
@@ -2675,12 +2746,27 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
Viewing array data using a different type and dtype:
>>> y = x.view(dtype=np.int16, type=np.matrix)
- >>> print y.dtype
- int16
-
+ >>> y
+ matrix([[513]], dtype=int16)
>>> print type(y)
<class 'numpy.core.defmatrix.matrix'>
+ Creating a view on a structured array so it can be used in calculations
+
+ >>> x = np.array([(1, 2),(3,4)], dtype=[('a', np.int8), ('b', np.int8)])
+ >>> xv = x.view(dtype=np.int8).reshape(-1,2)
+ >>> xv
+ array([[1, 2],
+ [3, 4]], dtype=int8)
+ >>> xv.mean(0)
+ array([ 2., 3.])
+
+ Making changes to the view changes the underlying array
+
+ >>> xv[0,1] = 20
+ >>> print x
+ [(1, 20) (3, 4)]
+
Using a view to convert an array to a record array:
>>> z = x.view(np.recarray)
@@ -2704,9 +2790,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
add_newdoc('numpy.core.umath', 'frexp',
"""
- Return normalized fraction and exponent of 2, element-wise of input array.
+ Return normalized fraction and exponent of 2 of input array, element-wise.
- Returns (`out1`, `out2`) from equation `x` = `out1` * ( 2 ** `out2` )
+ Returns (`out1`, `out2`) from equation ``x = out1 * 2**out2``.
Parameters
----------
@@ -2715,19 +2801,29 @@ add_newdoc('numpy.core.umath', 'frexp',
Returns
-------
- (out1, out2) : tuple of ndarray, (float, int)
- The `out1` ndarray is a float array with numbers between -1 and 1.
- The `out2` array is an int array represent the exponent of 2.
+ (out1, out2) : tuple of ndarrays, (float, int)
+ `out1` is a float array with values between -1 and 1.
+ `out2` is an int array which represents the exponent of 2.
+
+ See Also
+ --------
+ ldexp : Compute ``y = x1 * 2**x2``, the inverse of `frexp`.
+
+ Notes
+ -----
+ Complex dtypes are not supported, they will raise a TypeError.
Examples
--------
- >>> y1,y2 = np.frexp([3.4, 5.7, 1, 10, -100, 0])
+ >>> x = np.arange(9)
+ >>> y1, y2 = np.frexp(x)
>>> y1
- array([ 0.85 , 0.7125 , 0.5 , 0.625 , -0.78125, 0. ])
+ array([ 0. , 0.5 , 0.5 , 0.75 , 0.5 , 0.625, 0.75 , 0.875,
+ 0.5 ])
>>> y2
- array([2, 3, 1, 4, 7, 0], dtype=int32)
+ array([0, 1, 2, 2, 3, 3, 3, 3, 4])
>>> y1 * 2**y2
- array([ 3.4, 5.7, 1. , 10. , -100. , 0. ])
+ array([ 0., 1., 2., 3., 4., 5., 6., 7., 8.])
""")
@@ -2764,31 +2860,97 @@ add_newdoc('numpy.core.umath', 'ldexp',
Parameters
----------
x1 : array_like
- The significand.
+ The array of multipliers.
x2 : array_like
- The exponent.
+ The array of exponents.
Returns
-------
y : array_like
- y = x1 * 2**x2
+ The output array, the result of ``x1 * 2**x2``.
+
+ See Also
+ --------
+ frexp : Return (y1, y2) from ``x = y1 * 2**y2``, the inverse of `ldexp`.
+
+ Notes
+ -----
+ Complex dtypes are not supported, they will raise a TypeError.
+
+ `ldexp` is useful as the inverse of `frexp`, if used by itself it is
+ more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
- >>> np.ldexp(5., 2)
- 20.
+ >>> np.ldexp(5, np.arange(4))
+ array([ 5., 10., 20., 40.], dtype=float32)
+
+ >>> x = np.arange(6)
+ >>> np.ldexp(*np.frexp(x))
+ array([ 0., 1., 2., 3., 4., 5.])
""")
-add_newdoc('numpy.core.umath','geterrobj',
- """geterrobj()
+add_newdoc('numpy.core.umath', 'geterrobj',
+ """
+ geterrobj()
- Used internally by `geterr`.
+ Return the current object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in Numpy. `geterrobj` is used internally by the other
+ functions that get and set error handling behavior (`geterr`, `seterr`,
+ `geterrcall`, `seterrcall`).
Returns
-------
errobj : list
- Internal numpy buffer size, error mask, error callback function.
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. If we print it in base 8, we can see
+ what treatment is set for "invalid", "under", "over", and "divide" (in
+ that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
+
+ See Also
+ --------
+ seterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrobj() # first get the defaults
+ [10000, 0, None]
+
+ >>> def err_handler(type, flag):
+ ... print "Floating point error (%s), with flag %s" % (type, flag)
+ ...
+ >>> old_bufsize = np.setbufsize(20000)
+ >>> old_err = np.seterr(divide='raise')
+ >>> old_handler = np.seterrcall(err_handler)
+ >>> np.geterrobj()
+ [20000, 2, <function err_handler at 0x91dcaac>]
+
+ >>> old_err = np.seterr(all='ignore')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '0'
+ >>> old_err = np.seterr(divide='warn', over='log', under='call',
+ ... invalid='print')
+ >>> np.base_repr(np.geterrobj()[1], 8)
+ '4351'
""")
@@ -2796,16 +2958,57 @@ add_newdoc('numpy.core.umath', 'seterrobj',
"""
seterrobj(errobj)
- Used internally by `seterr`.
+ Set the object that defines floating-point error handling.
+
+ The error object contains all information that defines the error handling
+ behavior in Numpy. `seterrobj` is used internally by the other
+ functions that set error handling behavior (`seterr`, `seterrcall`).
Parameters
----------
errobj : list
- [buffer_size, error_mask, callback_func]
+ The error object, a list containing three elements:
+ [internal numpy buffer size, error mask, error callback function].
+
+ The error mask is a single integer that holds the treatment information
+ on all four floating point errors. If we print it in base 8, we can see
+ what treatment is set for "invalid", "under", "over", and "divide" (in
+ that order). The printed string can be interpreted with
+
+ * 0 : 'ignore'
+ * 1 : 'warn'
+ * 2 : 'raise'
+ * 3 : 'call'
+ * 4 : 'print'
+ * 5 : 'log'
See Also
--------
- seterrcall
+ geterrobj, seterr, geterr, seterrcall, geterrcall
+ getbufsize, setbufsize
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> old_errobj = np.geterrobj() # first get the defaults
+ >>> old_errobj
+ [10000, 0, None]
+
+ >>> def err_handler(type, flag):
+ ... print "Floating point error (%s), with flag %s" % (type, flag)
+ ...
+ >>> new_errobj = [20000, 12, err_handler]
+ >>> np.seterrobj(new_errobj)
+ >>> np.base_repr(12, 8) # int for divide=4 ('print') and over=1 ('warn')
+ '14'
+ >>> np.geterr()
+ {'over': 'warn', 'divide': 'print', 'invalid': 'ignore', 'under': 'ignore'}
+ >>> np.geterrcall()
+ <function err_handler at 0xb75e9304>
""")
@@ -2822,32 +3025,49 @@ add_newdoc('numpy.lib._compiled_base', 'digitize',
Return the indices of the bins to which each value in input array belongs.
- Each index returned is such that `bins[i-1]` <= `x` < `bins[i]` if `bins`
- is monotonically increasing, or `bins[i-1]` > `x` >= `bins[i]` if `bins`
- is monotonically decreasing. Beyond the bounds of `bins`, 0 or len(`bins`)
- is returned as appropriate.
+ Each index ``i`` returned is such that ``bins[i-1] <= x < bins[i]`` if
+ `bins` is monotonically increasing, or ``bins[i-1] > x >= bins[i]`` if
+ `bins` is monotonically decreasing. If values in `x` are beyond the
+ bounds of `bins`, 0 or ``len(bins)`` is returned as appropriate.
Parameters
----------
x : array_like
- Input array to be binned.
+ Input array to be binned. It has to be 1-dimensional.
bins : array_like
- Array of bins.
+ Array of bins. It has to be 1-dimensional and monotonic.
Returns
-------
- out : ndarray
- Output array of indices of same shape as `x`.
+ out : ndarray of ints
+ Output array of indices, of same shape as `x`.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or if `bins` is not monotonic.
+ TypeError
+ If the type of the input is complex.
+
+ See Also
+ --------
+ bincount, histogram, unique
+
+ Notes
+ -----
+ If values in `x` are such that they fall outside the bin range,
+ attempting to index `bins` with the indices that `digitize` returns
+ will result in an IndexError.
Examples
--------
>>> x = np.array([0.2, 6.4, 3.0, 1.6])
>>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0])
- >>> d = np.digitize(x,bins)
- >>> d
+ >>> inds = np.digitize(x, bins)
+ >>> inds
array([1, 4, 3, 2])
- >>> for n in range(len(x)):
- ... print bins[d[n]-1], "<=", x[n], "<", bins[d[n]]
+ >>> for n in range(x.size):
+ ... print bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]
...
0.0 <= 0.2 < 1.0
4.0 <= 6.4 < 10.0
@@ -2860,24 +3080,54 @@ add_newdoc('numpy.lib._compiled_base', 'bincount',
"""
bincount(x, weights=None)
- Return the number of occurrences of each value in array of nonnegative
- integers.
+ Count number of occurrences of each value in array of non-negative ints.
- The output, b[i], represents the number of times that i is found in `x`.
- If `weights` is specified, every occurrence of i at a position p
- contributes `weights` [p] instead of 1.
+ The number of bins (of size 1) is one larger than the largest value in
+ `x`. Each bin gives the number of occurrences of its index value in `x`.
+ If `weights` is specified the input array is weighted by it, i.e. if a
+ value ``n`` is found at position ``i``, ``out[n] += weights[i]`` instead
+ of ``out[n] += 1``.
Parameters
----------
- x : array_like, 1 dimension, nonnegative integers
- Input array.
- weights : array_like, same shape as `x`, optional
- Weights.
+ x : array_like, 1 dimension, nonnegative ints
+ Input array. The length of the output is equal to ``np.amax(x)+1``.
+ weights : array_like, optional
+ Weights, array of the same shape as `x`.
+
+ Returns
+ -------
+ out : ndarray of ints
+ The result of binning the input array.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or contains elements with negative
+ values.
+ TypeError
+ If the type of the input is float or complex.
See Also
--------
histogram, digitize, unique
+ Examples
+ --------
+ >>> np.bincount(np.arange(5))
+ array([1, 1, 1, 1, 1])
+ >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+ array([1, 3, 1, 1, 0, 0, 0, 1])
+
+ >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+ >>> np.bincount(x).size == np.amax(x)+1
+ True
+
+ >>> np.bincount(np.arange(5, dtype=np.float))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: array cannot be safely cast to required type
+
""")
add_newdoc('numpy.lib._compiled_base', 'add_docstring',
@@ -3440,6 +3690,8 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('isbuiltin',
"""
Integer indicating how this dtype relates to the built-in dtypes.
+ Read-only.
+
= ========================================================================
0 if this is a structured array type, with fields
1 if this is a dtype compiled into numpy (such as ints, floats etc)
@@ -3642,7 +3894,7 @@ add_newdoc('numpy.lib.index_tricks', 'mgrid',
Examples
--------
- >>> mgrid[0:5,0:5]
+ >>> np.mgrid[0:5,0:5]
array([[[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[2, 2, 2, 2, 2],
@@ -3653,7 +3905,7 @@ add_newdoc('numpy.lib.index_tricks', 'mgrid',
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4],
[0, 1, 2, 3, 4]]])
- >>> mgrid[-1:1:5j]
+ >>> np.mgrid[-1:1:5j]
array([-1. , -0.5, 0. , 0.5, 1. ])
""")
@@ -3697,7 +3949,7 @@ add_newdoc('numpy.lib.index_tricks', 'ogrid',
""")
-
+
##############################################################################
#
# Documentation for `generic` attributes and methods
@@ -3709,7 +3961,7 @@ add_newdoc('numpy.core.numerictypes', 'generic',
""")
# Attributes
-
+
add_newdoc('numpy.core.numerictypes', 'generic', ('T',
"""
"""))
@@ -4000,7 +4252,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('var',
add_newdoc('numpy.core.numerictypes', 'generic', ('view',
"""
"""))
-
+
##############################################################################
#
diff --git a/numpy/core/SConscript b/numpy/core/SConscript
index f09b17618..fffbee5af 100644
--- a/numpy/core/SConscript
+++ b/numpy/core/SConscript
@@ -120,10 +120,9 @@ numpyconfig_sym.append(('NPY_NO_SMP', nosmp))
# Check whether we can use C99 printing formats
#----------------------------------------------
if config.CheckDeclaration(('PRIdPTR'), includes = '#include <inttypes.h>'):
- usec99 = 1
+ numpyconfig_sym.append(('DEFINE_NPY_USE_C99_FORMATS', '#define NPY_USE_C99_FORMATS 1'))
else:
- usec99 = 0
-numpyconfig_sym.append(('USE_C99_FORMATS', usec99))
+ numpyconfig_sym.append(('DEFINE_NPY_USE_C99_FORMATS', ''))
#----------------------
# Checking the mathlib
@@ -187,9 +186,10 @@ for f in ["isnan", "isinf", "signbit", "isfinite"]:
"""
st = config.CheckDeclaration(f, includes=includes)
if st:
- numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(),
- '#define NPY_HAVE_DECL_%s' % f.upper()))
-
+ numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(),
+ '#define NPY_HAVE_DECL_%s' % f.upper()))
+ else:
+ numpyconfig_sym.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), ''))
inline = config.CheckInline()
config.Define('inline', inline)
@@ -198,7 +198,9 @@ numpyconfig_sym.append(('NPY_INLINE', inline))
if ENABLE_SEPARATE_COMPILATION:
config.Define("ENABLE_SEPARATE_COMPILATION", 1)
- numpyconfig_sym.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
+ numpyconfig_sym.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '#define NPY_ENABLE_SEPARATE_COMPILATION 1'))
+else:
+ numpyconfig_sym.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', ''))
# Checking for visibility macro
def visibility_define():
@@ -262,6 +264,10 @@ write_info(env)
# Build
#==========
+# List of headers which need to be "installed " into the build directory for
+# proper in-place build support
+generated_headers = []
+
#---------------------------------------
# Generate the public configuration file
#---------------------------------------
@@ -272,7 +278,9 @@ for key, value in numpyconfig_sym:
env['SUBST_DICT'] = config_dict
include_dir = 'include/numpy'
-env.SubstInFile(pjoin(include_dir, 'numpyconfig.h'), pjoin(include_dir, 'numpyconfig.h.in'))
+target = env.SubstInFile(pjoin(include_dir, 'numpyconfig.h'),
+ pjoin(include_dir, 'numpyconfig.h.in'))
+generated_headers.append(target[0])
env['CONFIG_H_GEN'] = numpyconfig_sym
@@ -298,26 +306,45 @@ umathmodule_src = env.GenerateFromTemplate(pjoin('src', 'umath',
'umathmodule.c.src'))
umath_tests_src = env.GenerateFromTemplate(pjoin('src', 'umath',
'umath_tests.c.src'))
+multiarray_tests_src = env.GenerateFromTemplate(pjoin('src', 'multiarray',
+ 'multiarray_tests.c.src'))
scalarmathmodule_src = env.GenerateFromTemplate(
pjoin('src', 'scalarmathmodule.c.src'))
umath = env.GenerateUmath('__umath_generated',
pjoin('code_generators', 'generate_umath.py'))
-multiarray_api = env.GenerateMultiarrayApi('multiarray_api',
+multiarray_api = env.GenerateMultiarrayApi('include/numpy/multiarray_api',
[ pjoin('code_generators', 'numpy_api_order.txt')])
+generated_headers.append(multiarray_api[0])
-ufunc_api = env.GenerateUfuncApi('ufunc_api',
+ufunc_api = env.GenerateUfuncApi('include/numpy/ufunc_api',
pjoin('code_generators', 'ufunc_api_order.txt'))
+generated_headers.append(ufunc_api[0])
-env.Prepend(CPPPATH = ['include', '.'])
+# include/numpy is added for compatibility reasons with distutils: this is
+# needed for __multiarray_api.c and __ufunc_api.c included from multiarray and
+# ufunc.
+env.Prepend(CPPPATH = ['include', '.', 'include/numpy'])
# npymath core lib
-npymath_src = env.GenerateFromTemplate(pjoin('src', 'npy_math.c.src'))
-env.DistutilsStaticExtLibrary("npymath", npymath_src)
+npymath_src = env.GenerateFromTemplate(pjoin('src', 'npymath', 'npy_math.c.src'))
+env.DistutilsInstalledStaticExtLibrary("npymath", npymath_src, install_dir='lib')
env.Prepend(LIBS=["npymath"])
env.Prepend(LIBPATH=["."])
+subst_dict = {'@prefix@': '$distutils_install_prefix',
+ '@sep@': repr(os.path.sep)}
+npymath_ini = env.SubstInFile(pjoin('lib', 'npy-pkg-config', 'npymath.ini'),
+ 'npymath.ini.in', SUBST_DICT=subst_dict)
+
+subst_dict = {'@posix_mathlib@': " ".join(['-l%s' % l for l in mlib]),
+ '@msvc_mathlib@': " ".join(['%s.mlib' % l for l in mlib])}
+mlib_ini = env.SubstInFile(pjoin('lib', 'npy-pkg-config', 'mlib.ini'),
+ 'mlib.ini.in', SUBST_DICT=subst_dict)
+env.Install('$distutils_installdir/lib/npy-pkg-config', mlib_ini)
+env.Install('$distutils_installdir/lib/npy-pkg-config', npymath_ini)
+
#-----------------
# Build multiarray
#-----------------
@@ -353,6 +380,7 @@ if ENABLE_SEPARATE_COMPILATION:
else:
multiarray_src = [pjoin('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray = env.DistutilsPythonExtension('multiarray', source = multiarray_src)
+env.DistutilsPythonExtension('multiarray_tests', source=multiarray_tests_src)
#------------------
# Build sort module
@@ -395,3 +423,7 @@ if build_blasdot:
dotblas_o = env.PythonObject('_dotblas', source = dotblas_src)
env.Depends(dotblas_o, pjoin("blasdot", "cblas.h"))
dotblas = env.DistutilsPythonExtension('_dotblas', dotblas_o)
+
+# "Install" the header in the build directory, so that in-place build works
+for h in generated_headers:
+ env.Install(pjoin('$distutils_installdir', include_dir), h)
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 6d3c52990..c8bc9438a 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -56,10 +56,14 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
suppress : bool, optional
Whether or not suppress printing of small floating point values
using scientific notation (default False).
- nanstr : string, optional
- String representation of floating point not-a-number (default nan).
- infstr : string, optional
- String representation of floating point infinity (default inf).
+ nanstr : str, optional
+ String representation of floating point not-a-number (default NaN).
+ infstr : str, optional
+ String representation of floating point infinity (default Inf).
+
+ See Also
+ --------
+ get_printoptions, set_string_function
Examples
--------
@@ -79,12 +83,9 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
>>> eps = np.finfo(float).eps
>>> x = np.arange(4.)
-
>>> x**2 - (x + eps)**2
array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
-
>>> np.set_printoptions(suppress=True)
-
>>> x**2 - (x + eps)**2
array([-0., -0., 0., 0.])
diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py
index 509048471..69f8c2026 100644
--- a/numpy/core/code_generators/generate_numpy_api.py
+++ b/numpy/core/code_generators/generate_numpy_api.py
@@ -28,6 +28,7 @@ extern NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type;
+extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#else
@@ -39,6 +40,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type;
NPY_NO_EXPORT PyTypeObject PyArrayIter_Type;
NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type;
+NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
NPY_NO_EXPORT PyTypeObject PyBoolArrType_Type;
NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
#endif
@@ -113,13 +115,13 @@ _import_array(void)
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian");
return -1;
}
-#ifdef NPY_BIG_ENDIAN
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"big endian, but detected different endianness at runtime");
return -1;
}
-#elif defined(NPY_LITTLE_ENDIAN)
+#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\
"little endian, but detected different endianness at runtime");
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 15e427f29..60a8b5a30 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -650,6 +650,11 @@ defdict = {
docstrings.get('numpy.core.umath.signbit'),
TD(flts, out='?'),
),
+'copysign' :
+ Ufunc(2, 1, None,
+ docstrings.get('numpy.core.umath.copysign'),
+ TD(flts),
+ ),
'modf' :
Ufunc(1, 2, None,
docstrings.get('numpy.core.umath.modf'),
diff --git a/numpy/core/code_generators/numpy_api_order.txt b/numpy/core/code_generators/numpy_api_order.txt
index 6b7272459..72f8d5c82 100644
--- a/numpy/core/code_generators/numpy_api_order.txt
+++ b/numpy/core/code_generators/numpy_api_order.txt
@@ -173,9 +173,10 @@ PyArray_CompareString
PyArray_MultiIterFromObjects
PyArray_GetEndianness
PyArray_GetNDArrayCFeatureVersion
-PyArray_Acorrelate
+PyArray_Correlate2
+PyArray_NeighborhoodIterNew
PyArray_SetDatetimeParseFunction
PyArray_DatetimeToDatetimeStruct
PyArray_TimedeltaToTimedeltaStruct
PyArray_DatetimeStructToDatetime
-PyArray_TimedeltaStructToTimedelta \ No newline at end of file
+PyArray_TimedeltaStructToTimedelta
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index e880f21a2..7744bb4bf 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -93,6 +93,9 @@ add_newdoc('numpy.core.umath', 'arccos',
`x`-coordinate on the unit circle.
For real arguments, the domain is [-1, 1].
+ out : ndarray, optional
+ Array to store results in.
+
Returns
-------
angle : ndarray
@@ -153,6 +156,8 @@ add_newdoc('numpy.core.umath', 'arccosh',
----------
x : array_like
Input array.
+ out : ndarray, optional
+ Array of the same shape as `x`.
Returns
-------
@@ -715,16 +720,44 @@ add_newdoc('numpy.core.umath', 'cos',
----------
x : array_like
Input array in radians.
+ out : ndarray, optional
+ Output array of same shape as `x`.
Returns
-------
- out : ndarray
- Output array of same shape as `x`.
+ y : ndarray
+ The corresponding cosine values.
+
+ Raises
+ ------
+ ValueError: invalid return array shape
+ if `out` is provided and `out.shape` != `x.shape` (See Examples)
+
+ Notes
+ -----
+ If `out` is provided, the function writes the result into it,
+ and returns a reference to `out`. (See Examples)
+
+ References
+ ----------
+ M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+ New York, NY: Dover, 1972.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
+ >>>
+ >>> # Example of providing the optional output parameter
+ >>> out2 = np.cos([0.1], out1)
+ >>> out2 is out1
+ True
+ >>>
+ >>> # Example of ValueError due to provision of shape mis-matched `out`
+ >>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: invalid return array shape
""")
@@ -903,7 +936,7 @@ add_newdoc('numpy.core.umath', 'equal',
add_newdoc('numpy.core.umath', 'exp',
"""
- Calculate the exponential of the elements in the input array.
+ Calculate the exponential of all elements in the input array.
Parameters
----------
@@ -913,7 +946,12 @@ add_newdoc('numpy.core.umath', 'exp',
Returns
-------
out : ndarray
- Element-wise exponential of `x`.
+ Output array, element-wise exponential of `x`.
+
+ See Also
+ --------
+ expm1 : Calculate ``exp(x) - 1`` for all elements in the array.
+ exp2 : Calculate ``2**x`` for all elements in the array.
Notes
-----
@@ -968,20 +1006,34 @@ add_newdoc('numpy.core.umath', 'exp2',
x : array_like
Input values.
+ out : ndarray, optional
+        Array to insert results into.
+
Returns
-------
out : ndarray
Element-wise 2 to the power `x`.
+ See Also
+ --------
+    exp : Calculate ``e**x`` for all elements in the array.
+
Notes
-----
.. versionadded:: 1.3.0
+
+
+ Examples
+ --------
+    >>> np.exp2([2, 3])
+    array([ 4.,  8.])
+
""")
add_newdoc('numpy.core.umath', 'expm1',
"""
- Compute ``exp(x) - 1`` for all elements in the array.
+ Calculate ``exp(x) - 1`` for all elements in the array.
Parameters
----------
@@ -1621,7 +1673,7 @@ add_newdoc('numpy.core.umath', 'log',
add_newdoc('numpy.core.umath', 'log10',
"""
- Compute the logarithm in base 10 element-wise.
+ Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
@@ -1631,7 +1683,8 @@ add_newdoc('numpy.core.umath', 'log10',
Returns
-------
y : ndarray
- Base-10 logarithm of `x`.
+ The logarithm to the base 10 of `x`, element-wise. NaNs are
+ returned where x is negative.
Notes
-----
@@ -1656,7 +1709,7 @@ add_newdoc('numpy.core.umath', 'log10',
Examples
--------
- >>> np.log10([1.e-15,-3.])
+ >>> np.log10([1e-15, -3.])
array([-15., NaN])
""")
@@ -1687,71 +1740,91 @@ add_newdoc('numpy.core.umath', 'log2',
add_newdoc('numpy.core.umath', 'logaddexp',
"""
- Logarithm of `exp(x) + exp(y)`.
+ Logarithm of the sum of exponentiations of the inputs.
- This function is useful in statistics where the calculated probabilities of
- events may be so small as to excede the range of normal floating point
- numbers. In such cases the logarithm of the calculated probability is
- stored. This function allows adding probabilities stored in such a fashion.
+ Calculates ``log(exp(x1) + exp(x2))``. This function is useful in
+ statistics where the calculated probabilities of events may be so small
+ as to exceed the range of normal floating point numbers. In such cases
+ the logarithm of the calculated probability is stored. This function
+ allows adding probabilities stored in such a fashion.
Parameters
----------
- x : array_like
- Input values.
- y : array_like
+ x1, x2 : array_like
Input values.
-
Returns
-------
result : ndarray
- Logarithm of `exp(x) + exp(y)`.
+ Logarithm of ``exp(x1) + exp(x2)``.
See Also
--------
- logaddexp2
+ logaddexp2: Logarithm of the sum of exponentiations of inputs in base-2.
Notes
-----
.. versionadded:: 1.3.0
+ Examples
+ --------
+ >>> prob1 = np.log(1e-50)
+ >>> prob2 = np.log(2.5e-50)
+ >>> prob12 = np.logaddexp(prob1, prob2)
+ >>> prob12
+ -113.87649168120691
+ >>> np.exp(prob12)
+ 3.5000000000000057e-50
+
""")
add_newdoc('numpy.core.umath', 'logaddexp2',
"""
- Base-2 Logarithm of `2**x + 2**y`.
+ Logarithm of the sum of exponentiations of the inputs in base-2.
- This function is useful in machine learning when the calculated probabilities of
- events may be so small as to excede the range of normal floating point
- numbers. In such cases the base-2 logarithm of the calculated probability
- can be used instead. This function allows adding probabilities stored in such a fashion.
+ Calculates ``log2(2**x1 + 2**x2)``. This function is useful in machine
+ learning when the calculated probabilities of events may be so small
+ as to exceed the range of normal floating point numbers. In such cases
+ the base-2 logarithm of the calculated probability can be used instead.
+ This function allows adding probabilities stored in such a fashion.
Parameters
----------
- x : array_like
- Input values.
- y : array_like
+ x1, x2 : array_like
Input values.
-
+ out : ndarray, optional
+ Array to store results in.
Returns
-------
result : ndarray
- Base-2 logarithm of `2**x + 2**y`.
+ Base-2 logarithm of ``2**x1 + 2**x2``.
See Also
--------
- logaddexp
+ logaddexp: Logarithm of the sum of exponentiations of the inputs.
Notes
-----
.. versionadded:: 1.3.0
+ Examples
+ --------
+ >>> prob1 = np.log2(1e-50)
+ >>> prob2 = np.log2(2.5e-50)
+ >>> prob12 = np.logaddexp2(prob1, prob2)
+ >>> prob1, prob2, prob12
+ (-166.09640474436813, -164.77447664948076, -164.28904982231052)
+ >>> 2**prob12
+ 3.4999999999999914e-50
+
""")
add_newdoc('numpy.core.umath', 'log1p',
"""
- `log(1 + x)` in base `e`, elementwise.
+ Return the natural logarithm of one plus the input array, element-wise.
+
+ Calculates ``log(1 + x)``.
Parameters
----------
@@ -1761,7 +1834,11 @@ add_newdoc('numpy.core.umath', 'log1p',
Returns
-------
y : ndarray
- Natural logarithm of `1 + x`, elementwise.
+ Natural logarithm of `1 + x`, element-wise.
+
+ See Also
+ --------
+ expm1 : ``exp(x) - 1``, the inverse of `log1p`.
Notes
-----
@@ -2022,8 +2099,6 @@ add_newdoc('numpy.core.umath', 'minimum',
add_newdoc('numpy.core.umath', 'fmax',
"""
- fmax(x1, x2[, out])
-
Element-wise maximum of array elements.
Compare two arrays and returns a new array containing the element-wise
@@ -2132,7 +2207,7 @@ add_newdoc('numpy.core.umath', 'fmin',
add_newdoc('numpy.core.umath', 'modf',
"""
- Return the fractional and integral part of a number.
+ Return the fractional and integral parts of an array, element-wise.
The fractional and integral parts are negative if the given number is
negative.
@@ -2140,7 +2215,7 @@ add_newdoc('numpy.core.umath', 'modf',
Parameters
----------
x : array_like
- Input number.
+ Input array.
Returns
-------
@@ -2149,33 +2224,37 @@ add_newdoc('numpy.core.umath', 'modf',
y2 : ndarray
Integral part of `x`.
+ Notes
+ -----
+ For integer input the return values are floats.
+
Examples
--------
- >>> np.modf(2.5)
- (0.5, 2.0)
- >>> np.modf(-.4)
- (-0.40000000000000002, -0.0)
+ >>> np.modf([0, 3.5])
+ (array([ 0. , 0.5]), array([ 0., 3.]))
+ >>> np.modf(-0.5)
+    (-0.5, -0.0)
""")
add_newdoc('numpy.core.umath', 'multiply',
"""
- Multiply arguments elementwise.
+ Multiply arguments element-wise.
Parameters
----------
x1, x2 : array_like
- The arrays to be multiplied.
+ Input arrays to be multiplied.
Returns
-------
y : ndarray
- The product of `x1` and `x2`, elementwise. Returns a scalar if
+ The product of `x1` and `x2`, element-wise. Returns a scalar if
both `x1` and `x2` are scalars.
Notes
-----
- Equivalent to `x1` * `x2` in terms of array-broadcasting.
+ Equivalent to `x1` * `x2` in terms of array broadcasting.
Examples
--------
@@ -2353,23 +2432,34 @@ add_newdoc('numpy.core.umath', 'deg2rad',
add_newdoc('numpy.core.umath', 'reciprocal',
"""
- Return element-wise reciprocal.
+ Return the reciprocal of the argument, element-wise.
+
+ Calculates ``1/x``.
Parameters
----------
x : array_like
- Input value.
+ Input array.
Returns
-------
y : ndarray
- Return value.
+ Return array.
+
+ Notes
+ -----
+ .. note::
+ This function is not designed to work with integers.
+
+ For integer arguments with absolute value larger than 1 the result is
+ always zero because of the way Python handles integer division.
+ For integer zero the result is an overflow.
Examples
--------
- >>> reciprocal(2.)
+ >>> np.reciprocal(2.)
0.5
- >>> reciprocal([1, 2., 3.33])
+ >>> np.reciprocal([1, 2., 3.33])
array([ 1. , 0.5 , 0.3003003])
""")
@@ -2378,7 +2468,7 @@ add_newdoc('numpy.core.umath', 'remainder',
"""
Returns element-wise remainder of division.
- Computes `x1 - floor(x1/x2)*x2`.
+ Computes ``x1 - floor(x1/x2)*x2``.
Parameters
----------
@@ -2390,22 +2480,23 @@ add_newdoc('numpy.core.umath', 'remainder',
Returns
-------
y : ndarray
- The remainder of the quotient `x1/x2`, element-wise. Returns a scalar
+ The remainder of the quotient ``x1/x2``, element-wise. Returns a scalar
if both `x1` and `x2` are scalars.
See Also
--------
- divide
- floor
+ divide, floor
Notes
-----
- Returns 0 when `x2` is 0.
+ Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of) integers.
Examples
--------
- >>> np.remainder([4,7],[2,3])
+ >>> np.remainder([4,7], [2,3])
array([0, 1])
+ >>> np.remainder(np.arange(7), 5)
+ array([0, 1, 2, 3, 4, 0, 1])
""")
@@ -2523,6 +2614,33 @@ add_newdoc('numpy.core.umath', 'signbit',
""")
+add_newdoc('numpy.core.umath', 'copysign',
+ """
+ Change the sign of x to that of y element-wise.
+
+ Parameters
+ ----------
+    x : array_like
+        Values to change the sign of.
+    y : array_like
+        The sign of `y` is copied to `x`.
+
+ Returns
+ -------
+ out : array_like
+ values of x with the sign of y
+
+ Examples
+ --------
+ >>> np.copysign(1.3, -1)
+ -1.3
+ >>> 1/np.copysign(0, 1)
+ inf
+ >>> 1/np.copysign(0, -1)
+ -inf
+
+ """)
+
add_newdoc('numpy.core.umath', 'sin',
"""
Trigonometric sine, element-wise.
@@ -2590,11 +2708,50 @@ add_newdoc('numpy.core.umath', 'sinh',
----------
x : array_like
Input array.
+ out : ndarray, optional
+ Output array of same shape as `x`.
Returns
-------
- out : ndarray
- Output array of same shape as `x`.
+ y : ndarray
+ The corresponding hyperbolic sine values.
+
+ Raises
+ ------
+ ValueError: invalid return array shape
+ if `out` is provided and `out.shape` != `x.shape` (See Examples)
+
+ Notes
+ -----
+ If `out` is provided, the function writes the result into it,
+ and returns a reference to `out`. (See Examples)
+
+ References
+ ----------
+ M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+ New York, NY: Dover, 1972, pg. 83.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.sinh(0)
+ 0.0
+ >>> np.sinh(np.pi*1j/2)
+ 1j
+ >>> np.sinh(np.pi*1j)
+ 1.2246063538223773e-016j (exact value is 0)
+ >>> # Discrepancy due to vagaries of floating point arithmetic.
+ >>>
+ >>> # Example of providing the optional output parameter
+ >>> out2 = np.sinh([0.1], out1)
+ >>> out2 is out1
+ True
+ >>>
+ >>> # Example of ValueError due to provision of shape mis-matched `out`
+ >>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: invalid return array shape
""")
@@ -2667,13 +2824,12 @@ add_newdoc('numpy.core.umath', 'square',
add_newdoc('numpy.core.umath', 'subtract',
"""
- Subtract arguments element-wise.
+ Subtract arguments, element-wise.
Parameters
----------
x1, x2 : array_like
- The arrays to be subtracted from each other. If type is 'array_like'
- the `x1` and `x2` shapes must be identical.
+ The arrays to be subtracted from each other.
Returns
-------
@@ -2683,7 +2839,7 @@ add_newdoc('numpy.core.umath', 'subtract',
Notes
-----
- Equivalent to `x1` - `x2` in terms of array-broadcasting.
+ Equivalent to ``x1 - x2`` in terms of array broadcasting.
Examples
--------
@@ -2703,39 +2859,107 @@ add_newdoc('numpy.core.umath', 'tan',
"""
Compute tangent element-wise.
+ Equivalent to ``np.sin(x)/np.cos(x)`` element-wise.
+
Parameters
----------
x : array_like
- Angles in radians.
+ Input array.
+ out : ndarray, optional
+ Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding tangent values.
+ Raises
+ ------
+ ValueError: invalid return array shape
+ if `out` is provided and `out.shape` != `x.shape` (See Examples)
+
+ Notes
+ -----
+ If `out` is provided, the function writes the result into it,
+ and returns a reference to `out`. (See Examples)
+
+ References
+ ----------
+ M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+ New York, NY: Dover, 1972.
Examples
--------
>>> from math import pi
>>> np.tan(np.array([-pi,pi/2,pi]))
array([ 1.22460635e-16, 1.63317787e+16, -1.22460635e-16])
+ >>>
+ >>> # Example of providing the optional output parameter illustrating
+ >>> # that what is returned is a reference to said parameter
+    >>> out2 = np.tan([0.1], out1)
+    >>> out2 is out1
+    True
+    >>>
+    >>> # Example of ValueError due to provision of shape mis-matched `out`
+    >>> np.tan(np.zeros((3,3)),np.zeros((2,2)))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: invalid return array shape
""")
add_newdoc('numpy.core.umath', 'tanh',
"""
- Hyperbolic tangent element-wise.
+ Compute hyperbolic tangent element-wise.
+
+ Equivalent to ``np.sinh(x)/np.cosh(x)`` or
+ ``-1j * np.tan(1j*x)``.
Parameters
----------
x : array_like
Input array.
+ out : ndarray, optional
+ Output array of same shape as `x`.
Returns
-------
y : ndarray
The corresponding hyperbolic tangent values.
+ Raises
+ ------
+ ValueError: invalid return array shape
+ if `out` is provided and `out.shape` != `x.shape` (See Examples)
+
+ Notes
+ -----
+ If `out` is provided, the function writes the result into it,
+ and returns a reference to `out`. (See Examples)
+
+ References
+ ----------
+ M. Abramowitz and I. A. Stegun, Handbook of Mathematical Functions.
+ New York, NY: Dover, 1972, pg. 83.
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> np.tanh((0, np.pi*1j, np.pi*1j/2))
+ array([ 0. +0.00000000e+00j, 0. -1.22460635e-16j, 0. +1.63317787e+16j])
+ >>>
+ >>> # Example of providing the optional output parameter illustrating
+ >>> # that what is returned is a reference to said parameter
+ >>> out2 = np.tanh([0.1], out1)
+ >>> out2 is out1
+ True
+ >>>
+ >>> # Example of ValueError due to provision of shape mis-matched `out`
+ >>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: invalid return array shape
+
""")
add_newdoc('numpy.core.umath', 'true_divide',
diff --git a/numpy/core/defmatrix.py b/numpy/core/defmatrix.py
index d1636e8b5..354e40060 100644
--- a/numpy/core/defmatrix.py
+++ b/numpy/core/defmatrix.py
@@ -2,7 +2,7 @@ __all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
import sys
import numeric as N
-from numeric import concatenate, isscalar, binary_repr, identity
+from numeric import concatenate, isscalar, binary_repr, identity, asanyarray
from numerictypes import issubdtype
# make translation table
@@ -115,6 +115,7 @@ def matrix_power(M,n):
[ 0, -1]])
"""
+ M = asanyarray(M)
if len(M.shape) != 2 or M.shape[0] != M.shape[1]:
raise ValueError("input must be a square array")
if not issubdtype(type(n),int):
@@ -490,6 +491,26 @@ class matrix(N.ndarray):
return N.ndarray.prod(self, axis, dtype, out)._align(axis)
def any(self, axis=None, out=None):
+ """
+ Test whether any array element along a given axis evaluates to True.
+
+ Refer to `numpy.any` for full documentation.
+
+ Parameters
+ ----------
+ axis: int, optional
+ Axis along which logical OR is performed
+ out: ndarray, optional
+ Output to existing array instead of creating new one, must have
+ same shape as expected output
+
+ Returns
+ -------
+ any : bool, ndarray
+ Returns a single bool if `axis` is ``None``; otherwise,
+ returns `ndarray`
+
+ """
return N.ndarray.any(self, axis, out)._align(axis)
def all(self, axis=None, out=None):
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 99b837ba2..f7f584d3d 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -256,15 +256,14 @@ def repeat(a, repeats, axis=None):
def put(a, ind, v, mode='raise'):
"""
- Changes specific elements of one array by replacing from another array.
+ Replaces specified elements of an array with given values.
- The indexing works on the flattened target array, `put` is roughly
+ The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
- for i, val in zip(ind, v):
- x.flat[i] = val
+ a.flat[ind] = v
Parameters
----------
@@ -292,14 +291,14 @@ def put(a, ind, v, mode='raise'):
Examples
--------
- >>> x = np.arange(5)
- >>> np.put(x, [0, 2], [-44, -55])
- >>> x
+ >>> a = np.arange(5)
+ >>> np.put(a, [0, 2], [-44, -55])
+ >>> a
array([-44, 1, -55, 3, 4])
- >>> x = np.arange(5)
- >>> np.put(x, 22, -5, mode='clip')
- >>> x
+ >>> a = np.arange(5)
+ >>> np.put(a, 22, -5, mode='clip')
+ >>> a
array([ 0, 1, 2, 3, -5])
"""
@@ -450,6 +449,22 @@ def sort(a, axis=-1, kind='quicksort', order=None):
the last axis is faster and uses less space than sorting along
any other axis.
+ The sort order for complex numbers is lexicographic. If both the real
+ and imaginary parts are non-nan then the order is determined by the
+ real parts except when they are equal, in which case the order is
+ determined by the imaginary parts.
+
+ Previous to numpy 1.4.0 sorting real and complex arrays containing nan
+ values led to undefined behaviour. In numpy versions >= 1.4.0 nan
+ values are sorted to the end. The extended sort order is:
+
+ Real: [R, nan]
+ Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+
+ where R is a non-nan real value. Complex values with the same nan
+ placements are sorted according to the non-nan part if it exists.
+ Non-nan values are sorted as before.
+
Examples
--------
>>> a = np.array([[1,4],[3,1]])
@@ -529,6 +544,9 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
-----
See `sort` for notes on the different sorting algorithms.
+ As of Numpy 1.4.0 argsort works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in numpy.sort.
+
Examples
--------
One dimensional array:
@@ -665,6 +683,9 @@ def searchsorted(a, v, side='left'):
-----
Binary search is used to find the required insertion points.
+ As of Numpy 1.4.0 searchsorted works with real/complex arrays containing
+    nan values. The enhanced sort order is documented in numpy.sort.
+
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
@@ -1086,10 +1107,14 @@ def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
+ When working along a given axis, a slice along that axis is returned in
+ `output` for each index where `condition` evaluates to True. When
+ working on a 1-D array, `compress` is equivalent to `extract`.
+
Parameters
----------
- condition : array_like
- Boolean 1-D array selecting which entries to return. If len(condition)
+ condition : 1-D array of bools
+ Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
@@ -1109,18 +1134,31 @@ def compress(condition, a, axis=None, out=None):
See Also
--------
- ndarray.compress: Equivalent method.
+ take, choose, diag, diagonal, select
+ ndarray.compress : Equivalent method.
Examples
--------
- >>> a = np.array([[1, 2], [3, 4]])
+ >>> a = np.array([[1, 2], [3, 4], [5, 6]])
+ >>> a
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
- >>> np.compress([1], a, axis=1)
- array([[1],
- [3]])
- >>> np.compress([0,1,1], a)
- array([2, 3])
+ >>> np.compress([False, True, True], a, axis=0)
+ array([[3, 4],
+ [5, 6]])
+ >>> np.compress([False, True], a, axis=1)
+ array([[2],
+ [4],
+ [6]])
+
+ Working on the flattened array does not return slices along an axis but
+ selects elements.
+
+ >>> np.compress([False, True], a)
+ array([2])
"""
try:
@@ -1306,6 +1344,8 @@ def any(a,axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
+    Returns a single boolean if `axis` is ``None``.
+
Parameters
----------
a : array_like
@@ -1322,8 +1362,8 @@ def any(a,axis=None, out=None):
Returns
-------
- any : ndarray, bool
- A new boolean or array is returned unless `out` is
+ any : bool, ndarray
+ A new boolean or `ndarray` is returned unless `out` is
specified, in which case a reference to `out` is returned.
See Also
@@ -1429,12 +1469,10 @@ def cumsum (a, axis=None, dtype=None, out=None):
Parameters
----------
a : array_like
- Input array or object that can be converted to an array.
+ Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
- (`axis` = `None`) is to compute the cumsum over the flattened
- array. `axis` may be negative, in which case it counts from the
- last to the first axis.
+ (None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
@@ -1459,11 +1497,12 @@ def cumsum (a, axis=None, dtype=None, out=None):
Examples
--------
- >>> a = np.array([[1,2,3],[4,5,6]])
+ >>> a = np.array([[1,2,3], [4,5,6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
- >>> np.cumsum(a,dtype=float) # specifies type of output value(s)
+ >>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
+
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
@@ -2122,14 +2161,13 @@ def std(a, axis=None, dtype=None, out=None, ddof=0):
Returns
-------
- standard_deviation : {ndarray, scalar}; see dtype parameter above.
+ standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
- numpy.var : Variance
- numpy.mean : Average
+ var, mean
Notes
-----
@@ -2145,7 +2183,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0):
is the square root of the estimated variance, so even with ``ddof=1``, it
will not be an unbiased estimate of the standard deviation per se.
- Note that, for complex numbers, std takes the absolute
+ Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
Examples
@@ -2153,9 +2191,9 @@ def std(a, axis=None, dtype=None, out=None, ddof=0):
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
- >>> np.std(a, 0)
+ >>> np.std(a, axis=0)
array([ 1., 1.])
- >>> np.std(a, 1)
+ >>> np.std(a, axis=1)
array([ 0.5, 0.5])
"""
diff --git a/numpy/core/include/numpy/_neighborhood_iterator_imp.h b/numpy/core/include/numpy/_neighborhood_iterator_imp.h
new file mode 100644
index 000000000..5a73784c1
--- /dev/null
+++ b/numpy/core/include/numpy/_neighborhood_iterator_imp.h
@@ -0,0 +1,90 @@
+#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP
+#error You should not include this header directly
+#endif
+/*
+ * Private API (here for inline)
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
+
+/*
+ * Update to next item of the iterator
+ *
+ * Note: this simply increments the coordinates vector, last dimension
+ * incremented first, i.e., for dimension 3
+ * ...
+ * -1, -1, -1
+ * -1, -1, 0
+ * -1, -1, 1
+ * ....
+ * -1, 0, -1
+ * -1, 0, 0
+ * ....
+ * 0, -1, -1
+ * 0, -1, 0
+ * ....
+ */
+#define _UPDATE_COORD_ITER(c) \
+ wb = iter->coordinates[c] < iter->bounds[c][1]; \
+ if (wb) { \
+ iter->coordinates[c] += 1; \
+ return 0; \
+ } \
+ else { \
+ iter->coordinates[c] = iter->bounds[c][0]; \
+ }
+
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
+{
+ int i, wb;
+
+ for (i = iter->nd - 1; i >= 0; --i) {
+ _UPDATE_COORD_ITER(i)
+ }
+
+ return 0;
+}
+
+/*
+ * Version optimized for 2d arrays, manual loop unrolling
+ */
+static NPY_INLINE int
+_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
+{
+ int wb;
+
+ _UPDATE_COORD_ITER(1)
+ _UPDATE_COORD_ITER(0)
+
+ return 0;
+}
+#undef _UPDATE_COORD_ITER
+
+/*
+ * Advance to the next neighbour
+ */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
+{
+ _PyArrayNeighborhoodIter_IncrCoord (iter);
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+ return 0;
+}
+
+/*
+ * Reset functions
+ */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
+{
+ int i;
+
+ for (i = 0; i < iter->nd; ++i) {
+ iter->coordinates[i] = iter->bounds[i][0];
+ }
+ iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
+
+ return 0;
+}
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index 8d0e444a2..34b080732 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -735,7 +735,18 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
#define NPY_DISABLE_C_API
#endif
-typedef struct {
+/*****************************
+ * Basic iterator object
+ *****************************/
+
+/* FWD declaration */
+typedef struct PyArrayIterObject_tag PyArrayIterObject;
+
+/* type of the function which translates a set of coordinates to a pointer to
+ * the data */
+typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*);
+
+struct PyArrayIterObject_tag {
PyObject_HEAD
int nd_m1; /* number of dimensions - 1 */
npy_intp index, size;
@@ -747,7 +758,12 @@ typedef struct {
PyArrayObject *ao;
char *dataptr; /* pointer to current item*/
npy_bool contiguous;
-} PyArrayIterObject;
+
+ npy_intp bounds[NPY_MAXDIMS][2];
+ npy_intp limits[NPY_MAXDIMS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS];
+ npy_iter_get_dataptr_t translate;
+} ;
/* Iterator API */
@@ -971,6 +987,72 @@ typedef struct {
} PyArrayMapIterObject;
+enum {
+ NPY_NEIGHBORHOOD_ITER_ZERO_PADDING,
+ NPY_NEIGHBORHOOD_ITER_ONE_PADDING,
+ NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING,
+ NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING,
+ NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING,
+};
+
+typedef struct {
+ PyObject_HEAD
+
+ /*
+ * PyArrayIterObject part: keep this in this exact order
+ */
+ int nd_m1; /* number of dimensions - 1 */
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS];/* N-dimensional loop */
+ npy_intp dims_m1[NPY_MAXDIMS]; /* ao->dimensions - 1 */
+ npy_intp strides[NPY_MAXDIMS]; /* ao->strides or fake */
+ npy_intp backstrides[NPY_MAXDIMS];/* how far to jump back */
+ npy_intp factors[NPY_MAXDIMS]; /* shape factors */
+ PyArrayObject *ao;
+ char *dataptr; /* pointer to current item*/
+ npy_bool contiguous;
+
+ npy_intp bounds[NPY_MAXDIMS][2];
+ npy_intp limits[NPY_MAXDIMS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS];
+ npy_iter_get_dataptr_t translate;
+
+ /*
+ * New members
+ */
+ npy_intp nd;
+
+ /* Dimensions is the dimension of the array */
+ npy_intp dimensions[NPY_MAXDIMS];
+
+ /* Neighborhood points coordinates are computed relatively to the point pointed
+ * by _internal_iter */
+ PyArrayIterObject* _internal_iter;
+ /* To keep a reference to the representation of the constant value for
+ * constant padding */
+ char* constant;
+
+ int mode;
+} PyArrayNeighborhoodIterObject;
+
+/*
+ * Neighborhood iterator API
+ */
+
+/* General: those work for any mode */
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter);
+static NPY_INLINE int
+PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter);
+// static NPY_INLINE int
+// PyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);
+
+/* Include inline implementations - functions defined there are not considered
+ * public API */
+#define _NPY_INCLUDE_NEIGHBORHOOD_IMP
+#include "_neighborhood_iterator_imp.h"
+#undef _NPY_INCLUDE_NEIGHBORHOOD_IMP
+
/* The default array type
*/
#define NPY_DEFAULT_TYPE NPY_DOUBLE
@@ -1112,7 +1194,7 @@ typedef struct {
#define NPY_SWAP 's'
#define NPY_IGNORE '|'
-#ifdef NPY_BIG_ENDIAN
+#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
#define NPY_NATBYTE NPY_BIG
#define NPY_OPPBYTE NPY_LITTLE
#else
diff --git a/numpy/core/include/numpy/npy_cpu.h b/numpy/core/include/numpy/npy_cpu.h
index 106901e55..4d6579189 100644
--- a/numpy/core/include/numpy/npy_cpu.h
+++ b/numpy/core/include/numpy/npy_cpu.h
@@ -8,7 +8,12 @@
* NPY_CPU_SPARC
* NPY_CPU_S390
* NPY_CPU_IA64
- * NPY_CPU_PARISC
+ * NPY_CPU_HPPA
+ * NPY_CPU_ALPHA
+ * NPY_CPU_ARMEL
+ * NPY_CPU_ARMEB
+ * NPY_CPU_SH_LE
+ * NPY_CPU_SH_BE
*/
#ifndef _NPY_CPUARCH_H_
#define _NPY_CPUARCH_H_
@@ -42,9 +47,18 @@
#define NPY_CPU_S390
#elif defined(__ia64)
#define NPY_CPU_IA64
-#elif defined(__parisc__)
- /* XXX: Not sure about this one... */
- #define NPY_CPU_PARISC
+#elif defined(__hppa__)
+ #define NPY_CPU_HPPA
+#elif defined(__alpha__)
+ #define NPY_CPU_ALPHA
+#elif defined(__arm__) && defined(__ARMEL__)
+ #define NPY_CPU_ARMEL
+#elif defined(__arm__) && defined(__ARMEB__)
+ #define NPY_CPU_ARMEB
+#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
+ #define NPY_CPU_SH_LE
+#elif defined(__sh__) && defined(__BIG_ENDIAN__)
+ #define NPY_CPU_SH_BE
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
diff --git a/numpy/core/include/numpy/npy_endian.h b/numpy/core/include/numpy/npy_endian.h
index 0a5c05ef9..f3ae4dbff 100644
--- a/numpy/core/include/numpy/npy_endian.h
+++ b/numpy/core/include/numpy/npy_endian.h
@@ -9,26 +9,32 @@
#ifdef NPY_HAVE_ENDIAN_H
/* Use endian.h if available */
#include <endian.h>
+
#define NPY_BYTE_ORDER __BYTE_ORDER
- #if (__BYTE_ORDER == __LITTLE_ENDIAN)
- #define NPY_LITTLE_ENDIAN
- #elif (__BYTE_ORDER == __BIG_ENDIAN)
- #define NPY_BIG_ENDIAN
- #else
- #error Unknown machine endianness detected.
- #endif
+ #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN
+ #define NPY_BIG_ENDIAN __BIG_ENDIAN
#else
/* Set endianness info using target CPU */
#include "npy_cpu.h"
- #if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64)\
- || defined(NPY_CPU_IA64)
- #define NPY_LITTLE_ENDIAN
- #define NPY_BYTE_ORDER 1234
- #elif defined(NPY_CPU_PPC) || defined(NPY_CPU_SPARC)\
- || defined(NPY_CPU_S390) || defined(NPY_CPU_PARISC) || defined(NPY_CPU_PPC64)
- #define NPY_BIG_ENDIAN
- #define NPY_BYTE_ORDER 4321
+ #define NPY_LITTLE_ENDIAN 1234
+ #define NPY_BIG_ENDIAN 4321
+
+ #if defined(NPY_CPU_X86) \
+ || defined(NPY_CPU_AMD64) \
+ || defined(NPY_CPU_IA64) \
+ || defined(NPY_CPU_ALPHA) \
+ || defined(NPY_CPU_ARMEL) \
+ || defined(NPY_CPU_SH_LE)
+ #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN
+ #elif defined(NPY_CPU_PPC) \
+ || defined(NPY_CPU_SPARC) \
+ || defined(NPY_CPU_S390) \
+ || defined(NPY_CPU_HPPA) \
+ || defined(NPY_CPU_PPC64) \
+ || defined(NPY_CPU_ARMEB) \
+ || defined(NPY_CPU_SH_BE)
+ #define NPY_BYTE_ORDER NPY_BIG_ENDIAN
#else
#error Unknown CPU: can not set endianness
#endif
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index 2a8ea182b..c219504e4 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -52,38 +52,47 @@ NPY_INLINE static float __npy_nzerof(void)
/*
* Useful constants
*/
-#define NPY_E 2.7182818284590452353602874713526625 /* e */
-#define NPY_LOG2E 1.4426950408889634073599246810018921 /* log_2 e */
-#define NPY_LOG10E 0.4342944819032518276511289189166051 /* log_10 e */
-#define NPY_LOGE2 0.6931471805599453094172321214581766 /* log_e 2 */
-#define NPY_LOGE10 2.3025850929940456840179914546843642 /* log_e 10 */
-#define NPY_PI 3.1415926535897932384626433832795029 /* pi */
-#define NPY_PI_2 1.5707963267948966192313216916397514 /* pi/2 */
-#define NPY_PI_4 0.7853981633974483096156608458198757 /* pi/4 */
-#define NPY_1_PI 0.3183098861837906715377675267450287 /* 1/pi */
-#define NPY_2_PI 0.6366197723675813430755350534900574 /* 2/pi */
-
-#define NPY_Ef 2.7182818284590452353602874713526625F /* e */
-#define NPY_LOG2Ef 1.4426950408889634073599246810018921F /* log_2 e */
-#define NPY_LOG10Ef 0.4342944819032518276511289189166051F /* log_10 e */
-#define NPY_LOGE2f 0.6931471805599453094172321214581766F /* log_e 2 */
-#define NPY_LOGE10f 2.3025850929940456840179914546843642F /* log_e 10 */
-#define NPY_PIf 3.1415926535897932384626433832795029F /* pi */
-#define NPY_PI_2f 1.5707963267948966192313216916397514F /* pi/2 */
-#define NPY_PI_4f 0.7853981633974483096156608458198757F /* pi/4 */
-#define NPY_1_PIf 0.3183098861837906715377675267450287F /* 1/pi */
-#define NPY_2_PIf 0.6366197723675813430755350534900574F /* 2/pi */
-
-#define NPY_El 2.7182818284590452353602874713526625L /* e */
-#define NPY_LOG2El 1.4426950408889634073599246810018921L /* log_2 e */
-#define NPY_LOG10El 0.4342944819032518276511289189166051L /* log_10 e */
-#define NPY_LOGE2l 0.6931471805599453094172321214581766L /* log_e 2 */
-#define NPY_LOGE10l 2.3025850929940456840179914546843642L /* log_e 10 */
-#define NPY_PIl 3.1415926535897932384626433832795029L /* pi */
-#define NPY_PI_2l 1.5707963267948966192313216916397514L /* pi/2 */
-#define NPY_PI_4l 0.7853981633974483096156608458198757L /* pi/4 */
-#define NPY_1_PIl 0.3183098861837906715377675267450287L /* 1/pi */
-#define NPY_2_PIl 0.6366197723675813430755350534900574L /* 2/pi */
+#define NPY_E 2.718281828459045235360287471352662498 /* e */
+#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */
+#define NPY_LOG10E 0.434294481903251827651128918916605082 /* log_10 e */
+#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */
+#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */
+#define NPY_PI 3.141592653589793238462643383279502884 /* pi */
+#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */
+#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */
+#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */
+#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */
+#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */
+#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */
+#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */
+
+#define NPY_Ef 2.718281828459045235360287471352662498F /* e */
+#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */
+#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */
+#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */
+#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */
+#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */
+#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */
+#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */
+#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */
+#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */
+#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */
+#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */
+#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */
+
+#define NPY_El 2.718281828459045235360287471352662498L /* e */
+#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */
+#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */
+#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */
+#define NPY_LOGE10l 2.302585092994045684017991454684364208L /* log_e 10 */
+#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */
+#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */
+#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */
+#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */
+#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */
+#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */
+#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */
+#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
/*
* C99 double math funcs
@@ -128,6 +137,8 @@ double npy_atan2(double x, double y);
double npy_pow(double x, double y);
double npy_modf(double x, double* y);
+double npy_copysign(double x, double y);
+
/*
* IEEE 754 fpu handling. Those are guaranteed to be macros
*/
@@ -198,6 +209,8 @@ float npy_fmodf(float x, float y);
float npy_modff(float x, float* y);
+float npy_copysignf(float x, float y);
+
/*
* float C99 math functions
*/
@@ -235,6 +248,8 @@ npy_longdouble npy_fmodl(npy_longdouble x, npy_longdouble y);
npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
+npy_longdouble npy_copysignl(npy_longdouble x, npy_longdouble y);
+
/*
* Non standard functions
*/
diff --git a/numpy/core/include/numpy/numpyconfig.h.in b/numpy/core/include/numpy/numpyconfig.h.in
index 9c3f40d17..b3c0d851d 100644
--- a/numpy/core/include/numpy/numpyconfig.h.in
+++ b/numpy/core/include/numpy/numpyconfig.h.in
@@ -21,10 +21,10 @@
@DEFINE_NPY_SIZEOF_PY_LONG_LONG@
#define NPY_INLINE @NPY_INLINE@
-#define NPY_ENABLE_SEPARATE_COMPILATION @NPY_ENABLE_SEPARATE_COMPILATION@
+@DEFINE_NPY_ENABLE_SEPARATE_COMPILATION@
#define NPY_VISIBILITY_HIDDEN @VISIBILITY_HIDDEN@
-#define NPY_USE_C99_FORMATS @USE_C99_FORMATS@
+@DEFINE_NPY_USE_C99_FORMATS@
#define NPY_ABI_VERSION @NPY_ABI_VERSION@
#define NPY_API_VERSION @NPY_API_VERSION@
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index 65f4938fe..2392c3aa7 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -17,7 +17,7 @@ mode_equivalents = {
class memmap(ndarray):
"""
- Create a memory-map to an array stored in a file on disk.
+ Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. Numpy's
@@ -58,7 +58,7 @@ class memmap(ndarray):
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout: C (row-major) or
Fortran (column-major). This only has an effect if the shape is
- greater than 1-D. The defaullt order is 'C'.
+ greater than 1-D. The default order is 'C'.
Methods
-------
diff --git a/numpy/core/mlib.ini.in b/numpy/core/mlib.ini.in
new file mode 100644
index 000000000..badaa2ae9
--- /dev/null
+++ b/numpy/core/mlib.ini.in
@@ -0,0 +1,12 @@
+[meta]
+Name = mlib
+Description = Math library used with this version of numpy
+Version = 1.0
+
+[default]
+Libs=@posix_mathlib@
+Cflags=
+
+[msvc]
+Libs=@msvc_mathlib@
+Cflags=
diff --git a/numpy/core/npymath.ini.in b/numpy/core/npymath.ini.in
new file mode 100644
index 000000000..73379e47c
--- /dev/null
+++ b/numpy/core/npymath.ini.in
@@ -0,0 +1,19 @@
+[meta]
+Name=npymath
+Description=Portable, core math library implementing C99 standard
+Version=0.1
+
+[variables]
+prefix=@prefix@
+libdir=${prefix}@sep@lib
+includedir=${prefix}@sep@include
+
+[default]
+Libs=-L${libdir} -lnpymath
+Cflags=-I${includedir}
+Requires=mlib
+
+[msvc]
+Libs=/LIBPATH:${libdir} npymath.lib
+Cflags=/INCLUDE:${includedir}
+Requires=mlib
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 50e3fd75b..c961bcc0f 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -6,7 +6,7 @@ __all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
'set_numeric_ops', 'can_cast',
'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
'isfortran', 'empty_like', 'zeros_like',
- 'correlate', 'acorrelate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
+ 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot',
'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
'array2string', 'get_printoptions', 'set_printoptions',
'array_repr', 'array_str', 'set_string_function',
@@ -22,6 +22,7 @@ __all__ = ['newaxis', 'ndarray', 'flatiter', 'ufunc',
'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS']
import sys
+import warnings
import multiarray
import umath
from umath import *
@@ -362,24 +363,52 @@ def require(a, dtype=None, requirements=None):
Parameters
----------
a : array_like
- The object to be converted to a type-and-requirement satisfying array
+ The object to be converted to a type-and-requirement-satisfying array.
dtype : data-type
- The required data-type (None is the default data-type -- float64)
- requirements : list of strings
+ The required data-type (the default data-type is float64).
+ requirements : str or list of str
The requirements list can be any of the following
- * 'ENSUREARRAY' ('E') - ensure that a base-class ndarray
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
* 'ALIGNED' ('A') - ensure a data-type aligned array
- * 'WRITEABLE' ('W') - ensure a writeable array
+ * 'WRITEABLE' ('W') - ensure a writable array
* 'OWNDATA' ('O') - ensure an array that owns its own data
+ See Also
+ --------
+ asarray : Convert input to an ndarray.
+ asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ ndarray.flags : Information about the memory layout of the array.
+
Notes
-----
The returned array will be guaranteed to have the listed requirements
by making a copy if needed.
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> x.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : False
+ WRITEABLE : True
+ ALIGNED : True
+ UPDATEIFCOPY : False
+
+ >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
+ >>> y.flags
+ C_CONTIGUOUS : False
+ F_CONTIGUOUS : True
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ UPDATEIFCOPY : False
+
"""
if requirements is None:
requirements = []
@@ -507,7 +536,7 @@ def argwhere(a):
[1, 2]])
"""
- return asarray(a.nonzero()).T
+ return transpose(asanyarray(a).nonzero())
def flatnonzero(a):
"""
@@ -557,7 +586,7 @@ def _mode_from_name(mode):
return _mode_from_name_dict[mode.lower()[0]]
return mode
-def correlate(a,v,mode='valid'):
+def correlate(a,v,mode='valid',old_behavior=True):
"""
Discrete, linear correlation of two 1-dimensional sequences.
@@ -574,51 +603,48 @@ def correlate(a,v,mode='valid'):
mode : {'valid', 'same', 'full'}, optional
Refer to the `convolve` docstring. Note that the default
is `valid`, unlike `convolve`, which uses `full`.
+ old_behavior : bool
+ If True, uses the old, numeric behavior (correlate(a,v) == correlate(v,
+ a), and the conjugate is not taken for complex arrays). If False, uses
+ the conventional signal processing definition (see note).
See Also
--------
convolve : Discrete, linear convolution of two
one-dimensional sequences.
- acorrelate: Discrete correlation following the usual signal processing
- definition for complex arrays, and without assuming that
- correlate(a, b) == correlate(b, a)
- """
- mode = _mode_from_name(mode)
- return multiarray.correlate(a,v,mode)
-
-def acorrelate(a, v, mode='valid'):
- """
- Discrete, linear correlation of two 1-dimensional sequences.
- This function computes the correlation as generally defined in signal
- processing texts:
+ Note
+ ----
+ If old_behavior is False, this function computes the correlation as
+ generally defined in signal processing texts::
- z[k] = sum_n a[n] * conj(v[n+k])
+ z[k] = sum_n a[n] * conj(v[n+k])
with a and v sequences being zero-padded where necessary and conj being the
conjugate.
- Parameters
- ----------
- a, v : array_like
- Input sequences.
- mode : {'valid', 'same', 'full'}, optional
- Refer to the `convolve` docstring. Note that the default
- is `valid`, unlike `convolve`, which uses `full`.
-
- Note
- ----
- This is the function which corresponds to matlab xcorr.
-
- See Also
+ Examples
--------
- convolve : Discrete, linear convolution of two
- one-dimensional sequences.
- correlate: Deprecated function to compute correlation
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5])
+ array([ 3.5])
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
+ array([ 2. , 3.5, 3. ])
+ >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
+ array([ 0.5, 2. , 3.5, 3. , 0. ])
+
"""
mode = _mode_from_name(mode)
- return multiarray.acorrelate(a, v, mode)
-
+ if old_behavior:
+ warnings.warn("""
+The current behavior of correlate is deprecated for 1.4.0, and will be removed
+for NumPy 1.5.0.
+
+The new behavior fits the conventional definition of correlation: inputs are
+never swapped, and the second argument is conjugated for complex arrays.""",
+ DeprecationWarning)
+ return multiarray.correlate(a,v,mode)
+ else:
+ return multiarray.correlate2(a,v,mode)
def convolve(a,v,mode='full'):
"""
@@ -1170,20 +1196,26 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
Parameters
----------
arr : ndarray
- Input array.
- max_line_width : int
- The maximum number of columns the string should span. Newline
- characters splits the string appropriately after array elements.
- precision : int
- Floating point precision.
- suppress_small : bool
- Represent very small numbers as zero.
+ Input array.
+ max_line_width : int, optional
+ The maximum number of columns the string should span. Newline
+ characters split the string appropriately after array elements.
+ precision : int, optional
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using `set_printoptions`.
+ suppress_small : bool, optional
+ Represent very small numbers as zero, default is False. Very small
+ is defined by `precision`, if the precision is 8 then
+ numbers smaller than 5e-9 are represented as zero.
Returns
-------
string : str
The string representation of an array.
+ See Also
+ --------
+ array_str, array2string, set_printoptions
Examples
--------
@@ -1194,6 +1226,10 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
>>> np.array_repr(np.array([], np.int32))
'array([], dtype=int32)'
+ >>> x = np.array([1e-6, 4e-7, 2, 3])
+ >>> np.array_repr(x, precision=6, suppress_small=True)
+ 'array([ 0.000001, 0. , 2. , 3. ])'
+
"""
if arr.size > 0 or arr.shape==(0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
@@ -1221,7 +1257,11 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
"""
- Return a string representation of an array.
+ Return a string representation of the data in an array.
+
+ The data in the array is returned as a single string. This function
+ is similar to `array_repr`, the difference is that `array_repr` also
+ returns information on the type of array and data type.
Parameters
----------
@@ -1230,13 +1270,16 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None):
max_line_width : int, optional
Inserts newlines if text is longer than `max_line_width`.
precision : int, optional
- If `a` is float, `precision` sets floating point precision.
- suppress_small : boolean, optional
- Represent very small numbers as zero.
+ Floating point precision. Default is the current printing precision
+ (usually 8), which can be altered using set_printoptions.
+ suppress_small : bool, optional
+ Represent very small numbers as zero, default is False. Very small is
+ defined by precision, if the precision is 8 then numbers smaller than
+ 5e-9 are represented as zero.
See Also
--------
- array2string, array_repr
+ array2string, array_repr, set_printoptions
Examples
--------
@@ -1264,8 +1307,8 @@ def indices(dimensions, dtype=int):
----------
dimensions : sequence of ints
The shape of the grid.
- dtype : optional
- Data_type of the result.
+ dtype : dtype, optional
+ Data type of the result.
Returns
-------
@@ -1291,7 +1334,7 @@ def indices(dimensions, dtype=int):
Examples
--------
- >>> grid = np.indices((2,3))
+ >>> grid = np.indices((2, 3))
>>> grid.shape
(2,2,3)
>>> grid[0] # row indices
@@ -1301,6 +1344,17 @@ def indices(dimensions, dtype=int):
array([[0, 1, 2],
[0, 1, 2]])
+ The indices can be used as an index into an array.
+
+ >>> x = np.arange(20).reshape(5, 4)
+ >>> row, col = np.indices((2, 3))
+ >>> x[row, col]
+ array([[0, 1, 2],
+ [4, 5, 6]])
+
+ Note that it would be more straightforward in the above example to
+ extract the required elements directly with ``x[:2, :3]``.
+
"""
dimensions = tuple(dimensions)
N = len(dimensions)
@@ -1638,15 +1692,9 @@ def identity(n, dtype=None):
[ 0., 0., 1.]])
"""
- a = array([1]+n*[0],dtype=dtype)
- b = empty((n,n),dtype=dtype)
-
- # Note that this assignment depends on the convention that since the a
- # array is shorter than the flattened b array, then the a array will
- # be repeated until it is the appropriate size. Given a's construction,
- # this nicely sets the diagonal to all ones.
- b.flat = a
- return b
+ a = zeros((n,n), dtype=dtype)
+ a.flat[::n+1] = 1
+ return a
def allclose(a, b, rtol=1.e-5, atol=1.e-8):
"""
@@ -1816,22 +1864,24 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
Parameters
----------
- all : {'ignore', 'warn', 'raise', 'call'}, optional
+ all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- - ignore: Take no action when the exception occurs
- - warn: Print a `RuntimeWarning` (via the Python `warnings` module)
- - raise: Raise a `FloatingPointError`
+ - ignore: Take no action when the exception occurs.
+ - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
+ - raise: Raise a `FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
+ - print: Print a warning directly to ``stdout``.
+ - log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
- divide : {'ignore', 'warn', 'raise', 'call'}, optional
+ divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
- over : {'ignore', 'warn', 'raise', 'call'}, optional
+ over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
- under : {'ignore', 'warn', 'raise', 'call'}, optional
+ under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
- invalid : {'ignore', 'warn', 'raise', 'call'}, optional
+ invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
@@ -1859,22 +1909,25 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
Examples
--------
-
- Set mode:
-
- >>> seterr(over='raise') # doctest: +SKIP
+ >>> np.seterr(over='raise')
{'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
'under': 'ignore'}
+ >>> np.seterr(all='ignore') # reset to default
+ {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
- >>> old_settings = seterr(all='warn', over='raise') # doctest: +SKIP
-
- >>> int16(32000) * int16(3) # doctest: +SKIP
+ >>> np.int16(32000) * np.int16(3)
+ 30464
+ >>> old_settings = np.seterr(all='warn', over='raise')
+ >>> np.int16(32000) * np.int16(3)
Traceback (most recent call last):
- File "<stdin>", line 1, in ?
+ File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in short_scalars
- >>> seterr(all='ignore') # doctest: +SKIP
- {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
- 'under': 'ignore'}
+
+ >>> np.seterr(all='print')
+ {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
+ >>> np.int16(32000) * np.int16(3)
+ Warning: overflow encountered in short_scalars
+ 30464
"""
@@ -1897,11 +1950,41 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
def geterr():
- """Get the current way of handling floating-point errors.
+ """
+ Get the current way of handling floating-point errors.
+
+ Returns
+ -------
+ res : dict
+ A dictionary with keys "divide", "over", "under", and "invalid",
+ whose values are from the strings "ignore", "print", "log", "warn",
+ "raise", and "call". The keys represent possible floating-point
+ exceptions, and the values define how these exceptions are handled.
+
+ See Also
+ --------
+ geterrcall, seterr, seterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterr() # default is all set to 'ignore'
+ {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
+ 'under': 'ignore'}
+ >>> np.arange(3.) / np.arange(3.)
+ array([ NaN, 1., 1.])
+
+ >>> oldsettings = np.seterr(all='warn', over='raise')
+ >>> np.geterr()
+ {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
+ >>> np.arange(3.) / np.arange(3.)
+ __main__:1: RuntimeWarning: invalid value encountered in divide
+ array([ NaN, 1., 1.])
- Returns a dictionary with entries "divide", "over", "under", and
- "invalid", whose values are from the strings
- "ignore", "print", "log", "warn", "raise", and "call".
"""
maskvalue = umath.geterrobj()[1]
mask = 7
@@ -1952,13 +2035,13 @@ def seterrcall(func):
is to set the error-handler to 'call', using `seterr`. Then, set
the function to call using this function.
- The second is to set the error-handler to `log`, using `seterr`.
+ The second is to set the error-handler to 'log', using `seterr`.
Floating-point errors then trigger a call to the 'write' method of
the provided object.
Parameters
----------
- log_func_or_obj : callable f(err, flag) or object with write method
+ func : callable f(err, flag) or object with write method
Function to call upon floating-point errors ('call'-mode) or
object whose 'write' method is used to log such message ('log'-mode).
@@ -1971,7 +2054,7 @@ def seterrcall(func):
In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
- If an object is provided, it's write method should take one argument,
+ If an object is provided, its write method should take one argument,
a string.
Returns
@@ -1979,6 +2062,10 @@ def seterrcall(func):
h : callable or log instance
The old error handler.
+ See Also
+ --------
+ seterr, geterr, geterrcall
+
Examples
--------
Callback upon error:
@@ -2025,7 +2112,45 @@ def seterrcall(func):
return old
def geterrcall():
- """Return the current callback function used on floating-point errors.
+ """
+ Return the current callback function used on floating-point errors.
+
+ When the error handling for a floating-point error (one of "divide",
+ "over", "under", or "invalid") is set to 'call' or 'log', the function
+ that is called or the log instance that is written to is returned by
+ `geterrcall`. This function or log instance has been set with
+ `seterrcall`.
+
+ Returns
+ -------
+ errobj : callable, log instance or None
+ The current error handler. If no handler was set through `seterrcall`,
+ ``None`` is returned.
+
+ See Also
+ --------
+ seterrcall, seterr, geterr
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrcall() # we did not yet set a handler, returns None
+
+ >>> oldsettings = np.seterr(all='call')
+ >>> def err_handler(type, flag):
+ ... print "Floating point error (%s), with flag %s" % (type, flag)
+ >>> oldhandler = np.seterrcall(err_handler)
+ >>> np.array([1,2,3])/0.0
+ Floating point error (divide by zero), with flag 1
+ array([ Inf, Inf, Inf])
+ >>> cur_handler = np.geterrcall()
+ >>> cur_handler is err_handler
+ True
+
"""
return umath.geterrobj()[2]
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index c72cc122a..70315f1e0 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -521,6 +521,9 @@ def issubdtype(arg1, arg2):
arg2 : dtype_like
dtype or string representing a typecode.
+ Returns
+ -------
+ out : bool
See Also
--------
@@ -660,14 +663,24 @@ def _find_common_coerce(a, b):
thisind = __test_types.index(a.char)
except ValueError:
return None
+ return _can_coerce_all([a,b], start=thisind)
+
+# Find a data-type that all data-types in a list can be coerced to
+def _can_coerce_all(dtypelist, start=0):
+ N = len(dtypelist)
+ if N == 0:
+ return None
+ if N == 1:
+ return dtypelist[0]
+ thisind = start
while thisind < __len_test_types:
newdtype = dtype(__test_types[thisind])
- if newdtype >= b and newdtype >= a:
+ numcoerce = len([x for x in dtypelist if newdtype >= x])
+ if numcoerce == N:
return newdtype
thisind += 1
return None
-
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules
@@ -696,16 +709,14 @@ def find_common_type(array_types, scalar_types):
array_types = [dtype(x) for x in array_types]
scalar_types = [dtype(x) for x in scalar_types]
- if len(scalar_types) == 0:
- if len(array_types) == 0:
- return None
- else:
- return max(array_types)
- if len(array_types) == 0:
- return max(scalar_types)
+ maxa = _can_coerce_all(array_types)
+ maxsc = _can_coerce_all(scalar_types)
- maxa = max(array_types)
- maxsc = max(scalar_types)
+ if maxa is None:
+ return maxsc
+
+ if maxsc is None:
+ return maxa
try:
index_a = _kind_list.index(maxa.kind)
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index dca1787a9..6c66512b0 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -1,11 +1,13 @@
import imp
import os
import sys
+import shutil
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
import warnings
+import re
from setup_common import *
@@ -142,7 +144,7 @@ def check_math_capabilities(config, moredefs, mathlibs):
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
# autoconf, hoping their own test are correct
- if sys.version_info[:2] >= (2, 6):
+ if sys.version_info[:2] >= (2, 5):
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
@@ -321,6 +323,7 @@ def configuration(parent_package='',top_path=None):
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
+
if newer(__file__,target):
config_cmd = config.get_config_cmd()
log.info('Generating %s',target)
@@ -434,8 +437,6 @@ def configuration(parent_package='',top_path=None):
# Check wether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
- else:
- moredefs.append(('NPY_USE_C99_FORMATS', 0))
# Inline check
inline = config_cmd.check_inline()
@@ -548,12 +549,13 @@ def configuration(parent_package='',top_path=None):
return []
config.add_data_files('include/numpy/*.h')
+ config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.numpy_include_dirs.extend(config.paths('include'))
- deps = [join('src','_signbit.c'),
+ deps = [join('src','npymath','_signbit.c'),
join('include','numpy','*object.h'),
'include/numpy/fenv/fenv.c',
'include/numpy/fenv/fenv.h',
@@ -578,10 +580,28 @@ def configuration(parent_package='',top_path=None):
# (don't ask). Because clib are generated before extensions, we have to
# explicitly add an extension which has generate_config_h and
# generate_numpyconfig_h as sources *before* adding npymath.
- config.add_library('npymath',
- sources=[join('src', 'npy_math.c.src')],
- depends=[])
-
+
+ subst_dict = dict([("sep", os.path.sep)])
+ def get_mathlib_info(*args):
+ # Another ugly hack: the mathlib info is known once build_src is run,
+ # but we cannot use add_installed_pkg_config here either, so we only
+ # update the substitution dictionary during npymath build
+ config_cmd = config.get_config_cmd()
+ mlibs = check_mathlib(config_cmd)
+
+ posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
+ msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
+ subst_dict["posix_mathlib"] = posix_mlib
+ subst_dict["msvc_mathlib"] = msvc_mlib
+
+ config.add_installed_library('npymath',
+ sources=[join('src', 'npymath', 'npy_math.c.src'), get_mathlib_info],
+ install_dir='lib')
+ config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
+ subst_dict)
+ config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
+ subst_dict)
+
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
@@ -706,6 +726,9 @@ def configuration(parent_package='',top_path=None):
config.add_extension('umath_tests',
sources = [join('src','umath', 'umath_tests.c.src')])
+ config.add_extension('multiarray_tests',
+ sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
+
config.add_data_dir('tests')
config.add_data_dir('tests/data')
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 37023e8db..ab801fc6d 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -76,8 +76,8 @@ def check_api_version(apiversion, codegen_dir):
"with checksum %s, but recorded checksum for C API version %d in " \
"codegen_dir/cversions.txt is %s. If functions were added in the " \
"C API, you have to update C_API_VERSION in %s."
- warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
- __file__),
+ warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
+ __file__),
MismatchCAPIWarning)
# Mandatory functions: if not found, fail the build
MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
@@ -87,17 +87,19 @@ MANDATORY_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs",
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
- "rint", "trunc", "exp2", "log2"]
+ "rint", "trunc", "exp2", "log2", "hypot", "atan2", "pow",
+ "copysign"]
# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h
-OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh"]
+OPTIONAL_STDFUNCS_MAYBE = ["expm1", "log1p", "acosh", "atanh", "asinh", "hypot",
+ "copysign"]
# C99 functions: float and long double versions
C99_FUNCS = ["sin", "cos", "tan", "sinh", "cosh", "tanh", "fabs", "floor",
"ceil", "rint", "trunc", "sqrt", "log10", "log", "log1p", "exp",
"expm1", "asin", "acos", "atan", "asinh", "acosh", "atanh",
"hypot", "atan2", "pow", "fmod", "modf", 'frexp', 'ldexp',
- "exp2", "log2"]
+ "exp2", "log2", "copysign"]
C99_FUNCS_SINGLE = [f + 'f' for f in C99_FUNCS]
C99_FUNCS_EXTENDED = [f + 'l' for f in C99_FUNCS]
diff --git a/numpy/core/setupscons.py b/numpy/core/setupscons.py
index be42246ad..3dfaa48d9 100644
--- a/numpy/core/setupscons.py
+++ b/numpy/core/setupscons.py
@@ -63,8 +63,10 @@ def configuration(parent_package='',top_path=None):
# XXX: I really have to think about how to communicate path info
# between scons and distutils, and set the options at one single
# location.
- h_file = join(get_scons_pkg_build_dir(config.name), '__multiarray_api.h')
- t_file = join(get_scons_pkg_build_dir(config.name), 'multiarray_api.txt')
+ h_file = join(get_scons_pkg_build_dir(config.name),
+ 'include/numpy/__multiarray_api.h')
+ t_file = join(get_scons_pkg_build_dir(config.name),
+ 'include/numpy/multiarray_api.txt')
config.add_data_files((header_dir, h_file),
(header_dir, t_file))
@@ -73,8 +75,10 @@ def configuration(parent_package='',top_path=None):
# XXX: I really have to think about how to communicate path info
# between scons and distutils, and set the options at one single
# location.
- h_file = join(get_scons_pkg_build_dir(config.name), '__ufunc_api.h')
- t_file = join(get_scons_pkg_build_dir(config.name), 'ufunc_api.txt')
+ h_file = join(get_scons_pkg_build_dir(config.name),
+ 'include/numpy/__ufunc_api.h')
+ t_file = join(get_scons_pkg_build_dir(config.name),
+ 'include/numpy/ufunc_api.txt')
config.add_data_files((header_dir, h_file),
(header_dir, t_file))
@@ -87,6 +91,7 @@ def configuration(parent_package='',top_path=None):
config.add_sconscript('SConstruct',
post_hook = add_generated_files,
source_files = source_files)
+ config.add_scons_installed_library('npymath', 'lib')
config.add_data_files('include/numpy/*.h')
config.add_include_dirs('src')
diff --git a/numpy/core/src/_sortmodule.c.src b/numpy/core/src/_sortmodule.c.src
index 51c5feb41..28299c1a7 100644
--- a/numpy/core/src/_sortmodule.c.src
+++ b/numpy/core/src/_sortmodule.c.src
@@ -1,85 +1,257 @@
/* -*- c -*- */
-/* The purpose of this module is to add faster sort functions
- that are type-specific. This is done by altering the
- function table for the builtin descriptors.
-
- These sorting functions are copied almost directly from numarray
- with a few modifications (complex comparisons compare the imaginary
- part if the real parts are equal, for example), and the names
- are changed.
-
- The original sorting code is due to Charles R. Harris who wrote
- it for numarray.
-*/
-
-/* Quick sort is usually the fastest, but the worst case scenario can
- be slower than the merge and heap sorts. The merge sort requires
- extra memory and so for large arrays may not be useful.
-
- The merge sort is *stable*, meaning that equal components
- are unmoved from their entry versions, so it can be used to
- implement lexigraphic sorting on multiple keys.
+/*
+ * The purpose of this module is to add faster sort functions
+ * that are type-specific. This is done by altering the
+ * function table for the builtin descriptors.
+ *
+ * These sorting functions are copied almost directly from numarray
+ * with a few modifications (complex comparisons compare the imaginary
+ * part if the real parts are equal, for example), and the names
+ * are changed.
+ *
+ * The original sorting code is due to Charles R. Harris who wrote
+ * it for numarray.
+ */
- The heap sort is included for completeness.
-*/
+/*
+ * Quick sort is usually the fastest, but the worst case scenario can
+ * be slower than the merge and heap sorts. The merge sort requires
+ * extra memory and so for large arrays may not be useful.
+ *
+ * The merge sort is *stable*, meaning that equal components
+ * are unmoved from their entry versions, so it can be used to
+ * implement lexicographic sorting on multiple keys.
+ *
+ * The heap sort is included for completeness.
+ */
#include "Python.h"
#include "numpy/noprefix.h"
+#include "numpy/npy_math.h"
+#define NOT_USED NPY_UNUSED(unused)
#define PYA_QS_STACK 100
#define SMALL_QUICKSORT 15
#define SMALL_MERGESORT 20
#define SMALL_STRING 16
-#define SWAP(a,b) {SWAP_temp = (b); (b)=(a); (a) = SWAP_temp;}
-#define STDC_LT(a,b) ((a) < (b))
-#define STDC_LE(a,b) ((a) <= (b))
-#define STDC_EQ(a,b) ((a) == (b))
-#define NUMC_LT(p,q) ((((p).real==(q).real) ? ((p).imag < (q).imag): ((p).real < (q).real)))
-#define NUMC_LE(p,q) ((((p).real==(q).real) ? ((p).imag <= (q).imag): ((p).real <= (q).real)))
-#define NUMC_EQ(p,q) (((p).real==(q).real) && ((p).imag == (q).imag))
-#define STRING_LT(pa, pb, len) (compare_string(pa, pb, len) < 0)
-#define STRING_LE(pa, pb, len) (compare_string(pa, pb, len) <= 0)
-#define STRING_EQ(pa, pb, len) (compare_string(pa, pb, len) == 0)
-#define UNICODE_LT(pa, pb, len) (compare_ucs4(pa, pb, len) < 0)
-#define UNICODE_LE(pa, pb, len) (compare_ucs4(pa, pb, len) <= 0)
-#define UNICODE_EQ(pa, pb, len) (compare_ucs4(pa, pb, len) == 0)
+
+/*
+ *****************************************************************************
+ ** SWAP MACROS **
+ *****************************************************************************
+ */
+
+/**begin repeat
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE, CFLOAT,
+ * CDOUBLE,CLONGDOUBLE, INTP#
+ * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int,
+ * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong,
+ * npy_float, npy_double, npy_longdouble, npy_cfloat, npy_cdouble,
+ * npy_clongdouble, npy_intp#
+ */
+#define @TYPE@_SWAP(a,b) {@type@ tmp = (b); (b)=(a); (a) = tmp;}
+
+/**end repeat**/
+
+/*
+ *****************************************************************************
+ ** COMPARISON FUNCTIONS **
+ *****************************************************************************
+ */
+
+/**begin repeat
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG#
+ * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong#
+ */
+NPY_INLINE static int
+@TYPE@_LT(@type@ a, @type@ b)
+{
+ return a < b;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ *
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE#
+ * #type = float, double, longdouble#
+ */
+NPY_INLINE static int
+@TYPE@_LT(@type@ a, @type@ b)
+{
+ return a < b || (b != b && a == a);
+}
+/**end repeat**/
+
+
+/*
+ * For inline functions SUN recommends not using a return in the then part
+ * of an if statement. It's a SUN compiler thing, so assign the return value
+ * to a variable instead.
+ */
+
+/**begin repeat
+ *
+ * #TYPE = CFLOAT, CDOUBLE, CLONGDOUBLE#
+ * #type = cfloat, cdouble, clongdouble#
+ */
+NPY_INLINE static int
+@TYPE@_LT(@type@ a, @type@ b)
+{
+ int ret;
+
+ if (a.real < b.real) {
+ ret = a.imag == a.imag || b.imag != b.imag;
+ }
+ else if (a.real > b.real) {
+ ret = b.imag != b.imag && a.imag == a.imag;
+ }
+ else if (a.real == b.real || (a.real != a.real && b.real != b.real)) {
+ ret = a.imag < b.imag || (b.imag != b.imag && a.imag == a.imag);
+ }
+ else {
+ ret = b.real != b.real;
+ }
+
+ return ret;
+}
+/**end repeat**/
+
+
+/* The PyObject functions are stubs for later use */
+NPY_INLINE static int
+PyObject_LT(PyObject *pa, PyObject *pb)
+{
+ return 0;
+}
+
+
+NPY_INLINE static void
+STRING_COPY(char *s1, char *s2, size_t len)
+{
+ memcpy(s1, s2, len);
+}
+
+
+NPY_INLINE static void
+STRING_SWAP(char *s1, char *s2, size_t len)
+{
+ while(len--) {
+ const char t = *s1;
+ *s1++ = *s2;
+ *s2++ = t;
+ }
+}
+
+
+NPY_INLINE static int
+STRING_LT(char *s1, char *s2, size_t len)
+{
+ const unsigned char *c1 = (unsigned char *)s1;
+ const unsigned char *c2 = (unsigned char *)s2;
+ size_t i;
+ int ret = 0;
+
+ for (i = 0; i < len; ++i) {
+ if (c1[i] != c2[i]) {
+ ret = c1[i] < c2[i];
+ break;
+ }
+ }
+ return ret;
+}
+
+
+NPY_INLINE static void
+UNICODE_COPY(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
+{
+ while(len--) {
+ *s1++ = *s2++;
+ }
+}
+
+
+NPY_INLINE static void
+UNICODE_SWAP(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
+{
+ while(len--) {
+ const npy_ucs4 t = *s1;
+ *s1++ = *s2;
+ *s2++ = t;
+ }
+}
+
+
+NPY_INLINE static int
+UNICODE_LT(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
+{
+ size_t i;
+ int ret = 0;
+
+ for (i = 0; i < len; ++i) {
+ if (s1[i] != s2[i]) {
+ ret = s1[i] < s2[i];
+ break;
+ }
+ }
+ return ret;
+}
+
+
+/*
+ *****************************************************************************
+ ** NUMERIC SORTS **
+ *****************************************************************************
+ */
/**begin repeat
- #TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,DATETIME,TIMEDELTA#
- #type=Bool,byte,ubyte,short,ushort,int,uint,long,ulong,longlong,ulonglong,float,double,longdouble,cfloat,cdouble,clongdouble,datetime,timedelta#
- #lessthan=STDC_LT*14,NUMC_LT*3,STDC_LT*2#
- #lessequal=STDC_LE*14,NUMC_LE*3,STDC_LE*2#
-**/
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE#
+ * #type = Bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ */
+
+
static int
-@TYPE@_quicksort(@type@ *start, intp num, void * NPY_UNUSED(unused))
+@TYPE@_quicksort(@type@ *start, npy_intp num, void *NOT_USED)
{
@type@ *pl = start;
@type@ *pr = start + num - 1;
- @type@ vp, SWAP_temp;
+ @type@ vp;
@type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pk;
- for(;;) {
+ for (;;) {
while ((pr - pl) > SMALL_QUICKSORT) {
/* quicksort partition */
pm = pl + ((pr - pl) >> 1);
- if (@lessthan@(*pm, *pl)) SWAP(*pm, *pl);
- if (@lessthan@(*pr, *pm)) SWAP(*pr, *pm);
- if (@lessthan@(*pm, *pl)) SWAP(*pm, *pl);
+ if (@TYPE@_LT(*pm, *pl)) @TYPE@_SWAP(*pm, *pl);
+ if (@TYPE@_LT(*pr, *pm)) @TYPE@_SWAP(*pr, *pm);
+ if (@TYPE@_LT(*pm, *pl)) @TYPE@_SWAP(*pm, *pl);
vp = *pm;
pi = pl;
pj = pr - 1;
- SWAP(*pm, *pj);
- for(;;) {
- do ++pi; while (@lessthan@(*pi, vp));
- do --pj; while (@lessthan@(vp, *pj));
- if (pi >= pj) break;
- SWAP(*pi,*pj);
+ @TYPE@_SWAP(*pm, *pj);
+ for (;;) {
+ do ++pi; while (@TYPE@_LT(*pi, vp));
+ do --pj; while (@TYPE@_LT(vp, *pj));
+ if (pi >= pj) {
+ break;
+ }
+ @TYPE@_SWAP(*pi,*pj);
}
pk = pr - 1;
- SWAP(*pi, *pk);
+ @TYPE@_SWAP(*pi, *pk);
/* push largest partition on stack */
if (pi - pl < pr - pi) {
*sptr++ = pi + 1;
@@ -94,16 +266,18 @@ static int
}
/* insertion sort */
- for(pi = pl + 1; pi <= pr; ++pi) {
+ for (pi = pl + 1; pi <= pr; ++pi) {
vp = *pi;
pj = pi;
pk = pi - 1;
- while (pj > pl && @lessthan@(vp, *pk)) {
+ while (pj > pl && @TYPE@_LT(vp, *pk)) {
*pj-- = *pk--;
}
*pj = vp;
}
- if (sptr == stack) break;
+ if (sptr == stack) {
+ break;
+ }
pr = *(--sptr);
pl = *(--sptr);
}
@@ -112,34 +286,36 @@ static int
}
static int
-@TYPE@_aquicksort(@type@ *v, intp* tosort, intp num, void *NPY_UNUSED(unused))
+@TYPE@_aquicksort(@type@ *v, npy_intp* tosort, npy_intp num, void *NOT_USED)
{
@type@ vp;
- intp *pl, *pr, SWAP_temp;
- intp *stack[PYA_QS_STACK], **sptr=stack, *pm, *pi, *pj, *pk, vi;
+ npy_intp *pl, *pr;
+ npy_intp *stack[PYA_QS_STACK], **sptr=stack, *pm, *pi, *pj, *pk, vi;
pl = tosort;
pr = tosort + num - 1;
- for(;;) {
+ for (;;) {
while ((pr - pl) > SMALL_QUICKSORT) {
/* quicksort partition */
pm = pl + ((pr - pl) >> 1);
- if (@lessthan@(v[*pm],v[*pl])) SWAP(*pm,*pl);
- if (@lessthan@(v[*pr],v[*pm])) SWAP(*pr,*pm);
- if (@lessthan@(v[*pm],v[*pl])) SWAP(*pm,*pl);
+ if (@TYPE@_LT(v[*pm],v[*pl])) INTP_SWAP(*pm,*pl);
+ if (@TYPE@_LT(v[*pr],v[*pm])) INTP_SWAP(*pr,*pm);
+ if (@TYPE@_LT(v[*pm],v[*pl])) INTP_SWAP(*pm,*pl);
vp = v[*pm];
pi = pl;
pj = pr - 1;
- SWAP(*pm,*pj);
- for(;;) {
- do ++pi; while (@lessthan@(v[*pi],vp));
- do --pj; while (@lessthan@(vp,v[*pj]));
- if (pi >= pj) break;
- SWAP(*pi,*pj);
- }
- pk = pr - 1;
- SWAP(*pi,*pk);
+ INTP_SWAP(*pm,*pj);
+ for (;;) {
+ do ++pi; while (@TYPE@_LT(v[*pi],vp));
+ do --pj; while (@TYPE@_LT(vp,v[*pj]));
+ if (pi >= pj) {
+ break;
+ }
+ INTP_SWAP(*pi,*pj);
+ }
+ pk = pr - 1;
+ INTP_SWAP(*pi,*pk);
/* push largest partition on stack */
if (pi - pl < pr - pi) {
*sptr++ = pi + 1;
@@ -154,17 +330,19 @@ static int
}
/* insertion sort */
- for(pi = pl + 1; pi <= pr; ++pi) {
+ for (pi = pl + 1; pi <= pr; ++pi) {
vi = *pi;
vp = v[vi];
pj = pi;
pk = pi - 1;
- while (pj > pl && @lessthan@(vp, v[*pk])) {
+ while (pj > pl && @TYPE@_LT(vp, v[*pk])) {
*pj-- = *pk--;
}
*pj = vi;
}
- if (sptr == stack) break;
+ if (sptr == stack) {
+ break;
+ }
pr = *(--sptr);
pl = *(--sptr);
}
@@ -174,10 +352,10 @@ static int
static int
-@TYPE@_heapsort(@type@ *start, intp n, void *NPY_UNUSED(unused))
+@TYPE@_heapsort(@type@ *start, npy_intp n, void *NOT_USED)
{
@type@ tmp, *a;
- intp i,j,l;
+ npy_intp i,j,l;
/* The array needs to be offset by one for heapsort indexing */
a = start - 1;
@@ -185,15 +363,17 @@ static int
for (l = n>>1; l > 0; --l) {
tmp = a[l];
for (i = l, j = l<<1; j <= n;) {
- if (j < n && @lessthan@(a[j], a[j+1]))
+ if (j < n && @TYPE@_LT(a[j], a[j+1])) {
j += 1;
- if (@lessthan@(tmp, a[j])) {
+ }
+ if (@TYPE@_LT(tmp, a[j])) {
a[i] = a[j];
i = j;
j += j;
}
- else
+ else {
break;
+ }
}
a[i] = tmp;
}
@@ -203,15 +383,17 @@ static int
a[n] = a[1];
n -= 1;
for (i = 1, j = 2; j <= n;) {
- if (j < n && @lessthan@(a[j], a[j+1]))
+ if (j < n && @TYPE@_LT(a[j], a[j+1])) {
j++;
- if (@lessthan@(tmp, a[j])) {
+ }
+ if (@TYPE@_LT(tmp, a[j])) {
a[i] = a[j];
i = j;
j += j;
}
- else
+ else {
break;
+ }
}
a[i] = tmp;
}
@@ -220,24 +402,26 @@ static int
}
static int
-@TYPE@_aheapsort(@type@ *v, intp *tosort, intp n, void *NPY_UNUSED(unused))
+@TYPE@_aheapsort(@type@ *v, npy_intp *tosort, npy_intp n, void *NOT_USED)
{
- intp *a, i,j,l, tmp;
+ npy_intp *a, i,j,l, tmp;
/* The arrays need to be offset by one for heapsort indexing */
a = tosort - 1;
for (l = n>>1; l > 0; --l) {
tmp = a[l];
for (i = l, j = l<<1; j <= n;) {
- if (j < n && @lessthan@(v[a[j]], v[a[j+1]]))
+ if (j < n && @TYPE@_LT(v[a[j]], v[a[j+1]])) {
j += 1;
- if (@lessthan@(v[tmp], v[a[j]])) {
+ }
+ if (@TYPE@_LT(v[tmp], v[a[j]])) {
a[i] = a[j];
i = j;
j += j;
}
- else
+ else {
break;
+ }
}
a[i] = tmp;
}
@@ -247,15 +431,17 @@ static int
a[n] = a[1];
n -= 1;
for (i = 1, j = 2; j <= n;) {
- if (j < n && @lessthan@(v[a[j]], v[a[j+1]]))
+ if (j < n && @TYPE@_LT(v[a[j]], v[a[j+1]])) {
j++;
- if (@lessthan@(v[tmp], v[a[j]])) {
+ }
+ if (@TYPE@_LT(v[tmp], v[a[j]])) {
a[i] = a[j];
i = j;
j += j;
}
- else
+ else {
break;
+ }
}
a[i] = tmp;
}
@@ -273,17 +459,17 @@ static void
pm = pl + ((pr - pl) >> 1);
@TYPE@_mergesort0(pl, pm, pw);
@TYPE@_mergesort0(pm, pr, pw);
- for(pi = pw, pj = pl; pj < pm;) {
+ for (pi = pw, pj = pl; pj < pm;) {
*pi++ = *pj++;
}
pj = pw;
pk = pl;
while (pj < pi && pm < pr) {
- if (@lessequal@(*pj,*pm)) {
- *pk = *pj++;
+ if (@TYPE@_LT(*pm,*pj)) {
+ *pk = *pm++;
}
else {
- *pk = *pm++;
+ *pk = *pj++;
}
pk++;
}
@@ -293,11 +479,11 @@ static void
}
else {
/* insertion sort */
- for(pi = pl + 1; pi < pr; ++pi) {
+ for (pi = pl + 1; pi < pr; ++pi) {
vp = *pi;
pj = pi;
pk = pi -1;
- while (pj > pl && @lessthan@(vp, *pk)) {
+ while (pj > pl && @TYPE@_LT(vp, *pk)) {
*pj-- = *pk--;
}
*pj = vp;
@@ -306,7 +492,7 @@ static void
}
static int
-@TYPE@_mergesort(@type@ *start, intp num, void *NPY_UNUSED(unused))
+@TYPE@_mergesort(@type@ *start, npy_intp num, void *NOT_USED)
{
@type@ *pl, *pr, *pw;
@@ -324,39 +510,39 @@ static int
}
static void
-@TYPE@_amergesort0(intp *pl, intp *pr, @type@ *v, intp *pw)
+@TYPE@_amergesort0(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw)
{
@type@ vp;
- intp vi, *pi, *pj, *pk, *pm;
+ npy_intp vi, *pi, *pj, *pk, *pm;
if (pr - pl > SMALL_MERGESORT) {
/* merge sort */
pm = pl + ((pr - pl + 1)>>1);
@TYPE@_amergesort0(pl,pm-1,v,pw);
@TYPE@_amergesort0(pm,pr,v,pw);
- for(pi = pw, pj = pl; pj < pm; ++pi, ++pj) {
+ for (pi = pw, pj = pl; pj < pm; ++pi, ++pj) {
*pi = *pj;
}
- for(pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) {
- if (@lessequal@(v[*pk],v[*pj])) {
- *pm = *pk;
- ++pk;
- }
- else {
+ for (pk = pw, pm = pl; pk < pi && pj <= pr; ++pm) {
+ if (@TYPE@_LT(v[*pj],v[*pk])) {
*pm = *pj;
++pj;
}
+ else {
+ *pm = *pk;
+ ++pk;
+ }
}
- for(; pk < pi; ++pm, ++pk) {
+ for (; pk < pi; ++pm, ++pk) {
*pm = *pk;
}
}
else {
/* insertion sort */
- for(pi = pl + 1; pi <= pr; ++pi) {
+ for (pi = pl + 1; pi <= pr; ++pi) {
vi = *pi;
vp = v[vi];
- for(pj = pi, pk = pi - 1; pj > pl && @lessthan@(vp, v[*pk]); --pj, --pk) {
+ for (pj = pi, pk = pi - 1; pj > pl && @TYPE@_LT(vp, v[*pk]); --pj, --pk) {
*pj = *pk;
}
*pj = vi;
@@ -365,9 +551,9 @@ static void
}
static int
-@TYPE@_amergesort(@type@ *v, intp *tosort, intp num, void *NPY_UNUSED(unused))
+@TYPE@_amergesort(@type@ *v, npy_intp *tosort, npy_intp num, void *NOT_USED)
{
- intp *pl, *pr, *pw;
+ npy_intp *pl, *pr, *pw;
pl = tosort; pr = pl + num - 1;
pw = PyDimMem_NEW((1+num/2));
@@ -382,85 +568,22 @@ static int
return 0;
}
+
+
/**end repeat**/
/*
- * Subroutines that will hopefully be inlined when the code
- * for strings and unicode is compiled with proper flags.
+ *****************************************************************************
+ ** STRING SORTS **
+ *****************************************************************************
*/
-#define copy_string memcpy
-
-
-static void
-swap_string(char *s1, char *s2, size_t len)
-{
- while(len--) {
- const char t = *s1;
- *s1++ = *s2;
- *s2++ = t;
- }
-}
-
-
-static int
-compare_string(char *s1, char *s2, size_t len)
-{
- const unsigned char *c1 = (unsigned char *)s1;
- const unsigned char *c2 = (unsigned char *)s2;
- size_t i;
-
- for(i = 0; i < len; ++i) {
- if (c1[i] != c2[i]) {
- return (c1[i] > c2[i]) ? 1 : -1;
- }
- }
- return 0;
-}
-
-
-static void
-copy_ucs4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
-{
- while(len--) {
- *s1++ = *s2++;
- }
-}
-
-
-static void
-swap_ucs4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
-{
- while(len--) {
- const npy_ucs4 t = *s1;
- *s1++ = *s2;
- *s2++ = t;
- }
-}
-
-
-static int
-compare_ucs4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
-{
- size_t i;
-
- for(i = 0; i < len; ++i) {
- if (s1[i] != s2[i]) {
- return (s1[i] > s2[i]) ? 1 : -1;
- }
- }
- return 0;
-}
-
/**begin repeat
- #TYPE=STRING, UNICODE#
- #type=char, PyArray_UCS4#
- #lessthan=STRING_LT, UNICODE_LT#
- #lessequal=STRING_LE, UNICODE_LE#
- #swap=swap_string, swap_ucs4#
- #copy=copy_string, copy_ucs4#
-**/
+ *
+ * #TYPE = STRING, UNICODE#
+ * #type = char, PyArray_UCS4#
+ */
static void
@TYPE@_mergesort0(@type@ *pl, @type@ *pr, @type@ *pw, @type@ *vp, size_t len)
@@ -472,41 +595,41 @@ static void
pm = pl + (((pr - pl)/len) >> 1)*len;
@TYPE@_mergesort0(pl, pm, pw, vp, len);
@TYPE@_mergesort0(pm, pr, pw, vp, len);
- @copy@(pw, pl, pm - pl);
+ @TYPE@_COPY(pw, pl, pm - pl);
pi = pw + (pm - pl);
pj = pw;
pk = pl;
while (pj < pi && pm < pr) {
- if (@lessequal@(pj, pm, len)) {
- @copy@(pk, pj, len);
- pj += len;
+ if (@TYPE@_LT(pm, pj, len)) {
+ @TYPE@_COPY(pk, pm, len);
+ pm += len;
}
else {
- @copy@(pk, pm, len);
- pm += len;
+ @TYPE@_COPY(pk, pj, len);
+ pj += len;
}
pk += len;
}
- @copy@(pk, pj, pi - pj);
+ @TYPE@_COPY(pk, pj, pi - pj);
}
else {
/* insertion sort */
- for(pi = pl + len; pi < pr; pi += len) {
- @copy@(vp, pi, len);
+ for (pi = pl + len; pi < pr; pi += len) {
+ @TYPE@_COPY(vp, pi, len);
pj = pi;
pk = pi - len;
- while (pj > pl && @lessthan@(vp, pk, len)) {
- @copy@(pj, pk, len);
+ while (pj > pl && @TYPE@_LT(vp, pk, len)) {
+ @TYPE@_COPY(pj, pk, len);
pj -= len;
pk -= len;
}
- @copy@(pj, vp, len);
+ @TYPE@_COPY(pj, vp, len);
}
}
}
static int
-@TYPE@_mergesort(@type@ *start, intp num, PyArrayObject *arr)
+@TYPE@_mergesort(@type@ *start, npy_intp num, PyArrayObject *arr)
{
const size_t elsize = arr->descr->elsize;
const size_t len = elsize / sizeof(@type@);
@@ -537,7 +660,7 @@ fail_0:
}
static int
-@TYPE@_quicksort(@type@ *start, intp num, PyArrayObject *arr)
+@TYPE@_quicksort(@type@ *start, npy_intp num, PyArrayObject *arr)
{
const size_t len = arr->descr->elsize/sizeof(@type@);
@type@ *vp = malloc(arr->descr->elsize);
@@ -545,25 +668,27 @@ static int
@type@ *pr = start + (num - 1)*len;
@type@ *stack[PYA_QS_STACK], **sptr = stack, *pm, *pi, *pj, *pk;
- for(;;) {
+ for (;;) {
while ((size_t)(pr - pl) > SMALL_QUICKSORT*len) {
/* quicksort partition */
pm = pl + (((pr - pl)/len) >> 1)*len;
- if (@lessthan@(pm, pl, len)) @swap@(pm, pl, len);
- if (@lessthan@(pr, pm, len)) @swap@(pr, pm, len);
- if (@lessthan@(pm, pl, len)) @swap@(pm, pl, len);
- @copy@(vp, pm, len);
+ if (@TYPE@_LT(pm, pl, len)) @TYPE@_SWAP(pm, pl, len);
+ if (@TYPE@_LT(pr, pm, len)) @TYPE@_SWAP(pr, pm, len);
+ if (@TYPE@_LT(pm, pl, len)) @TYPE@_SWAP(pm, pl, len);
+ @TYPE@_COPY(vp, pm, len);
pi = pl;
pj = pr - len;
- @swap@(pm, pj, len);
- for(;;) {
- do pi += len; while (@lessthan@(pi, vp, len));
- do pj -= len; while (@lessthan@(vp, pj, len));
- if (pi >= pj) break;
- @swap@(pi, pj, len);
+ @TYPE@_SWAP(pm, pj, len);
+ for (;;) {
+ do pi += len; while (@TYPE@_LT(pi, vp, len));
+ do pj -= len; while (@TYPE@_LT(vp, pj, len));
+ if (pi >= pj) {
+ break;
+ }
+ @TYPE@_SWAP(pi, pj, len);
}
pk = pr - len;
- @swap@(pi, pk, len);
+ @TYPE@_SWAP(pi, pk, len);
/* push largest partition on stack */
if (pi - pl < pr - pi) {
*sptr++ = pi + len;
@@ -578,18 +703,20 @@ static int
}
/* insertion sort */
- for(pi = pl + len; pi <= pr; pi += len) {
- @copy@(vp, pi, len);
+ for (pi = pl + len; pi <= pr; pi += len) {
+ @TYPE@_COPY(vp, pi, len);
pj = pi;
pk = pi - len;
- while (pj > pl && @lessthan@(vp, pk, len)) {
- @copy@(pj, pk, len);
+ while (pj > pl && @TYPE@_LT(vp, pk, len)) {
+ @TYPE@_COPY(pj, pk, len);
pj -= len;
pk -= len;
}
- @copy@(pj, vp, len);
+ @TYPE@_COPY(pj, vp, len);
+ }
+ if (sptr == stack) {
+ break;
}
- if (sptr == stack) break;
pr = *(--sptr);
pl = *(--sptr);
}
@@ -600,45 +727,47 @@ static int
static int
-@TYPE@_heapsort(@type@ *start, intp n, PyArrayObject *arr)
+@TYPE@_heapsort(@type@ *start, npy_intp n, PyArrayObject *arr)
{
size_t len = arr->descr->elsize/sizeof(@type@);
@type@ *tmp = malloc(arr->descr->elsize);
@type@ *a = start - len;
- intp i,j,l;
+ npy_intp i,j,l;
for (l = n>>1; l > 0; --l) {
- @copy@(tmp, a + l*len, len);
+ @TYPE@_COPY(tmp, a + l*len, len);
for (i = l, j = l<<1; j <= n;) {
- if (j < n && @lessthan@(a + j*len, a + (j+1)*len, len))
+ if (j < n && @TYPE@_LT(a + j*len, a + (j+1)*len, len))
j += 1;
- if (@lessthan@(tmp, a + j*len, len)) {
- @copy@(a + i*len, a + j*len, len);
+ if (@TYPE@_LT(tmp, a + j*len, len)) {
+ @TYPE@_COPY(a + i*len, a + j*len, len);
i = j;
j += j;
}
- else
+ else {
break;
+ }
}
- @copy@(a + i*len, tmp, len);
+ @TYPE@_COPY(a + i*len, tmp, len);
}
for (; n > 1;) {
- @copy@(tmp, a + n*len, len);
- @copy@(a + n*len, a + len, len);
+ @TYPE@_COPY(tmp, a + n*len, len);
+ @TYPE@_COPY(a + n*len, a + len, len);
n -= 1;
for (i = 1, j = 2; j <= n;) {
- if (j < n && @lessthan@(a + j*len, a + (j+1)*len, len))
+ if (j < n && @TYPE@_LT(a + j*len, a + (j+1)*len, len))
j++;
- if (@lessthan@(tmp, a + j*len, len)) {
- @copy@(a + i*len, a + j*len, len);
+ if (@TYPE@_LT(tmp, a + j*len, len)) {
+ @TYPE@_COPY(a + i*len, a + j*len, len);
i = j;
j += j;
}
- else
+ else {
break;
+ }
}
- @copy@(a + i*len, tmp, len);
+ @TYPE@_COPY(a + i*len, tmp, len);
}
free(tmp);
@@ -647,10 +776,10 @@ static int
static int
-@TYPE@_aheapsort(@type@ *v, intp *tosort, intp n, PyArrayObject *arr)
+@TYPE@_aheapsort(@type@ *v, npy_intp *tosort, npy_intp n, PyArrayObject *arr)
{
size_t len = arr->descr->elsize/sizeof(@type@);
- intp *a, i,j,l, tmp;
+ npy_intp *a, i,j,l, tmp;
/* The array needs to be offset by one for heapsort indexing */
a = tosort - 1;
@@ -658,15 +787,16 @@ static int
for (l = n>>1; l > 0; --l) {
tmp = a[l];
for (i = l, j = l<<1; j <= n;) {
- if (j < n && @lessthan@(v + a[j]*len, v + a[j+1]*len, len))
+ if (j < n && @TYPE@_LT(v + a[j]*len, v + a[j+1]*len, len))
j += 1;
- if (@lessthan@(v + tmp*len, v + a[j]*len, len)) {
+ if (@TYPE@_LT(v + tmp*len, v + a[j]*len, len)) {
a[i] = a[j];
i = j;
j += j;
}
- else
+ else {
break;
+ }
}
a[i] = tmp;
}
@@ -676,15 +806,16 @@ static int
a[n] = a[1];
n -= 1;
for (i = 1, j = 2; j <= n;) {
- if (j < n && @lessthan@(v + a[j]*len, v + a[j+1]*len, len))
+ if (j < n && @TYPE@_LT(v + a[j]*len, v + a[j+1]*len, len))
j++;
- if (@lessthan@(v + tmp*len, v + a[j]*len, len)) {
+ if (@TYPE@_LT(v + tmp*len, v + a[j]*len, len)) {
a[i] = a[j];
i = j;
j += j;
}
- else
+ else {
break;
+ }
}
a[i] = tmp;
}
@@ -694,35 +825,37 @@ static int
static int
-@TYPE@_aquicksort(@type@ *v, intp* tosort, intp num, PyArrayObject *arr)
+@TYPE@_aquicksort(@type@ *v, npy_intp* tosort, npy_intp num, PyArrayObject *arr)
{
size_t len = arr->descr->elsize/sizeof(@type@);
@type@ *vp;
- intp *pl = tosort;
- intp *pr = tosort + num - 1;
- intp *stack[PYA_QS_STACK];
- intp **sptr=stack;
- intp *pm, *pi, *pj, *pk, vi, SWAP_temp;
+ npy_intp *pl = tosort;
+ npy_intp *pr = tosort + num - 1;
+ npy_intp *stack[PYA_QS_STACK];
+ npy_intp **sptr=stack;
+ npy_intp *pm, *pi, *pj, *pk, vi;
- for(;;) {
+ for (;;) {
while ((pr - pl) > SMALL_QUICKSORT) {
/* quicksort partition */
pm = pl + ((pr - pl) >> 1);
- if (@lessthan@(v + (*pm)*len, v + (*pl)*len, len)) SWAP(*pm, *pl);
- if (@lessthan@(v + (*pr)*len, v + (*pm)*len, len)) SWAP(*pr, *pm);
- if (@lessthan@(v + (*pm)*len, v + (*pl)*len, len)) SWAP(*pm, *pl);
+ if (@TYPE@_LT(v + (*pm)*len, v + (*pl)*len, len)) INTP_SWAP(*pm, *pl);
+ if (@TYPE@_LT(v + (*pr)*len, v + (*pm)*len, len)) INTP_SWAP(*pr, *pm);
+ if (@TYPE@_LT(v + (*pm)*len, v + (*pl)*len, len)) INTP_SWAP(*pm, *pl);
vp = v + (*pm)*len;
pi = pl;
pj = pr - 1;
- SWAP(*pm,*pj);
- for(;;) {
- do ++pi; while (@lessthan@(v + (*pi)*len, vp, len));
- do --pj; while (@lessthan@(vp, v + (*pj)*len, len));
- if (pi >= pj) break;
- SWAP(*pi,*pj);
+ INTP_SWAP(*pm,*pj);
+ for (;;) {
+ do ++pi; while (@TYPE@_LT(v + (*pi)*len, vp, len));
+ do --pj; while (@TYPE@_LT(vp, v + (*pj)*len, len));
+ if (pi >= pj) {
+ break;
+ }
+ INTP_SWAP(*pi,*pj);
}
pk = pr - 1;
- SWAP(*pi,*pk);
+ INTP_SWAP(*pi,*pk);
/* push largest partition on stack */
if (pi - pl < pr - pi) {
*sptr++ = pi + 1;
@@ -737,17 +870,19 @@ static int
}
/* insertion sort */
- for(pi = pl + 1; pi <= pr; ++pi) {
+ for (pi = pl + 1; pi <= pr; ++pi) {
vi = *pi;
vp = v + vi*len;
pj = pi;
pk = pi - 1;
- while (pj > pl && @lessthan@(vp, v + (*pk)*len, len)) {
+ while (pj > pl && @TYPE@_LT(vp, v + (*pk)*len, len)) {
*pj-- = *pk--;
}
*pj = vi;
}
- if (sptr == stack) break;
+ if (sptr == stack) {
+ break;
+ }
pr = *(--sptr);
pl = *(--sptr);
}
@@ -757,26 +892,26 @@ static int
static void
-@TYPE@_amergesort0(intp *pl, intp *pr, @type@ *v, intp *pw, int len)
+@TYPE@_amergesort0(npy_intp *pl, npy_intp *pr, @type@ *v, npy_intp *pw, int len)
{
@type@ *vp;
- intp vi, *pi, *pj, *pk, *pm;
+ npy_intp vi, *pi, *pj, *pk, *pm;
if (pr - pl > SMALL_MERGESORT) {
/* merge sort */
pm = pl + ((pr - pl) >> 1);
@TYPE@_amergesort0(pl,pm,v,pw,len);
@TYPE@_amergesort0(pm,pr,v,pw,len);
- for(pi = pw, pj = pl; pj < pm;) {
+ for (pi = pw, pj = pl; pj < pm;) {
*pi++ = *pj++;
}
pj = pw;
pk = pl;
while (pj < pi && pm < pr) {
- if (@lessequal@(v + (*pj)*len, v + (*pm)*len, len)) {
- *pk = *pj++;
- } else {
+ if (@TYPE@_LT(v + (*pm)*len, v + (*pj)*len, len)) {
*pk = *pm++;
+ } else {
+ *pk = *pj++;
}
pk++;
}
@@ -785,12 +920,12 @@ static void
}
} else {
/* insertion sort */
- for(pi = pl + 1; pi < pr; ++pi) {
+ for (pi = pl + 1; pi < pr; ++pi) {
vi = *pi;
vp = v + vi*len;
pj = pi;
pk = pi -1;
- while (pj > pl && @lessthan@(vp, v + (*pk)*len, len)) {
+ while (pj > pl && @TYPE@_LT(vp, v + (*pk)*len, len)) {
*pj-- = *pk--;
}
*pj = vi;
@@ -800,11 +935,11 @@ static void
static int
-@TYPE@_amergesort(@type@ *v, intp *tosort, intp num, PyArrayObject *arr)
+@TYPE@_amergesort(@type@ *v, npy_intp *tosort, npy_intp num, PyArrayObject *arr)
{
const size_t elsize = arr->descr->elsize;
const size_t len = elsize / sizeof(@type@);
- intp *pl, *pr, *pw;
+ npy_intp *pl, *pr, *pw;
pl = tosort;
pr = pl + num;
@@ -826,20 +961,23 @@ add_sortfuncs(void)
PyArray_Descr *descr;
/**begin repeat
- #TYPE=BOOL,BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,CFLOAT,CDOUBLE,CLONGDOUBLE,STRING,UNICODE,DATETIME,TIMEDELTA#
- **/
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE, STRING, UNICODE#
+ */
descr = PyArray_DescrFromType(PyArray_@TYPE@);
- descr->f->sort[PyArray_QUICKSORT] = \
+ descr->f->sort[PyArray_QUICKSORT] =
(PyArray_SortFunc *)@TYPE@_quicksort;
- descr->f->argsort[PyArray_QUICKSORT] = \
+ descr->f->argsort[PyArray_QUICKSORT] =
(PyArray_ArgSortFunc *)@TYPE@_aquicksort;
- descr->f->sort[PyArray_HEAPSORT] = \
+ descr->f->sort[PyArray_HEAPSORT] =
(PyArray_SortFunc *)@TYPE@_heapsort;
- descr->f->argsort[PyArray_HEAPSORT] = \
+ descr->f->argsort[PyArray_HEAPSORT] =
(PyArray_ArgSortFunc *)@TYPE@_aheapsort;
- descr->f->sort[PyArray_MERGESORT] = \
+ descr->f->sort[PyArray_MERGESORT] =
(PyArray_SortFunc *)@TYPE@_mergesort;
- descr->f->argsort[PyArray_MERGESORT] = \
+ descr->f->argsort[PyArray_MERGESORT] =
(PyArray_ArgSortFunc *)@TYPE@_amergesort;
/**end repeat**/
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index dccd4984f..2f49e03a5 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -1099,16 +1099,15 @@ PyArray_CheckStrides(int elsize, int nd, intp numbytes, intp offset,
static PyObject *
array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"shape", "dtype", "buffer",
- "offset", "strides",
+ static char *kwlist[] = {"shape", "dtype", "buffer", "offset", "strides",
"order", NULL};
- PyArray_Descr *descr=NULL;
+ PyArray_Descr *descr = NULL;
int itemsize;
PyArray_Dims dims = {NULL, 0};
PyArray_Dims strides = {NULL, 0};
PyArray_Chunk buffer;
- longlong offset=0;
- NPY_ORDER order=PyArray_CORDER;
+ longlong offset = 0;
+ NPY_ORDER order = PyArray_CORDER;
int fortran = 0;
PyArrayObject *ret;
@@ -1268,73 +1267,65 @@ array_alloc(PyTypeObject *type, Py_ssize_t NPY_UNUSED(nitems))
NPY_NO_EXPORT PyTypeObject PyArray_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
- "numpy.ndarray", /* tp_name */
- sizeof(PyArrayObject), /* tp_basicsize */
- 0, /* tp_itemsize */
+ 0, /* ob_size */
+#endif
+ "numpy.ndarray", /* tp_name */
+ sizeof(PyArrayObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
/* methods */
- (destructor)array_dealloc, /* tp_dealloc */
- (printfunc)NULL, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- (cmpfunc)0, /* tp_compare */
- (reprfunc)array_repr, /* tp_repr */
- &array_as_number, /* tp_as_number */
- &array_as_sequence, /* tp_as_sequence */
- &array_as_mapping, /* tp_as_mapping */
- (hashfunc)0, /* tp_hash */
- (ternaryfunc)0, /* tp_call */
- (reprfunc)array_str, /* tp_str */
- (getattrofunc)0, /* tp_getattro */
- (setattrofunc)0, /* tp_setattro */
- &array_as_buffer, /* tp_as_buffer */
+ (destructor)array_dealloc, /* tp_dealloc */
+ (printfunc)NULL, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
+#endif
+ (reprfunc)array_repr, /* tp_repr */
+ &array_as_number, /* tp_as_number */
+ &array_as_sequence, /* tp_as_sequence */
+ &array_as_mapping, /* tp_as_mapping */
+ (hashfunc)0, /* tp_hash */
+ (ternaryfunc)0, /* tp_call */
+ (reprfunc)array_str, /* tp_str */
+ (getattrofunc)0, /* tp_getattro */
+ (setattrofunc)0, /* tp_setattro */
+ &array_as_buffer, /* tp_as_buffer */
(Py_TPFLAGS_DEFAULT
| Py_TPFLAGS_BASETYPE
- | Py_TPFLAGS_CHECKTYPES), /* tp_flags */
- /*Documentation string */
- 0, /* tp_doc */
-
- (traverseproc)0, /* tp_traverse */
- (inquiry)0, /* tp_clear */
- (richcmpfunc)array_richcompare, /* tp_richcompare */
- offsetof(PyArrayObject, weakreflist), /* tp_weaklistoffset */
-
- /* Iterator support (use standard) */
-
- (getiterfunc)array_iter, /* tp_iter */
- (iternextfunc)0, /* tp_iternext */
-
- /* Sub-classing (new-style object) support */
-
- array_methods, /* tp_methods */
- 0, /* tp_members */
- array_getsetlist, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)0, /* tp_init */
- array_alloc, /* tp_alloc */
- (newfunc)array_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
-#endif
+ | Py_TPFLAGS_CHECKTYPES), /* tp_flags */
+ 0, /* tp_doc */
+
+ (traverseproc)0, /* tp_traverse */
+ (inquiry)0, /* tp_clear */
+ (richcmpfunc)array_richcompare, /* tp_richcompare */
+ offsetof(PyArrayObject, weakreflist), /* tp_weaklistoffset */
+ (getiterfunc)array_iter, /* tp_iter */
+ (iternextfunc)0, /* tp_iternext */
+ array_methods, /* tp_methods */
+ 0, /* tp_members */
+ array_getsetlist, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)0, /* tp_init */
+ array_alloc, /* tp_alloc */
+ (newfunc)array_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 7080fbe7a..420fcea7d 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -2147,7 +2147,13 @@ VOID_nonzero (char *ip, PyArrayObject *ap)
#undef __ALIGNED
-/****************** compare **********************************/
+/*
+ *****************************************************************************
+ ** COMPARISON FUNCTIONS **
+ *****************************************************************************
+ */
+
+/* boolean type */
static int
BOOL_compare(Bool *ip1, Bool *ip2, PyArrayObject *NPY_UNUSED(ap))
@@ -2155,48 +2161,144 @@ BOOL_compare(Bool *ip1, Bool *ip2, PyArrayObject *NPY_UNUSED(ap))
return (*ip1 ? (*ip2 ? 0 : 1) : (*ip2 ? -1 : 0));
}
+
+/* integer types */
+
/**begin repeat
-#fname=BYTE,UBYTE,SHORT,USHORT,INT,UINT,LONG,ULONG,LONGLONG,ULONGLONG,FLOAT,DOUBLE,LONGDOUBLE,DATETIME,TIMEDELTA#
-#type=byte, ubyte, short, ushort, int, uint, long, ulong, longlong, ulonglong, float, double, longdouble, datetime, timedelta#
-*/
+ * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, DATETIME, TIMEDELTA#
+ * #type = byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong, datetime, timedelta#
+ */
static int
-@fname@_compare (@type@ *ip1, @type@ *ip2, PyArrayObject *NPY_UNUSED(ap))
+@TYPE@_compare (@type@ *pa, @type@ *pb, PyArrayObject *NPY_UNUSED(ap))
{
- return *ip1 < *ip2 ? -1 : *ip1 == *ip2 ? 0 : 1;
+ const @type@ a = *pa;
+ const @type@ b = *pb;
+
+ return a < b ? -1 : a == b ? 0 : 1;
}
/**end repeat**/
-/* compare imaginary part first, then complex if equal imaginary */
+
+/* float types */
+
+/*
+ * The real/complex comparison functions are compatible with the new sort
+ * order for nans introduced in numpy 1.4.0. All nan values now compare
+ * larger than non-nan values and are sorted to the end. The comparison
+ * order is:
+ *
+ * Real: [R, nan]
+ * Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
+ *
+ * where complex values with the same nan placements are sorted according
+ * to the non-nan part if it exists. If both the real and imaginary parts
+ * of complex types are non-nan the order is the same as the real parts
+ * unless they happen to be equal, in which case the order is that of the
+ * imaginary parts.
+ */
+
/**begin repeat
-#fname=CFLOAT, CDOUBLE, CLONGDOUBLE#
-#type= float, double, longdouble#
-*/
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE#
+ * #type = float, double, longdouble#
+ */
+
+#define LT(a,b) ((a) < (b) || ((b) != (b) && (a) ==(a)))
static int
-@fname@_compare (@type@ *ip1, @type@ *ip2, PyArrayObject *NPY_UNUSED(ap))
+@TYPE@_compare(@type@ *pa, @type@ *pb)
{
- if (*ip1 == *ip2) {
- return ip1[1]<ip2[1] ? -1 : (ip1[1] == ip2[1] ? 0 : 1);
+ const @type@ a = *pa;
+ const @type@ b = *pb;
+ int ret;
+
+ if (LT(a,b)) {
+ ret = -1;
+ }
+ else if (LT(b,a)) {
+ ret = 1;
+ }
+ else {
+ ret = 0;
+ }
+ return ret;
+}
+
+
+static int
+C@TYPE@_compare(@type@ *pa, @type@ *pb)
+{
+ const @type@ ar = pa[0];
+ const @type@ ai = pa[1];
+ const @type@ br = pb[0];
+ const @type@ bi = pb[1];
+ int ret;
+
+ if (ar < br) {
+ if (ai == ai || bi != bi) {
+ ret = -1;
+ }
+ else {
+ ret = 1;
+ }
+ }
+ else if (br < ar) {
+ if (bi == bi || ai != ai) {
+ ret = 1;
+ }
+ else {
+ ret = -1;
+ }
+ }
+ else if (ar == br || (ar != ar && br != br)) {
+ if (LT(ai,bi)) {
+ ret = -1;
+ }
+ else if (LT(bi,ai)) {
+ ret = 1;
+ }
+ else {
+ ret = 0;
+ }
+ }
+ else if (ar == ar) {
+ ret = -1;
}
else {
- return *ip1 < *ip2 ? -1 : 1;
+ ret = 1;
}
+
+ return ret;
}
- /**end repeat**/
+
+#undef LT
+
+/**end repeat**/
+
+
+/* object type */
static int
OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap))
{
if ((*ip1 == NULL) || (*ip2 == NULL)) {
- if (ip1 == ip2) return 1;
- if (ip1 == NULL) return -1;
+ if (ip1 == ip2) {
+ return 1;
+ }
+ if (ip1 == NULL) {
+ return -1;
+ }
return 1;
}
return PyObject_Compare(*ip1, *ip2);
}
+
+/* string type */
+
static int
STRING_compare(char *ip1, char *ip2, PyArrayObject *ap)
{
@@ -2213,33 +2315,38 @@ STRING_compare(char *ip1, char *ip2, PyArrayObject *ap)
return 0;
}
-/* taken from Python */
+
+/* unicode type */
+
static int
UNICODE_compare(PyArray_UCS4 *ip1, PyArray_UCS4 *ip2,
PyArrayObject *ap)
{
- int itemsize=ap->descr->elsize;
- PyArray_UCS4 c1, c2;
-
- if (itemsize < 0) return 0;
+ int itemsize = ap->descr->elsize;
+ if (itemsize < 0) {
+ return 0;
+ }
while(itemsize-- > 0) {
- c1 = *ip1++;
- c2 = *ip2++;
-
- if (c1 != c2)
+ PyArray_UCS4 c1 = *ip1++;
+ PyArray_UCS4 c2 = *ip2++;
+ if (c1 != c2) {
return (c1 < c2) ? -1 : 1;
+ }
}
return 0;
}
-/* If fields are defined, then compare on first field and if equal
- compare on second field. Continue until done or comparison results
- in not_equal.
- Must align data passed on to sub-comparisons.
-*/
+/* void type */
+/*
+ * If fields are defined, then compare on first field and if equal
+ * compare on second field. Continue until done or comparison results
+ * in not_equal.
+ *
+ * Must align data passed on to sub-comparisons.
+ */
static int
VOID_compare(char *ip1, char *ip2, PyArrayObject *ap)
{
@@ -2247,17 +2354,18 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap)
PyObject *names, *key;
PyObject *tup, *title;
char *nip1, *nip2;
- int i, offset, res=0;
+ int i, offset, res = 0;
- if (!PyArray_HASFIELDS(ap))
+ if (!PyArray_HASFIELDS(ap)) {
return STRING_compare(ip1, ip2, ap);
-
+ }
descr = ap->descr;
- /* Compare on the first-field. If equal, then
- compare on the second-field, etc.
+ /*
+ * Compare on the first-field. If equal, then
+ * compare on the second-field, etc.
*/
names = descr->names;
- for (i=0; i<PyTuple_GET_SIZE(names); i++) {
+ for (i = 0; i < PyTuple_GET_SIZE(names); i++) {
key = PyTuple_GET_ITEM(names, i);
tup = PyDict_GetItem(descr->fields, key);
if (!PyArg_ParseTuple(tup, "Oi|O", &new, &offset,
@@ -2271,15 +2379,18 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap)
if (((intp)(nip1) % new->alignment) != 0) {
/* create buffer and copy */
nip1 = _pya_malloc(new->elsize);
- if (nip1 == NULL) goto finish;
+ if (nip1 == NULL) {
+ goto finish;
+ }
memcpy(nip1, ip1+offset, new->elsize);
}
if (((intp)(nip2) % new->alignment) != 0) {
/* copy data to a buffer */
nip2 = _pya_malloc(new->elsize);
if (nip2 == NULL) {
- if (nip1 != ip1+offset)
+ if (nip1 != ip1+offset) {
_pya_free(nip1);
+ }
goto finish;
}
memcpy(nip2, ip2+offset, new->elsize);
@@ -2294,7 +2405,9 @@ VOID_compare(char *ip1, char *ip2, PyArrayObject *ap)
_pya_free(nip2);
}
}
- if (res != 0) break;
+ if (res != 0) {
+ break;
+ }
}
finish:
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 92723dbf1..e4873913a 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -2978,16 +2978,16 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, intp num, char *sep)
{
PyArrayObject *ret;
size_t nread = 0;
- char *tmp;
if (PyDataType_REFCHK(dtype)) {
PyErr_SetString(PyExc_ValueError,
- "cannot read into object array");
+ "Cannot read into object array");
Py_DECREF(dtype);
return NULL;
}
if (dtype->elsize == 0) {
- PyErr_SetString(PyExc_ValueError, "0-sized elements.");
+ PyErr_SetString(PyExc_ValueError,
+ "The elements are 0-sized.");
Py_DECREF(dtype);
return NULL;
}
@@ -2997,28 +2997,20 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, intp num, char *sep)
else {
if (dtype->f->scanfunc == NULL) {
PyErr_SetString(PyExc_ValueError,
- "don't know how to read " \
- "character files with that " \
- "array type");
+ "Unable to read character files of that array type");
Py_DECREF(dtype);
return NULL;
}
- ret = array_from_text(dtype, num, sep, &nread,
- fp,
- (next_element) fromfile_next_element,
- (skip_separator) fromfile_skip_separator,
- NULL);
+ ret = array_from_text(dtype, num, sep, &nread, fp,
+ (next_element) fromfile_next_element,
+ (skip_separator) fromfile_skip_separator, NULL);
}
if (((intp) nread) < num) {
- fprintf(stderr, "%ld items requested but only %ld read\n",
- (long) num, (long) nread);
- /* Make sure realloc is > 0 */
- tmp = PyDataMem_RENEW(ret->data,
- NPY_MAX(nread,1) * ret->descr->elsize);
- /* FIXME: This should not raise a memory error when nread == 0
- We should return an empty array or at least raise an EOF Error.
- */
- if ((tmp == NULL) || (nread == 0)) {
+ /* Realloc memory for smaller number of elements */
+ const size_t nsize = NPY_MAX(nread,1)*ret->descr->elsize;
+ char *tmp;
+
+ if((tmp = PyDataMem_RENEW(ret->data, nsize)) == NULL) {
Py_DECREF(ret);
return PyErr_NoMemory();
}
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 909af2243..edcab8b09 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -2626,62 +2626,62 @@ static PyMappingMethods descr_as_mapping = {
};
/****************** End of Mapping Protocol ******************************/
+
NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
- "numpy.dtype", /* tp_name */
- sizeof(PyArray_Descr), /* tp_basicsize */
- 0, /* tp_itemsize */
+ 0, /* ob_size */
+#endif
+ "numpy.dtype", /* tp_name */
+ sizeof(PyArray_Descr), /* tp_basicsize */
+ 0, /* tp_itemsize */
/* methods */
- (destructor)arraydescr_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- (reprfunc)arraydescr_repr, /* tp_repr */
- 0, /* tp_as_number */
- &descr_as_sequence, /* tp_as_sequence */
- &descr_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- (reprfunc)arraydescr_str, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- arraydescr_methods, /* tp_methods */
- arraydescr_members, /* tp_members */
- arraydescr_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- arraydescr_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ (destructor)arraydescr_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ (void *)0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
+ (reprfunc)arraydescr_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ &descr_as_sequence, /* tp_as_sequence */
+ &descr_as_mapping, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ (reprfunc)arraydescr_str, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ (richcmpfunc)arraydescr_richcompare, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ arraydescr_methods, /* tp_methods */
+ arraydescr_members, /* tp_members */
+ arraydescr_getsets, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ arraydescr_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index 869ed613e..38121f910 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -553,61 +553,60 @@ arrayflags_new(PyTypeObject *NPY_UNUSED(self), PyObject *args, PyObject *NPY_UNU
}
NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0,
+ 0, /* ob_size */
+#endif
"numpy.flagsobj",
sizeof(PyArrayFlagsObject),
- 0, /* tp_itemsize */
+ 0, /* tp_itemsize */
/* methods */
- (destructor)arrayflags_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- (cmpfunc)arrayflags_compare, /* tp_compare */
- (reprfunc)arrayflags_print, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- &arrayflags_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- (reprfunc)arrayflags_print, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- arrayflags_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- arrayflags_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ (destructor)arrayflags_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ (cmpfunc)arrayflags_compare, /* tp_compare */
#endif
+ (reprfunc)arrayflags_print, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ &arrayflags_as_mapping, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ (reprfunc)arrayflags_print, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ arrayflags_getsets, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ arrayflags_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
diff --git a/numpy/core/src/multiarray/global.c b/numpy/core/src/multiarray/global.c
deleted file mode 100644
index 22306da23..000000000
--- a/numpy/core/src/multiarray/global.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "config.h"
-
-NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 6f25d9432..d6eac84ef 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -266,27 +266,33 @@ slice_GetIndices(PySliceObject *r, intp length,
/* Aided by Peter J. Verveer's nd_image package and numpy's arraymap ****/
/* and Python's array iterator ***/
-/*NUMPY_API
- * Get Iterator.
- */
-NPY_NO_EXPORT PyObject *
-PyArray_IterNew(PyObject *obj)
+/* get the dataptr from its current coordinates for simple iterator */
+static char*
+get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates)
{
- PyArrayIterObject *it;
- int i, nd;
- PyArrayObject *ao = (PyArrayObject *)obj;
+ npy_intp i;
+ char *ret;
- if (!PyArray_Check(ao)) {
- PyErr_BadInternalCall();
- return NULL;
- }
+ ret = iter->ao->data;
- it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject));
- PyObject_Init((PyObject *)it, &PyArrayIter_Type);
- /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/
- if (it == NULL) {
- return NULL;
+ for(i = 0; i < iter->ao->nd; ++i) {
+ ret += coordinates[i] * iter->strides[i];
}
+
+ return ret;
+}
+
+/*
+ * This is common initialization code between PyArrayIterObject and
+ * PyArrayNeighborhoodIterObject
+ *
+ * Increase ao refcount
+ */
+static PyObject *
+array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
+{
+ int nd, i;
+
nd = ao->nd;
PyArray_UpdateFlags(ao, CONTIGUOUS);
if (PyArray_ISCONTIGUOUS(ao)) {
@@ -307,12 +313,50 @@ PyArray_IterNew(PyObject *obj)
if (i > 0) {
it->factors[nd-i-1] = it->factors[nd-i] * ao->dimensions[nd-i];
}
+ it->bounds[i][0] = 0;
+ it->bounds[i][1] = ao->dimensions[i] - 1;
+ it->limits[i][0] = 0;
+ it->limits[i][1] = ao->dimensions[i] - 1;
+ it->limits_sizes[i] = it->limits[i][1] - it->limits[i][0] + 1;
}
+
+ it->translate = &get_ptr_simple;
PyArray_ITER_RESET(it);
return (PyObject *)it;
}
+static void
+array_iter_base_dealloc(PyArrayIterObject *it)
+{
+ Py_XDECREF(it->ao);
+}
+
+/*NUMPY_API
+ * Get Iterator.
+ */
+NPY_NO_EXPORT PyObject *
+PyArray_IterNew(PyObject *obj)
+{
+ PyArrayIterObject *it;
+ PyArrayObject *ao = (PyArrayObject *)obj;
+
+ if (!PyArray_Check(ao)) {
+ PyErr_BadInternalCall();
+ return NULL;
+ }
+
+ it = (PyArrayIterObject *)_pya_malloc(sizeof(PyArrayIterObject));
+ PyObject_Init((PyObject *)it, &PyArrayIter_Type);
+ /* it = PyObject_New(PyArrayIterObject, &PyArrayIter_Type);*/
+ if (it == NULL) {
+ return NULL;
+ }
+
+ array_iter_base_init(it, ao);
+ return (PyObject *)it;
+}
+
/*NUMPY_API
* Get Iterator broadcast to a particular shape
*/
@@ -502,7 +546,7 @@ arrayiter_next(PyArrayIterObject *it)
static void
arrayiter_dealloc(PyArrayIterObject *it)
{
- Py_XDECREF(it->ao);
+ array_iter_base_dealloc(it);
_pya_free(it);
}
@@ -1174,8 +1218,12 @@ iter_coords_get(PyArrayIterObject *self)
int i;
val = self->index;
for (i = 0; i < nd; i++) {
- self->coordinates[i] = val / self->factors[i];
- val = val % self->factors[i];
+ if (self->factors[i] != 0) {
+ self->coordinates[i] = val / self->factors[i];
+ val = val % self->factors[i];
+ } else {
+ self->coordinates[i] = 0;
+ }
}
}
return PyArray_IntTupleFromIntp(nd, self->coordinates);
@@ -1189,63 +1237,62 @@ static PyGetSetDef iter_getsets[] = {
};
NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
- "numpy.flatiter", /* tp_name */
- sizeof(PyArrayIterObject), /* tp_basicsize */
- 0, /* tp_itemsize */
+ 0, /* ob_size */
+#endif
+ "numpy.flatiter", /* tp_name */
+ sizeof(PyArrayIterObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
/* methods */
- (destructor)arrayiter_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- &iter_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- (richcmpfunc)iter_richcompare, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- (iternextfunc)arrayiter_next, /* tp_iternext */
- iter_methods, /* tp_methods */
- iter_members, /* tp_members */
- iter_getsets, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ (destructor)arrayiter_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
-
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ &iter_as_mapping, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ (richcmpfunc)iter_richcompare, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ (iternextfunc)arrayiter_next, /* tp_iternext */
+ iter_methods, /* tp_methods */
+ iter_members, /* tp_members */
+ iter_getsets, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
/** END of Array Iterator **/
@@ -1652,61 +1699,387 @@ static PyMethodDef arraymultiter_methods[] = {
};
NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
- "numpy.broadcast", /* tp_name */
- sizeof(PyArrayMultiIterObject), /* tp_basicsize */
- 0, /* tp_itemsize */
+ 0, /* ob_size */
+#endif
+ "numpy.broadcast", /* tp_name */
+ sizeof(PyArrayMultiIterObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
/* methods */
- (destructor)arraymultiter_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- (iternextfunc)arraymultiter_next, /* tp_iternext */
- arraymultiter_methods, /* tp_methods */
- arraymultiter_members, /* tp_members */
- arraymultiter_getsetlist, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)0, /* tp_init */
- 0, /* tp_alloc */
- arraymultiter_new, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ (destructor)arraymultiter_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
+#endif
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ (iternextfunc)arraymultiter_next, /* tp_iternext */
+ arraymultiter_methods, /* tp_methods */
+ arraymultiter_members, /* tp_members */
+ arraymultiter_getsetlist, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)0, /* tp_init */
+ 0, /* tp_alloc */
+ arraymultiter_new, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
+};
+
+/*========================= Neighborhood iterator ======================*/
+
+static void neighiter_dealloc(PyArrayNeighborhoodIterObject* iter);
+
+static char* _set_constant(PyArrayNeighborhoodIterObject* iter,
+ PyArrayObject *fill)
+{
+ char *ret;
+ PyArrayIterObject *ar = iter->_internal_iter;
+ int storeflags, st;
+
+ ret = PyDataMem_NEW(ar->ao->descr->elsize);
+ if (ret == NULL) {
+ PyErr_SetNone(PyExc_MemoryError);
+ return NULL;
+ }
+
+ if (PyArray_ISOBJECT(ar->ao)) {
+ memcpy(ret, fill->data, sizeof(PyObject*));
+ Py_INCREF(*(PyObject**)ret);
+ } else {
+ /* Non-object types */
+
+ storeflags = ar->ao->flags;
+ ar->ao->flags |= BEHAVED;
+ st = ar->ao->descr->f->setitem((PyObject*)fill, ret, ar->ao);
+ ar->ao->flags = storeflags;
+
+ if (st < 0) {
+ PyDataMem_FREE(ret);
+ return NULL;
+ }
+ }
+
+ return ret;
+}
+
+#define _INF_SET_PTR(c) \
+ bd = coordinates[c] + p->coordinates[c]; \
+ if (bd < p->limits[c][0] || bd > p->limits[c][1]) { \
+ return niter->constant; \
+ } \
+ _coordinates[c] = bd;
+
+/* set the dataptr from its current coordinates */
+static char*
+get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates)
+{
+ int i;
+ npy_intp bd, _coordinates[NPY_MAXDIMS];
+ PyArrayNeighborhoodIterObject *niter = (PyArrayNeighborhoodIterObject*)_iter;
+ PyArrayIterObject *p = niter->_internal_iter;
+
+ for(i = 0; i < niter->nd; ++i) {
+ _INF_SET_PTR(i)
+ }
+
+ return p->translate(p, _coordinates);
+}
+#undef _INF_SET_PTR
+
+#define _NPY_IS_EVEN(x) ((x) % 2 == 0)
+
+/* For an array x of dimension n, and given index i, returns j, 0 <= j < n
+ * such as x[i] = x[j], with x assumed to be mirrored. For example, for x =
+ * {1, 2, 3} (n = 3)
+ *
+ * index -5 -4 -3 -2 -1 0 1 2 3 4 5 6
+ * value 2 3 3 2 1 1 2 3 3 2 1 1
+ *
+ * _npy_pos_index_mirror(4, 3) will return 1, because x[4] = x[1]*/
+static inline npy_intp
+__npy_pos_remainder(npy_intp i, npy_intp n)
+{
+ npy_intp k, l, j;
+
+ /* Mirror i such as it is guaranteed to be positive */
+ if (i < 0) {
+ i = - i - 1;
+ }
+
+ /* compute k and l such as i = k * n + l, 0 <= l < k */
+ k = i / n;
+ l = i - k * n;
+
+ if (_NPY_IS_EVEN(k)) {
+ j = l;
+ } else {
+ j = n - 1 - l;
+ }
+ return j;
+}
+#undef _NPY_IS_EVEN
+
+#define _INF_SET_PTR_MIRROR(c) \
+ lb = p->limits[c][0]; \
+ bd = coordinates[c] + p->coordinates[c] - lb; \
+ _coordinates[c] = lb + __npy_pos_remainder(bd, p->limits_sizes[c]);
+
+/* set the dataptr from its current coordinates */
+static char*
+get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates)
+{
+ int i;
+ npy_intp bd, _coordinates[NPY_MAXDIMS], lb;
+ PyArrayNeighborhoodIterObject *niter = (PyArrayNeighborhoodIterObject*)_iter;
+ PyArrayIterObject *p = niter->_internal_iter;
+
+ for(i = 0; i < niter->nd; ++i) {
+ _INF_SET_PTR_MIRROR(i)
+ }
+
+ return p->translate(p, _coordinates);
+}
+#undef _INF_SET_PTR_MIRROR
+
+/* compute l such as i = k * n + l, 0 <= l < |k| */
+static inline npy_intp
+__npy_euclidean_division(npy_intp i, npy_intp n)
+{
+ npy_intp l;
+
+ l = i % n;
+ if (l < 0) {
+ l += n;
+ }
+ return l;
+}
+
+#define _INF_SET_PTR_CIRCULAR(c) \
+ lb = p->limits[c][0]; \
+ bd = coordinates[c] + p->coordinates[c] - lb; \
+ _coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]);
+
+static char*
+get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates)
+{
+ int i;
+ npy_intp bd, _coordinates[NPY_MAXDIMS], lb;
+ PyArrayNeighborhoodIterObject *niter = (PyArrayNeighborhoodIterObject*)_iter;
+ PyArrayIterObject *p = niter->_internal_iter;
+
+ for(i = 0; i < niter->nd; ++i) {
+ _INF_SET_PTR_CIRCULAR(i)
+ }
+ return p->translate(p, _coordinates);
+}
+
+#undef _INF_SET_PTR_CIRCULAR
+
+/*
+ * fill and x->ao should have equivalent types
+ */
+/*NUMPY_API*/
+NPY_NO_EXPORT PyObject*
+PyArray_NeighborhoodIterNew(PyArrayIterObject *x, intp *bounds,
+ int mode, PyArrayObject* fill)
+{
+ int i;
+ PyArrayNeighborhoodIterObject *ret;
+
+ ret = _pya_malloc(sizeof(*ret));
+ if (ret == NULL) {
+ return NULL;
+ }
+ PyObject_Init((PyObject *)ret, &PyArrayNeighborhoodIter_Type);
+
+ array_iter_base_init((PyArrayIterObject*)ret, x->ao);
+ Py_INCREF(x);
+ ret->_internal_iter = x;
+
+ ret->nd = x->ao->nd;
+
+ for (i = 0; i < ret->nd; ++i) {
+ ret->dimensions[i] = x->ao->dimensions[i];
+ }
+
+ /* Compute the neighborhood size and copy the shape */
+ ret->size = 1;
+ for (i = 0; i < ret->nd; ++i) {
+ ret->bounds[i][0] = bounds[2 * i];
+ ret->bounds[i][1] = bounds[2 * i + 1];
+ ret->size *= (ret->bounds[i][1] - ret->bounds[i][0]) + 1;
+
+ /* limits keep track of valid ranges for the neighborhood: if a bound
+ * of the neighborhood is outside the array, then limits is the same as
+ * boundaries. On the contrary, if a bound is strictly inside the
+ * array, then limits correspond to the array range. For example, for
+ * an array [1, 2, 3], if bounds are [-1, 3], limits will be [-1, 3],
+ * but if bounds are [1, 2], then limits will be [0, 2].
+ *
+ * This is used by neighborhood iterators stacked on top of this one */
+ ret->limits[i][0] = ret->bounds[i][0] < 0 ? ret->bounds[i][0] : 0;
+ ret->limits[i][1] = ret->bounds[i][1] >= ret->dimensions[i] - 1 ?
+ ret->bounds[i][1] :
+ ret->dimensions[i] - 1;
+ ret->limits_sizes[i] = (ret->limits[i][1] - ret->limits[i][0]) + 1;
+ }
+
+ switch (mode) {
+ case NPY_NEIGHBORHOOD_ITER_ZERO_PADDING:
+ ret->constant = PyArray_Zero(x->ao);
+ ret->mode = mode;
+ ret->translate = &get_ptr_constant;
+ break;
+ case NPY_NEIGHBORHOOD_ITER_ONE_PADDING:
+ ret->constant = PyArray_One(x->ao);
+ ret->mode = mode;
+ ret->translate = &get_ptr_constant;
+ break;
+ case NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING:
+ /* New reference in returned value of _set_constant if array
+ * object */
+ assert(PyArray_EquivArrTypes(x->ao, fill) == NPY_TRUE);
+ ret->constant = _set_constant(ret, fill);
+ if (ret->constant == NULL) {
+ goto clean_x;
+ }
+ ret->mode = mode;
+ ret->translate = &get_ptr_constant;
+ break;
+ case NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING:
+ ret->mode = mode;
+ ret->constant = NULL;
+ ret->translate = &get_ptr_mirror;
+ break;
+ case NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING:
+ ret->mode = mode;
+ ret->constant = NULL;
+ ret->translate = &get_ptr_circular;
+ break;
+ default:
+ PyErr_SetString(PyExc_ValueError, "Unsupported padding mode");
+ goto clean_x;
+ }
+
+ /*
+ * XXX: we force x iterator to be non contiguous because we need
+ * coordinates... Modifying the iterator here is not great
+ */
+ x->contiguous = 0;
+
+ PyArrayNeighborhoodIter_Reset(ret);
+
+ return (PyObject*)ret;
+
+clean_x:
+ Py_DECREF(ret->_internal_iter);
+ array_iter_base_dealloc((PyArrayIterObject*)ret);
+ _pya_free((PyArrayObject*)ret);
+ return NULL;
+}
+
+static void neighiter_dealloc(PyArrayNeighborhoodIterObject* iter)
+{
+ if (iter->mode == NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING) {
+ if (PyArray_ISOBJECT(iter->_internal_iter->ao)) {
+ Py_DECREF(*(PyObject**)iter->constant);
+ }
+ }
+ if (iter->constant != NULL) {
+ PyDataMem_FREE(iter->constant);
+ }
+ Py_DECREF(iter->_internal_iter);
+
+ array_iter_base_dealloc((PyArrayIterObject*)iter);
+ _pya_free((PyArrayObject*)iter);
+}
+
+NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+#endif
+ "numpy.neigh_internal_iter", /* tp_name*/
+ sizeof(PyArrayNeighborhoodIterObject), /* tp_basicsize*/
+ 0, /* tp_itemsize*/
+ (destructor)neighiter_dealloc, /* tp_dealloc*/
+ 0, /* tp_print*/
+ 0, /* tp_getattr*/
+ 0, /* tp_setattr*/
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
+ 0, /* tp_repr*/
+ 0, /* tp_as_number*/
+ 0, /* tp_as_sequence*/
+ 0, /* tp_as_mapping*/
+ 0, /* tp_hash */
+ 0, /* tp_call*/
+ 0, /* tp_str*/
+ 0, /* tp_getattro*/
+ 0, /* tp_setattro*/
+ 0, /* tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT, /* tp_flags*/
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ (iternextfunc)0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ (initproc)0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index a1c787a97..389adf02f 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -22,9 +22,9 @@
static PyObject *
array_subscript_simple(PyArrayObject *self, PyObject *op);
-/*************************************************************************
- **************** Implement Mapping Protocol ***************************
- *************************************************************************/
+/******************************************************************************
+ *** IMPLEMENT MAPPING PROTOCOL ***
+ *****************************************************************************/
NPY_NO_EXPORT Py_ssize_t
array_length(PyArrayObject *self)
@@ -1612,63 +1612,62 @@ arraymapiter_dealloc(PyArrayMapIterObject *mit)
* slice syntax.
*/
NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
- "numpy.mapiter", /* tp_name */
- sizeof(PyArrayIterObject), /* tp_basicsize */
- 0, /* tp_itemsize */
+ 0, /* ob_size */
+#endif
+ "numpy.mapiter", /* tp_name */
+ sizeof(PyArrayIterObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
/* methods */
- (destructor)arraymapiter_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- 0, /* tp_doc */
- (traverseproc)0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- (iternextfunc)0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ (destructor)arraymapiter_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
/** END of Subscript Iterator **/
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 8e9bf24e4..de99ca137 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -767,6 +767,49 @@ array_wraparray(PyArrayObject *self, PyObject *args)
return NULL;
}
arr = PyTuple_GET_ITEM(args, 0);
+ if (arr == NULL) {
+ return NULL;
+ }
+ if (!PyArray_Check(arr)) {
+ PyErr_SetString(PyExc_TypeError,
+ "can only be called with ndarray object");
+ return NULL;
+ }
+
+ if (self->ob_type != arr->ob_type){
+ Py_INCREF(PyArray_DESCR(arr));
+ ret = PyArray_NewFromDescr(self->ob_type,
+ PyArray_DESCR(arr),
+ PyArray_NDIM(arr),
+ PyArray_DIMS(arr),
+ PyArray_STRIDES(arr), PyArray_DATA(arr),
+ PyArray_FLAGS(arr), (PyObject *)self);
+ if (ret == NULL) {
+ return NULL;
+ }
+ Py_INCREF(arr);
+ PyArray_BASE(ret) = arr;
+ return ret;
+ } else {
+ /*The type was set in __array_prepare__*/
+ Py_INCREF(arr);
+ return arr;
+ }
+}
+
+
+static PyObject *
+array_preparearray(PyArrayObject *self, PyObject *args)
+{
+ PyObject *arr;
+ PyObject *ret;
+
+ if (PyTuple_Size(args) < 1) {
+ PyErr_SetString(PyExc_TypeError,
+ "only accepts 1 argument");
+ return NULL;
+ }
+ arr = PyTuple_GET_ITEM(args, 0);
if (!PyArray_Check(arr)) {
PyErr_SetString(PyExc_TypeError,
"can only be called with ndarray object");
@@ -2031,6 +2074,8 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
/* for subtypes */
{"__array__", (PyCFunction)array_getarray,
METH_VARARGS, NULL},
+ {"__array_prepare__", (PyCFunction)array_preparearray,
+ METH_VARARGS, NULL},
{"__array_wrap__", (PyCFunction)array_wraparray,
METH_VARARGS, NULL},
diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src
new file mode 100644
index 000000000..c09ccbc9d
--- /dev/null
+++ b/numpy/core/src/multiarray/multiarray_tests.c.src
@@ -0,0 +1,390 @@
+#include <Python.h>
+#include "numpy/ndarrayobject.h"
+
+/*
+ * TODO:
+ * - Handle mode
+ */
+
+/**begin repeat
+ * #type = double, int#
+ * #typenum = NPY_DOUBLE, NPY_INT#
+ */
+/*
+ * For each point visited by itx, copy the neighborhood around that point
+ * (as exposed by niterx) into a freshly allocated array appended to the
+ * list *out.  Returns 0 on success, -1 with an exception set on failure.
+ */
+static int copy_@type@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx,
+        npy_intp *bounds,
+        PyObject **out)
+{
+    npy_intp i, j;
+    @type@ *ptr;
+    npy_intp odims[NPY_MAXDIMS];
+    PyArrayObject *aout;
+
+    /* The neighborhood shape is loop-invariant: compute it once */
+    for (j = 0; j < itx->ao->nd; ++j) {
+        odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1;
+    }
+
+    for (i = 0; i < itx->size; ++i) {
+        PyArrayNeighborhoodIter_Reset(niterx);
+
+        aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, @typenum@);
+        if (aout == NULL) {
+            return -1;
+        }
+
+        ptr = (@type@*)aout->data;
+
+        for (j = 0; j < niterx->size; ++j) {
+            *ptr = *((@type@*)niterx->dataptr);
+            PyArrayNeighborhoodIter_Next(niterx);
+            ptr += 1;
+        }
+
+        /*
+         * BUG FIX: PyList_Append takes its own reference, so the extra
+         * Py_INCREF in the original leaked one reference per appended
+         * array.  Also check Append's return value.
+         */
+        if (PyList_Append(*out, (PyObject*)aout) < 0) {
+            Py_DECREF(aout);
+            return -1;
+        }
+        Py_DECREF(aout);
+        PyArray_ITER_NEXT(itx);
+    }
+
+    return 0;
+}
+/**end repeat**/
+
+/*
+ * Object-array variant of copy_@type@: goes through the dtype's copyswap
+ * so that the reference counts of the copied PyObject pointers are
+ * maintained by numpy.  Returns 0 on success, -1 with an exception set
+ * on failure.
+ */
+static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx,
+        npy_intp *bounds,
+        PyObject **out)
+{
+    npy_intp i, j;
+    npy_intp odims[NPY_MAXDIMS];
+    PyArrayObject *aout;
+    PyArray_CopySwapFunc *copyswap = itx->ao->descr->f->copyswap;
+    npy_int itemsize = PyArray_ITEMSIZE(itx->ao);
+
+    /* The neighborhood shape is loop-invariant: compute it once */
+    for (j = 0; j < itx->ao->nd; ++j) {
+        odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1;
+    }
+
+    for (i = 0; i < itx->size; ++i) {
+        PyArrayNeighborhoodIter_Reset(niterx);
+
+        aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, NPY_OBJECT);
+        if (aout == NULL) {
+            return -1;
+        }
+
+        for (j = 0; j < niterx->size; ++j) {
+            copyswap(aout->data + j * itemsize, niterx->dataptr, 0, NULL);
+            PyArrayNeighborhoodIter_Next(niterx);
+        }
+
+        /*
+         * BUG FIX: PyList_Append takes its own reference; the original's
+         * extra Py_INCREF leaked one reference per appended array.
+         */
+        if (PyList_Append(*out, (PyObject*)aout) < 0) {
+            Py_DECREF(aout);
+            return -1;
+        }
+        Py_DECREF(aout);
+        PyArray_ITER_NEXT(itx);
+    }
+
+    return 0;
+}
+
+/*
+ * Python-visible test driver: iterate over x and, for each point, append
+ * the surrounding neighborhood (clipped/padded per `mode`, with `fill`
+ * used for constant padding) to a list which is returned.
+ */
+static PyObject*
+test_neighborhood_iterator(PyObject* NPY_UNUSED(self), PyObject* args)
+{
+    PyObject *x, *fill, *out, *b;
+    PyArrayObject *ax, *afill;
+    PyArrayIterObject *itx;
+    int i, typenum, mode, st;
+    npy_intp bounds[NPY_MAXDIMS*2];
+    PyArrayNeighborhoodIterObject *niterx;
+
+    if (!PyArg_ParseTuple(args, "OOOi", &x, &b, &fill, &mode)) {
+        return NULL;
+    }
+
+    if (!PySequence_Check(b)) {
+        /* BUG FIX: returning NULL without an exception raises SystemError */
+        PyErr_SetString(PyExc_TypeError, "bounds is not a sequence");
+        return NULL;
+    }
+
+    typenum = PyArray_ObjectType(x, 0);
+    typenum = PyArray_ObjectType(fill, typenum);
+
+    ax = (PyArrayObject*)PyArray_FromObject(x, typenum, 1, 10);
+    if (ax == NULL) {
+        return NULL;
+    }
+    if (PySequence_Size(b) != 2 * ax->nd) {
+        PyErr_SetString(PyExc_ValueError,
+                "bounds sequence size not compatible with x input");
+        goto clean_ax;
+    }
+
+    out = PyList_New(0);
+    if (out == NULL) {
+        goto clean_ax;
+    }
+
+    itx = (PyArrayIterObject*)PyArray_IterNew(x);
+    if (itx == NULL) {
+        goto clean_out;
+    }
+
+    /* Compute boundaries for the neighborhood iterator */
+    for (i = 0; i < 2 * ax->nd; ++i) {
+        PyObject* bound;
+        bound = PySequence_GetItem(b, i);
+        /* BUG FIX: original tested `bounds` (the stack array, never NULL),
+         * silently ignoring PySequence_GetItem failures */
+        if (bound == NULL) {
+            goto clean_itx;
+        }
+        if (!PyInt_Check(bound)) {
+            PyErr_SetString(PyExc_ValueError, "bound not long");
+            Py_DECREF(bound);
+            goto clean_itx;
+        }
+        bounds[i] = PyInt_AsLong(bound);
+        Py_DECREF(bound);
+    }
+
+    /* Create the neighborhood iterator */
+    afill = NULL;
+    if (mode == NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING) {
+        afill = (PyArrayObject *)PyArray_FromObject(fill, typenum, 0, 0);
+        if (afill == NULL) {
+            goto clean_itx;
+        }
+    }
+
+    niterx = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(
+                    (PyArrayIterObject*)itx, bounds, mode, afill);
+    if (niterx == NULL) {
+        goto clean_afill;
+    }
+
+    switch (typenum) {
+        case NPY_OBJECT:
+            st = copy_object(itx, niterx, bounds, &out);
+            break;
+        case NPY_INT:
+            st = copy_int(itx, niterx, bounds, &out);
+            break;
+        case NPY_DOUBLE:
+            st = copy_double(itx, niterx, bounds, &out);
+            break;
+        default:
+            PyErr_SetString(PyExc_ValueError, "Type not supported");
+            goto clean_niterx;
+    }
+
+    if (st) {
+        goto clean_niterx;
+    }
+
+    Py_DECREF(niterx);
+    Py_XDECREF(afill);
+    Py_DECREF(itx);
+    Py_DECREF(ax);
+
+    return out;
+
+clean_niterx:
+    Py_DECREF(niterx);
+clean_afill:
+    Py_XDECREF(afill);
+clean_itx:
+    Py_DECREF(itx);
+clean_out:
+    Py_DECREF(out);
+clean_ax:
+    Py_DECREF(ax);
+    return NULL;
+}
+
+/*
+ * Like copy_double, but the outer iterator is itself a neighborhood
+ * iterator (used to test stacked/out-of-bounds neighborhood iteration).
+ * Returns 0 on success, -1 with an exception set on failure.
+ */
+static int
+copy_double_double(PyArrayNeighborhoodIterObject *itx,
+        PyArrayNeighborhoodIterObject *niterx,
+        npy_intp *bounds,
+        PyObject **out)
+{
+    npy_intp i, j;
+    double *ptr;
+    npy_intp odims[NPY_MAXDIMS];
+    PyArrayObject *aout;
+
+    /* The neighborhood shape is loop-invariant: compute it once */
+    for (j = 0; j < itx->ao->nd; ++j) {
+        odims[j] = bounds[2 * j + 1] - bounds[2 * j] + 1;
+    }
+
+    PyArrayNeighborhoodIter_Reset(itx);
+    for (i = 0; i < itx->size; ++i) {
+        aout = (PyArrayObject*)PyArray_SimpleNew(itx->ao->nd, odims, NPY_DOUBLE);
+        if (aout == NULL) {
+            return -1;
+        }
+
+        ptr = (double*)aout->data;
+
+        PyArrayNeighborhoodIter_Reset(niterx);
+        for (j = 0; j < niterx->size; ++j) {
+            *ptr = *((double*)niterx->dataptr);
+            ptr += 1;
+            PyArrayNeighborhoodIter_Next(niterx);
+        }
+
+        /*
+         * BUG FIX: PyList_Append takes its own reference; the original's
+         * extra Py_INCREF leaked one reference per appended array.
+         */
+        if (PyList_Append(*out, (PyObject*)aout) < 0) {
+            Py_DECREF(aout);
+            return -1;
+        }
+        Py_DECREF(aout);
+        PyArrayNeighborhoodIter_Next(itx);
+    }
+    return 0;
+}
+
+/*
+ * Python-visible test driver for stacked neighborhood iterators: a second
+ * neighborhood iterator (bounds b2 / mode2) is built on top of a first
+ * one (bounds b1 / mode1) to exercise out-of-bounds handling.
+ */
+static PyObject*
+test_neighborhood_iterator_oob(PyObject* NPY_UNUSED(self), PyObject* args)
+{
+    PyObject *x, *out, *b1, *b2;
+    PyArrayObject *ax;
+    PyArrayIterObject *itx;
+    int i, typenum, mode1, mode2, st;
+    npy_intp bounds[NPY_MAXDIMS*2];
+    PyArrayNeighborhoodIterObject *niterx1, *niterx2;
+
+    if (!PyArg_ParseTuple(args, "OOiOi", &x, &b1, &mode1, &b2, &mode2)) {
+        return NULL;
+    }
+
+    if (!PySequence_Check(b1) || !PySequence_Check(b2)) {
+        /* BUG FIX: returning NULL without an exception raises SystemError */
+        PyErr_SetString(PyExc_TypeError, "bounds are not sequences");
+        return NULL;
+    }
+
+    typenum = PyArray_ObjectType(x, 0);
+
+    ax = (PyArrayObject*)PyArray_FromObject(x, typenum, 1, 10);
+    if (ax == NULL) {
+        return NULL;
+    }
+    if (PySequence_Size(b1) != 2 * ax->nd) {
+        PyErr_SetString(PyExc_ValueError,
+                "bounds sequence 1 size not compatible with x input");
+        goto clean_ax;
+    }
+    if (PySequence_Size(b2) != 2 * ax->nd) {
+        PyErr_SetString(PyExc_ValueError,
+                "bounds sequence 2 size not compatible with x input");
+        goto clean_ax;
+    }
+
+    out = PyList_New(0);
+    if (out == NULL) {
+        goto clean_ax;
+    }
+
+    itx = (PyArrayIterObject*)PyArray_IterNew(x);
+    if (itx == NULL) {
+        goto clean_out;
+    }
+
+    /* Compute boundaries for the first neighborhood iterator */
+    for (i = 0; i < 2 * ax->nd; ++i) {
+        PyObject* bound;
+        bound = PySequence_GetItem(b1, i);
+        /* BUG FIX: original tested `bounds` (the stack array, never NULL) */
+        if (bound == NULL) {
+            goto clean_itx;
+        }
+        if (!PyInt_Check(bound)) {
+            PyErr_SetString(PyExc_ValueError, "bound not long");
+            Py_DECREF(bound);
+            goto clean_itx;
+        }
+        bounds[i] = PyInt_AsLong(bound);
+        Py_DECREF(bound);
+    }
+
+    /* Create the first neighborhood iterator */
+    niterx1 = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(
+                    (PyArrayIterObject*)itx, bounds,
+                    mode1, NULL);
+    if (niterx1 == NULL) {
+        goto clean_itx;
+    }
+
+    /* Compute boundaries for the second neighborhood iterator */
+    for (i = 0; i < 2 * ax->nd; ++i) {
+        PyObject* bound;
+        bound = PySequence_GetItem(b2, i);
+        /* BUG FIX: test `bound`, and clean up niterx1 (the original's
+         * goto clean_itx leaked niterx1) */
+        if (bound == NULL) {
+            goto clean_niterx1;
+        }
+        if (!PyInt_Check(bound)) {
+            PyErr_SetString(PyExc_ValueError, "bound not long");
+            Py_DECREF(bound);
+            goto clean_niterx1;
+        }
+        bounds[i] = PyInt_AsLong(bound);
+        Py_DECREF(bound);
+    }
+
+    niterx2 = (PyArrayNeighborhoodIterObject*)PyArray_NeighborhoodIterNew(
+                    (PyArrayIterObject*)niterx1, bounds,
+                    mode2, NULL);
+    /* BUG FIX: the original re-tested niterx1 here, so a failed niterx2
+     * allocation was dereferenced below */
+    if (niterx2 == NULL) {
+        goto clean_niterx1;
+    }
+
+    switch (typenum) {
+        case NPY_DOUBLE:
+            st = copy_double_double(niterx1, niterx2, bounds, &out);
+            break;
+        default:
+            PyErr_SetString(PyExc_ValueError, "Type not supported");
+            goto clean_niterx2;
+    }
+
+    if (st) {
+        goto clean_niterx2;
+    }
+
+    Py_DECREF(niterx2);
+    Py_DECREF(niterx1);
+    Py_DECREF(itx);
+    Py_DECREF(ax);
+    return out;
+
+clean_niterx2:
+    Py_DECREF(niterx2);
+clean_niterx1:
+    Py_DECREF(niterx1);
+clean_itx:
+    Py_DECREF(itx);
+clean_out:
+    Py_DECREF(out);
+clean_ax:
+    Py_DECREF(ax);
+    return NULL;
+}
+
+/* Method table for the multiarray_tests extension module */
+static PyMethodDef Multiarray_TestsMethods[] = {
+    {"test_neighborhood_iterator", test_neighborhood_iterator, METH_VARARGS, NULL},
+    {"test_neighborhood_iterator_oob", test_neighborhood_iterator_oob, METH_VARARGS, NULL},
+    {NULL, NULL, 0, NULL}        /* Sentinel */
+};
+
+/* Module initialization entry point for multiarray_tests (Python 2 style) */
+PyMODINIT_FUNC
+initmultiarray_tests(void)
+{
+    PyObject *m;
+
+    m = Py_InitModule("multiarray_tests", Multiarray_TestsMethods);
+    if (m == NULL) {
+        return;
+    }
+    import_array();
+    if (PyErr_Occurred()) {
+        /* BUG FIX: the message named the wrong module ("umath_tests") */
+        PyErr_SetString(PyExc_RuntimeError,
+                "cannot load multiarray_tests module.");
+    }
+}
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 6f9c34313..a793e0364 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -27,7 +27,7 @@
#include "config.h"
-#include "global.c"
+NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
#define PyAO PyArrayObject
@@ -906,7 +906,7 @@ PyArray_CopyAndTranspose(PyObject *op)
}
/*
- * Implementation which is common between PyArray_Correlate and PyArray_Acorrelate
+ * Implementation which is common between PyArray_Correlate and PyArray_Correlate2
*
* inverted is set to 1 if computed correlate(ap2, ap1), 0 otherwise
*/
@@ -1065,13 +1065,13 @@ _pyarray_revert(PyArrayObject *ret)
}
/*NUMPY_API
- * acorrelate(a1,a2,mode)
+ * correlate(a1,a2,mode)
*
- * This function computes the usual correlation (acorrelate(a1, a2) !=
- * accorrelate(a2, a1), and conjugate the second argument for complex inputs
+ * This function computes the usual correlation (correlate(a1, a2) !=
+ * correlate(a2, a1), and conjugate the second argument for complex inputs
*/
NPY_NO_EXPORT PyObject *
-PyArray_Acorrelate(PyObject *op1, PyObject *op2, int mode)
+PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode)
{
PyArrayObject *ap1, *ap2, *ret = NULL;
int typenum;
@@ -1796,7 +1796,7 @@ static PyObject *array_correlate(PyObject *NPY_UNUSED(dummy), PyObject *args, Py
}
static PyObject*
-array_acorrelate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
+array_correlate2(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
{
PyObject *shape, *a0;
int mode = 0;
@@ -1806,7 +1806,7 @@ array_acorrelate(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
&a0, &shape, &mode)) {
return NULL;
}
- return PyArray_Acorrelate(a0, shape, mode);
+ return PyArray_Correlate2(a0, shape, mode);
}
static PyObject *
@@ -2448,8 +2448,8 @@ static struct PyMethodDef array_module_methods[] = {
{"correlate",
(PyCFunction)array_correlate,
METH_VARARGS | METH_KEYWORDS, NULL},
- {"acorrelate",
- (PyCFunction)array_acorrelate,
+ {"correlate2",
+ (PyCFunction)array_correlate2,
METH_VARARGS | METH_KEYWORDS, NULL},
{"frombuffer",
(PyCFunction)array_frombuffer,
@@ -2710,6 +2710,11 @@ PyMODINIT_FUNC initmultiarray(void) {
if (PyType_Ready(&PyArrayMultiIter_Type) < 0) {
return;
}
+ PyArrayNeighborhoodIter_Type.tp_new = PyType_GenericNew;
+ if (PyType_Ready(&PyArrayNeighborhoodIter_Type) < 0) {
+ return;
+ }
+
PyArrayDescr_Type.tp_hash = PyArray_DescrHash;
if (PyType_Ready(&PyArrayDescr_Type) < 0) {
return;
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 28ba7f47a..e50106866 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -37,63 +37,62 @@ NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[] = {
* Floating, ComplexFloating, Flexible, Character, TimeInteger#
*/
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size*/
- "numpy.@name@", /* tp_name*/
- sizeof(PyObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
+ 0, /* ob_size */
+#endif
+ "numpy.@name@", /* tp_name*/
+ sizeof(PyObject), /* tp_basicsize*/
+ 0, /* tp_itemsize */
/* methods */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ 0, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ 0, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
/**end repeat**/
@@ -1805,63 +1804,62 @@ static PyBufferProcs gentype_as_buffer = {
#define LEAFFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES
NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size*/
- "numpy.generic", /* tp_name*/
- sizeof(PyObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
+ 0, /* ob_size */
+#endif
+ "numpy.generic", /* tp_name*/
+ sizeof(PyObject), /* tp_basicsize*/
+ 0, /* tp_itemsize */
/* methods */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ 0, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ 0, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
static void
@@ -2610,61 +2608,61 @@ object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds)
}
NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size*/
- "numpy.object_", /* tp_name*/
- sizeof(PyObjectScalarObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
- (destructor)object_arrtype_dealloc, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- &object_arrtype_as_sequence, /* tp_as_sequence */
- &object_arrtype_as_mapping, /* tp_as_mapping */
- 0, /* tp_hash */
- (ternaryfunc)object_arrtype_call, /* tp_call */
- 0, /* tp_str */
- (getattrofunc)object_arrtype_getattro, /* tp_getattro */
- (setattrofunc)object_arrtype_setattro, /* tp_setattro */
- &object_arrtype_as_buffer, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ 0, /* ob_size */
#endif
+ "numpy.object_", /* tp_name*/
+ sizeof(PyObjectScalarObject), /* tp_basicsize*/
+ 0, /* tp_itemsize */
+ (destructor)object_arrtype_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
+#endif
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ &object_arrtype_as_sequence, /* tp_as_sequence */
+ &object_arrtype_as_mapping, /* tp_as_mapping */
+ 0, /* tp_hash */
+ (ternaryfunc)object_arrtype_call, /* tp_call */
+ 0, /* tp_str */
+ (getattrofunc)object_arrtype_getattro, /* tp_getattro */
+ (setattrofunc)object_arrtype_setattro, /* tp_setattro */
+ &object_arrtype_as_buffer, /* tp_as_buffer */
+ 0, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
static PyObject *
@@ -2714,61 +2712,61 @@ gen_arrtype_subscript(PyObject *self, PyObject *key)
* #ex = _,_,_,#
*/
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size*/
- "numpy.@name@@ex@", /* tp_name*/
- sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ 0, /* ob_size */
#endif
+ "numpy.@name@@ex@", /* tp_name*/
+ sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
+ 0, /* tp_itemsize */
+ 0, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
+#endif
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ 0, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
/**end repeat**/
@@ -2797,61 +2795,61 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
#define _THIS_SIZE "256"
#endif
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
PyObject_HEAD_INIT(NULL)
- 0, /* ob_size*/
- "numpy.@name@" _THIS_SIZE, /* tp_name*/
- sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
- 0, /* tp_itemsize */
- 0, /* tp_dealloc */
- 0, /* tp_print */
- 0, /* tp_getattr */
- 0, /* tp_setattr */
- 0, /* tp_compare */
- 0, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- 0, /* tp_hash */
- 0, /* tp_call */
- 0, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- 0, /* tp_flags */
- 0, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ 0, /* ob_size */
+#endif
+ "numpy.@name@" _THIS_SIZE, /* tp_name*/
+ sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
+ 0, /* tp_itemsize */
+ 0, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ 0, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
#undef _THIS_SIZE
@@ -2895,62 +2893,62 @@ static PyMappingMethods gentype_as_mapping = {
#define _THIS_DOC "Composed of two " _THIS_SIZE2 " bit floats"
- NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size*/
- "numpy.@name@" _THIS_SIZE1, /* tp_name*/
- sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
- 0, /* tp_itemsize*/
- 0, /* tp_dealloc*/
- 0, /* tp_print*/
- 0, /* tp_getattr*/
- 0, /* tp_setattr*/
- 0, /* tp_compare*/
- 0, /* tp_repr*/
- 0, /* tp_as_number*/
- 0, /* tp_as_sequence*/
- 0, /* tp_as_mapping*/
- 0, /* tp_hash */
- 0, /* tp_call*/
- 0, /* tp_str*/
- 0, /* tp_getattro*/
- 0, /* tp_setattro*/
- 0, /* tp_as_buffer*/
- Py_TPFLAGS_DEFAULT, /* tp_flags*/
- _THIS_DOC, /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- 0, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(0, 0)
+#else
+ PyObject_HEAD_INIT(0)
+ 0, /* ob_size */
+#endif
+ "numpy.@name@" _THIS_SIZE1, /* tp_name*/
+ sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
+ 0, /* tp_itemsize*/
+ 0, /* tp_dealloc*/
+ 0, /* tp_print*/
+ 0, /* tp_getattr*/
+ 0, /* tp_setattr*/
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
+ 0, /* tp_repr*/
+ 0, /* tp_as_number*/
+ 0, /* tp_as_sequence*/
+ 0, /* tp_as_mapping*/
+ 0, /* tp_hash */
+ 0, /* tp_call*/
+ 0, /* tp_str*/
+ 0, /* tp_getattro*/
+ 0, /* tp_setattro*/
+ 0, /* tp_as_buffer*/
+ Py_TPFLAGS_DEFAULT, /* tp_flags*/
+ _THIS_DOC, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
#undef _THIS_SIZE1
#undef _THIS_SIZE2
diff --git a/numpy/core/src/_signbit.c b/numpy/core/src/npymath/_signbit.c
index a2ad38162..a2ad38162 100644
--- a/numpy/core/src/_signbit.c
+++ b/numpy/core/src/npymath/_signbit.c
diff --git a/numpy/core/src/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src
index 21fc7d427..3fde802a2 100644
--- a/numpy/core/src/npy_math.c.src
+++ b/numpy/core/src/npymath/npy_math.c.src
@@ -40,6 +40,18 @@
* #ifdef SYMBOL_DEFINED_WEIRD_PLATFORM
* double exp(double);
* #endif
+ *
+ * Some of the code is taken from msun library in FreeBSD, with the following
+ * notice:
+ *
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
*/
#include <Python.h>
@@ -48,6 +60,8 @@
#include "config.h"
#include "numpy/npy_math.h"
+#include "npy_math_private.h"
+
/*
*****************************************************************************
** BASIC MATH FUNCTIONS **
@@ -56,61 +70,158 @@
/* Original code by Konrad Hinsen. */
#ifndef HAVE_EXPM1
-static double expm1(double x)
+double npy_expm1(double x)
{
- double u = exp(x);
+ double u = npy_exp(x);
if (u == 1.0) {
return x;
} else if (u-1.0 == -1.0) {
return -1;
} else {
- return (u-1.0) * x/log(u);
+ return (u-1.0) * x/npy_log(u);
}
}
#endif
#ifndef HAVE_LOG1P
-static double log1p(double x)
+double npy_log1p(double x)
{
double u = 1. + x;
if (u == 1.0) {
return x;
} else {
- return log(u) * x / (u - 1);
+ return npy_log(u) * x / (u - 1);
}
}
#endif
+/* Taken from FreeBSD mlib, adapted for numpy
+ *
+ * XXX: we could be a bit faster by reusing high/low words for inf/nan
+ * classification instead of calling npy_isinf/npy_isnan: we should have some
+ * macros for this, though, instead of doing it manually
+ */
+#ifndef HAVE_ATAN2
+/* XXX: we should have this in npy_math.h */
+#define NPY_DBL_EPSILON 1.2246467991473531772E-16
+double npy_atan2(double y, double x)
+{
+ npy_int32 k, m, iy, ix, hx, hy;
+ npy_uint32 lx,ly;
+ double z;
+
+ EXTRACT_WORDS(hx, lx, x);
+ ix = hx & 0x7fffffff;
+ EXTRACT_WORDS(hy, ly, y);
+ iy = hy & 0x7fffffff;
+
+ /* if x or y is nan, return nan */
+ if (npy_isnan(x * y)) {
+ return x + y;
+ }
+
+ if (x == 1.0) {
+ return npy_atan(y);
+ }
+
+ m = 2 * npy_signbit(x) + npy_signbit(y);
+ if (y == 0.0) {
+ switch(m) {
+ case 0:
+ case 1: return y; /* atan(+-0,+anything)=+-0 */
+ case 2: return NPY_PI;/* atan(+0,-anything) = pi */
+ case 3: return -NPY_PI;/* atan(-0,-anything) =-pi */
+ }
+ }
+
+ if (x == 0.0) {
+ return y > 0 ? NPY_PI_2 : -NPY_PI_2;
+ }
+
+ if (npy_isinf(x)) {
+ if (npy_isinf(y)) {
+ switch(m) {
+ case 0: return NPY_PI_4;/* atan(+INF,+INF) */
+ case 1: return -NPY_PI_4;/* atan(-INF,+INF) */
+ case 2: return 3.0*NPY_PI_4;/*atan(+INF,-INF)*/
+ case 3: return -3.0*NPY_PI_4;/*atan(-INF,-INF)*/
+ }
+ } else {
+ switch(m) {
+ case 0: return NPY_PZERO; /* atan(+...,+INF) */
+ case 1: return NPY_NZERO; /* atan(-...,+INF) */
+ case 2: return NPY_PI; /* atan(+...,-INF) */
+ case 3: return -NPY_PI; /* atan(-...,-INF) */
+ }
+ }
+ }
+
+ if (npy_isinf(y)) {
+ return y > 0 ? NPY_PI_2 : -NPY_PI_2;
+ }
+
+ /* compute y/x */
+ k = (iy - ix)>>20;
+ if(k > 60) { /* |y/x| > 2**60 */
+ z = NPY_PI_2 + 0.5 * NPY_DBL_EPSILON;
+ m &= 1;
+ } else if(hx < 0 && k < -60) {
+ z = 0.0; /* 0 > |y|/x > -2**-60 */
+ } else {
+ z = npy_atan(npy_fabs(y/x)); /* safe to do y/x */
+ }
+
+ switch (m) {
+ case 0: return z ; /* atan(+,+) */
+ case 1: return -z ; /* atan(-,+) */
+ case 2: return NPY_PI - (z - NPY_DBL_EPSILON);/* atan(+,-) */
+ default: /* case 3 */
+ return (z - NPY_DBL_EPSILON) - NPY_PI;/* atan(-,-) */
+ }
+}
+
+#endif
+
#ifndef HAVE_HYPOT
-static double hypot(double x, double y)
+double npy_hypot(double x, double y)
{
double yx;
- x = fabs(x);
- y = fabs(y);
+ /* Handle the case where x or y is a NaN */
+ if (npy_isnan(x * y)) {
+ if (npy_isinf(x) || npy_isinf(y)) {
+ return NPY_INFINITY;
+ } else {
+ return NPY_NAN;
+ }
+ }
+
+ x = npy_fabs(x);
+ y = npy_fabs(y);
if (x < y) {
double temp = x;
x = y;
y = temp;
}
- if (x == 0.)
+ if (x == 0.) {
return 0.;
+ }
else {
yx = y/x;
- return x*sqrt(1.+yx*yx);
+ return x*npy_sqrt(1.+yx*yx);
}
}
#endif
#ifndef HAVE_ACOSH
-static double acosh(double x)
+double npy_acosh(double x)
{
- return 2*log(sqrt((x+1.0)/2)+sqrt((x-1.0)/2));
+ return 2*npy_log(npy_sqrt((x+1.0)/2)+npy_sqrt((x-1.0)/2));
}
#endif
#ifndef HAVE_ASINH
-static double asinh(double xx)
+double npy_asinh(double xx)
{
double x, d;
int sign;
@@ -125,37 +236,37 @@ static double asinh(double xx)
if (x > 1e8) {
d = x;
} else {
- d = sqrt(x*x + 1);
+ d = npy_sqrt(x*x + 1);
}
- return sign*log1p(x*(1.0 + x/(d+1)));
+ return sign*npy_log1p(x*(1.0 + x/(d+1)));
}
#endif
#ifndef HAVE_ATANH
-static double atanh(double x)
+double npy_atanh(double x)
{
if (x > 0) {
- return -0.5*log1p(-2.0*x/(1.0 + x));
+ return -0.5*npy_log1p(-2.0*x/(1.0 + x));
}
else {
- return 0.5*log1p(2.0*x/(1.0 - x));
+ return 0.5*npy_log1p(2.0*x/(1.0 - x));
}
}
#endif
#ifndef HAVE_RINT
-static double rint(double x)
+double npy_rint(double x)
{
double y, r;
- y = floor(x);
+ y = npy_floor(x);
r = x - y;
if (r > 0.5) goto rndup;
/* Round to nearest even */
if (r==0.5) {
- r = y - 2.0*floor(0.5*y);
+ r = y - 2.0*npy_floor(0.5*y);
if (r==1.0) {
rndup:
y+=1.0;
@@ -166,30 +277,41 @@ static double rint(double x)
#endif
#ifndef HAVE_TRUNC
-static double trunc(double x)
+double npy_trunc(double x)
{
- return x < 0 ? ceil(x) : floor(x);
+ return x < 0 ? npy_ceil(x) : npy_floor(x);
}
#endif
#ifndef HAVE_EXP2
#define LOG2 0.69314718055994530943
-static double exp2(double x)
+double npy_exp2(double x)
{
- return exp(LOG2*x);
+ return npy_exp(LOG2*x);
}
#undef LOG2
#endif
#ifndef HAVE_LOG2
#define INVLOG2 1.4426950408889634074
-static double log2(double x)
+double npy_log2(double x)
{
- return INVLOG2*log(x);
+ return INVLOG2*npy_log(x);
}
#undef INVLOG2
#endif
+#ifndef HAVE_COPYSIGN
+double npy_copysign(double x, double y)
+{
+ npy_uint32 hx,hy;
+ GET_HIGH_WORD(hx,x);
+ GET_HIGH_WORD(hy,y);
+ SET_HIGH_WORD(x,(hx&0x7fffffff)|(hy&0x80000000));
+ return x;
+}
+#endif
+
/*
*****************************************************************************
** IEEE 754 FPU HANDLING **
@@ -247,25 +369,25 @@ int _npy_signbit_ld (long double x)
#undef @kind@@c@
#endif
#ifndef HAVE_@KIND@@C@
-static @type@ @kind@@c@(@type@ x)
+@type@ npy_@kind@@c@(@type@ x)
{
- return (@type@) @kind@((double)x);
+ return (@type@) npy_@kind@((double)x);
}
#endif
/**end repeat1**/
/**begin repeat1
- * #kind = atan2,hypot,pow,fmod#
- * #KIND = ATAN2,HYPOT,POW,FMOD#
+ * #kind = atan2,hypot,pow,fmod,copysign#
+ * #KIND = ATAN2,HYPOT,POW,FMOD,COPYSIGN#
*/
#ifdef @kind@@c@
#undef @kind@@c@
#endif
#ifndef HAVE_@KIND@@C@
-static @type@ @kind@@c@(@type@ x, @type@ y)
+@type@ npy_@kind@@c@(@type@ x, @type@ y)
{
- return (@type@) @kind@((double)x, (double) y);
+ return (@type@) npy_@kind@((double)x, (double) y);
}
#endif
/**end repeat1**/
@@ -274,10 +396,10 @@ static @type@ @kind@@c@(@type@ x, @type@ y)
#undef modf@c@
#endif
#ifndef HAVE_MODF@C@
-static @type@ modf@c@(@type@ x, @type@ *iptr)
+@type@ npy_modf@c@(@type@ x, @type@ *iptr)
{
double niptr;
- double y = modf((double)x, &niptr);
+ double y = npy_modf((double)x, &niptr);
*iptr = (@type@) niptr;
return (@type@) y;
}
@@ -285,27 +407,6 @@ static @type@ modf@c@(@type@ x, @type@ *iptr)
/**end repeat**/
-/*
- * Useful constants in three precisions:
- * XXX: those should really be in the header
- */
-
-/**begin repeat
- * #c = f, ,l#
- * #C = F, ,L#
- */
-#define NPY_E@c@ 2.7182818284590452353602874713526625@C@ /* e */
-#define NPY_LOG2E@c@ 1.4426950408889634073599246810018921@C@ /* log_2 e */
-#define NPY_LOG10E@c@ 0.4342944819032518276511289189166051@C@ /* log_10 e */
-#define NPY_LOGE2@c@ 0.6931471805599453094172321214581766@C@ /* log_e 2 */
-#define NPY_LOGE10@c@ 2.3025850929940456840179914546843642@C@ /* log_e 10 */
-#define NPY_PI@c@ 3.1415926535897932384626433832795029@C@ /* pi */
-#define NPY_PI_2@c@ 1.5707963267948966192313216916397514@C@ /* pi/2 */
-#define NPY_PI_4@c@ 0.7853981633974483096156608458198757@C@ /* pi/4 */
-#define NPY_1_PI@c@ 0.3183098861837906715377675267450287@C@ /* 1/pi */
-#define NPY_2_PI@c@ 0.6366197723675813430755350534900574@C@ /* 2/pi */
-/**end repeat**/
-
/*
* Non standard functions
*/
@@ -321,17 +422,17 @@ static @type@ modf@c@(@type@ x, @type@ *iptr)
#define RAD2DEG (180.0@c@/NPY_PI@c@)
#define DEG2RAD (NPY_PI@c@/180.0@c@)
-static @type@ rad2deg@c@(@type@ x)
+@type@ npy_rad2deg@c@(@type@ x)
{
return x*RAD2DEG;
}
-static @type@ deg2rad@c@(@type@ x)
+@type@ npy_deg2rad@c@(@type@ x)
{
return x*DEG2RAD;
}
-static @type@ log2_1p@c@(@type@ x)
+@type@ npy_log2_1p@c@(@type@ x)
{
@type@ u = 1 + x;
if (u == 1) {
@@ -341,9 +442,9 @@ static @type@ log2_1p@c@(@type@ x)
}
}
-static @type@ exp2_1m@c@(@type@ x)
+@type@ npy_exp2_1m@c@(@type@ x)
{
- @type@ u = exp@c@(x);
+ @type@ u = npy_exp@c@(x);
if (u == 1.0) {
return LOGE2*x;
} else if (u - 1 == -1) {
@@ -353,31 +454,36 @@ static @type@ exp2_1m@c@(@type@ x)
}
}
-static @type@ logaddexp@c@(@type@ x, @type@ y)
+@type@ npy_logaddexp@c@(@type@ x, @type@ y)
{
const @type@ tmp = x - y;
if (tmp > 0) {
return x + npy_log1p@c@(npy_exp@c@(-tmp));
}
- else {
+ else if (tmp <= 0) {
return y + npy_log1p@c@(npy_exp@c@(tmp));
}
+ else {
+ /* NaNs, or infinities of the same sign involved */
+ return x + y;
+ }
}
-static @type@ logaddexp2@c@(@type@ x, @type@ y)
+@type@ npy_logaddexp2@c@(@type@ x, @type@ y)
{
const @type@ tmp = x - y;
if (tmp > 0) {
- return x + log2_1p@c@(npy_exp2@c@(-tmp));
+ return x + npy_log2_1p@c@(npy_exp2@c@(-tmp));
+ }
+ else if (tmp <= 0) {
+ return y + npy_log2_1p@c@(npy_exp2@c@(tmp));
}
else {
- return y + log2_1p@c@(npy_exp2@c@(tmp));
+ /* NaNs, or infinities of the same sign involved */
+ return x + y;
}
}
-#define degrees@c@ rad2deg@c@
-#define radians@c@ deg2rad@c@
-
#undef LOGE2
#undef LOG2E
#undef RAD2DEG
@@ -386,38 +492,46 @@ static @type@ logaddexp2@c@(@type@ x, @type@ y)
/**end repeat**/
/*
- * Decorate all the functions: those are the public ones
+ * Decorate all the math functions which are available on the current platform
*/
/**begin repeat
* #type = npy_longdouble,double,float#
* #c = l,,f#
+ * #C = L,,F#
*/
/**begin repeat1
* #kind = sin,cos,tan,sinh,cosh,tanh,fabs,floor,ceil,rint,trunc,sqrt,log10,
- * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2,
- * rad2deg,deg2rad,exp2_1m#
+ * log,exp,expm1,asin,acos,atan,asinh,acosh,atanh,log1p,exp2,log2#
+ * #KIND = SIN,COS,TAN,SINH,COSH,TANH,FABS,FLOOR,CEIL,RINT,TRUNC,SQRT,LOG10,
+ * LOG,EXP,EXPM1,ASIN,ACOS,ATAN,ASINH,ACOSH,ATANH,LOG1P,EXP2,LOG2#
*/
-
+#ifdef HAVE_@KIND@@C@
@type@ npy_@kind@@c@(@type@ x)
{
return @kind@@c@(x);
}
+#endif
/**end repeat1**/
/**begin repeat1
- * #kind = atan2,hypot,pow,fmod,logaddexp,logaddexp2#
+ * #kind = atan2,hypot,pow,fmod,copysign#
+ * #KIND = ATAN2,HYPOT,POW,FMOD,COPYSIGN#
*/
+#ifdef HAVE_@KIND@@C@
@type@ npy_@kind@@c@(@type@ x, @type@ y)
{
return @kind@@c@(x, y);
}
+#endif
/**end repeat1**/
+#ifdef HAVE_MODF@C@
@type@ npy_modf@c@(@type@ x, @type@ *iptr)
{
return modf@c@(x, iptr);
}
+#endif
/**end repeat**/
diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h
new file mode 100644
index 000000000..ea7c47fe8
--- /dev/null
+++ b/numpy/core/src/npymath/npy_math_private.h
@@ -0,0 +1,121 @@
+/*
+ *
+ * ====================================================
+ * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+ *
+ * Developed at SunPro, a Sun Microsystems, Inc. business.
+ * Permission to use, copy, modify, and distribute this
+ * software is freely granted, provided that this notice
+ * is preserved.
+ * ====================================================
+ */
+
+/*
+ * from: @(#)fdlibm.h 5.1 93/09/24
+ * $FreeBSD$
+ */
+
+#ifndef _NPY_MATH_PRIVATE_H_
+#define _NPY_MATH_PRIVATE_H_
+
+#include <numpy/npy_endian.h>
+
+/*
+ * The original fdlibm code used statements like:
+ * n0 = ((*(int*)&one)>>29)^1; * index of high word *
+ * ix0 = *(n0+(int*)&x); * high word of x *
+ * ix1 = *((1-n0)+(int*)&x); * low word of x *
+ * to dig two 32 bit words out of the 64 bit IEEE floating point
+ * value. That is non-ANSI, and, moreover, the gcc instruction
+ * scheduler gets it wrong. We instead use the following macros.
+ * Unlike the original code, we determine the endianness at compile
+ * time, not at run time; I don't see much benefit to selecting
+ * endianness at run time.
+ */
+
+/*
+ * A union which permits us to convert between a double and two 32 bit
+ * ints.
+ */
+
+/* XXX: not really, but we already make this assumption elsewhere. Will have to
+ * fix this at some point */
+#define IEEE_WORD_ORDER NPY_BYTE_ORDER
+
+#if IEEE_WORD_ORDER == NPY_BIG_ENDIAN
+
+typedef union
+{
+ double value;
+ struct
+ {
+ npy_uint32 msw;
+ npy_uint32 lsw;
+ } parts;
+} ieee_double_shape_type;
+
+#endif
+
+#if IEEE_WORD_ORDER == NPY_LITTLE_ENDIAN
+
+typedef union
+{
+ double value;
+ struct
+ {
+ npy_uint32 lsw;
+ npy_uint32 msw;
+ } parts;
+} ieee_double_shape_type;
+
+#endif
+
+/* Get two 32 bit ints from a double. */
+
+#define EXTRACT_WORDS(ix0,ix1,d) \
+do { \
+ ieee_double_shape_type ew_u; \
+ ew_u.value = (d); \
+ (ix0) = ew_u.parts.msw; \
+ (ix1) = ew_u.parts.lsw; \
+} while (0)
+
+/* Get the more significant 32 bit int from a double. */
+
+#define GET_HIGH_WORD(i,d) \
+do { \
+ ieee_double_shape_type gh_u; \
+ gh_u.value = (d); \
+ (i) = gh_u.parts.msw; \
+} while (0)
+
+/* Get the less significant 32 bit int from a double. */
+
+#define GET_LOW_WORD(i,d) \
+do { \
+ ieee_double_shape_type gl_u; \
+ gl_u.value = (d); \
+ (i) = gl_u.parts.lsw; \
+} while (0)
+
+/* Set the more significant 32 bits of a double from an int. */
+
+#define SET_HIGH_WORD(d,v) \
+do { \
+ ieee_double_shape_type sh_u; \
+ sh_u.value = (d); \
+ sh_u.parts.msw = (v); \
+ (d) = sh_u.value; \
+} while (0)
+
+/* Set the less significant 32 bits of a double from an int. */
+
+#define SET_LOW_WORD(d,v) \
+do { \
+ ieee_double_shape_type sl_u; \
+ sl_u.value = (d); \
+ sl_u.parts.lsw = (v); \
+ (d) = sl_u.value; \
+} while (0)
+
+#endif /* !_NPY_MATH_PRIVATE_H_ */
diff --git a/numpy/core/src/py3k_notes.txt b/numpy/core/src/py3k_notes.txt
new file mode 100644
index 000000000..e31755012
--- /dev/null
+++ b/numpy/core/src/py3k_notes.txt
@@ -0,0 +1,197 @@
+Notes on making the transition to python 3.x
+============================================
+
+PyTypeObject
+------------
+
+The PyTypeObject of py3k is binary compatible with the py2k version and the
+old initializers should work. However, there are several considerations to
+keep in mind.
+
+1) Because the first three slots are now part of a struct some compilers issue
+warnings if they are initialized in the old way.
+
+2) The compare slot has been made reserved in order to preserve binary
+compatibility while the tp_compare function went away. The tp_richcompare
+function has replaced it and we need to use that slot instead. This will
+likely require modifications in the searchsorted functions and generic sorts
+that currently use the compare function.
+
+3) The previous numpy practice of initializing the COUNT_ALLOCS slots was
+bogus. They are not supposed to be explicitly initialized and were out of
+place in any case because an extra base slot was added in python 2.6.
+
+Because of these facts it was thought better to use #ifdefs to bring the old
+initializers up to py3k snuff rather than just fill the tp_richcompare slot.
+They also serve to mark the places where changes have been made. The new form
+is shown below. Note that explicit initialization can stop once none of the
+remaining entries are non-zero, because zero is the default value that
+variables with static storage duration receive.
+
+
+NPY_NO_EXPORT PyTypeObject Foo_Type = {
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(0,0)
+#else
+ PyObject_HEAD_INIT(0)
+ 0, /* ob_size */
+#endif
+ "numpy.foo" /* tp_name */
+ 0, /* tp_basicsize */
+ 0, /* tp_itemsize */
+ /* methods */
+ 0, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ (void *)0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
+#endif
+ 0, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ 0, /* tp_call */
+ 0, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ 0, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ 0, /* tp_methods */
+ 0, /* tp_members */
+ 0, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
+ 0 /* tp_version_tag (2.6) */
+};
+
+checklist of types having tp_compare but no tp_richcompare
+
+1) multiarray/flagsobject.c
+
+PyNumberMethods
+---------------
+
+Types with tp_as_number defined
+
+1) multiarray/arrayobject.c
+
+The PyNumberMethods struct has changed enough that it looks easiest to just
+have an alternate version. Note that np_divide, np_long, np_oct, np_hex, and
+np_inplace_divide have gone away. The slot np_int is what np_long used to be,
+np_divide is now np_floor_divide, and np_inplace_divide is now
+np_inplace_floor_divide. We will also have to make sure the *_true_divide
+variants are defined. This should also be done for python < 3.x, but that
+introduces a requirement for the Py_TPFLAGS_HAVE_CLASS in the type flag.
+
+/*
+ * Number implementations must check *both* arguments for proper type and
+ * implement the necessary conversions in the slot functions themselves.
+*/
+PyNumberMethods foo_number_methods = {
+ (binaryfunc)0, /* nb_add */
+ (binaryfunc)0, /* nb_subtract */
+ (binaryfunc)0, /* nb_multiply */
+ (binaryfunc)0, /* nb_remainder */
+ (binaryfunc)0, /* nb_divmod */
+ (ternaryfunc)0, /* nb_power */
+ (unaryfunc)0, /* nb_negative */
+ (unaryfunc)0, /* nb_positive */
+ (unaryfunc)0, /* nb_absolute */
+ (inquiry)0, /* nb_bool, nee nb_nonzero */
+ (unaryfunc)0, /* nb_invert */
+ (binaryfunc)0, /* nb_lshift */
+ (binaryfunc)0, /* nb_rshift */
+ (binaryfunc)0, /* nb_and */
+ (binaryfunc)0, /* nb_xor */
+ (binaryfunc)0, /* nb_or */
+ (unaryfunc)0, /* nb_int */
+ (void *)0, /* nb_reserved, nee nb_long */
+ (unaryfunc)0, /* nb_float */
+ (binaryfunc)0, /* nb_inplace_add */
+ (binaryfunc)0, /* nb_inplace_subtract */
+ (binaryfunc)0, /* nb_inplace_multiply */
+ (binaryfunc)0, /* nb_inplace_remainder */
+ (ternaryfunc)0, /* nb_inplace_power */
+ (binaryfunc)0, /* nb_inplace_lshift */
+ (binaryfunc)0, /* nb_inplace_rshift */
+ (binaryfunc)0, /* nb_inplace_and */
+ (binaryfunc)0, /* nb_inplace_xor */
+ (binaryfunc)0, /* nb_inplace_or */
+ (binaryfunc)0, /* nb_floor_divide */
+ (binaryfunc)0, /* nb_true_divide */
+ (binaryfunc)0, /* nb_inplace_floor_divide */
+ (binaryfunc)0, /* nb_inplace_true_divide */
+ (unaryfunc)0 /* nb_index */
+};
+
+PySequenceMethods
+-----------------
+
+Types with tp_as_sequence defined
+
+1) multiarray/descriptor.c
+2) multiarray/scalartypes.c.src
+3) multiarray/arrayobject.c
+
+PySequenceMethods in py3k are binary compatible with py2k, but some of the
+slots have gone away. I suspect this means some functions need redefining so
+the semantics of the slots needs to be checked.
+
+PySequenceMethods foo_sequence_methods = {
+ (lenfunc)0, /* sq_length */
+ (binaryfunc)0, /* sq_concat */
+ (ssizeargfunc)0, /* sq_repeat */
+ (ssizeargfunc)0, /* sq_item */
+ (void *)0, /* nee sq_slice */
+ (ssizeobjargproc)0, /* sq_ass_item */
+ (void *)0, /* nee sq_ass_slice */
+ (objobjproc)0, /* sq_contains */
+ (binaryfunc)0, /* sq_inplace_concat */
+ (ssizeargfunc)0 /* sq_inplace_repeat */
+};
+
+PyMappingMethods
+----------------
+
+Types with tp_as_mapping defined
+
+1) multiarray/descriptor.c
+2) multiarray/iterators.c
+3) multiarray/scalartypes.c.src
+4) multiarray/flagsobject.c
+5) multiarray/arrayobject.c
+
+PyMappingMethods in py3k look to be the same as in py2k. The semantics
+of the slots needs to be checked.
+
+PyMappingMethods foo_mapping_methods = {
+ (lenfunc)0, /* mp_length */
+ (binaryfunc)0, /* mp_subscript */
+ (objobjargproc)0 /* mp_ass_subscript */
+};
+
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 25e15bca1..10cfa8716 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1044,6 +1044,16 @@ NPY_NO_EXPORT void
}
/**end repeat1**/
+NPY_NO_EXPORT void
+@TYPE@_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ in2 = *(@type@ *)ip2;
+ *((@type@ *)op1)= npy_copysign@c@(in1, in2);
+ }
+}
+
/**begin repeat1
* #kind = maximum, minimum#
* #OP = >=, <=#
@@ -1265,9 +1275,18 @@ C@TYPE@_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func
const @type@ in1i = ((@type@ *)ip1)[1];
const @type@ in2r = ((@type@ *)ip2)[0];
const @type@ in2i = ((@type@ *)ip2)[1];
- @type@ d = in2r*in2r + in2i*in2i;
- ((@type@ *)op1)[0] = (in1r*in2r + in1i*in2i)/d;
- ((@type@ *)op1)[1] = (in1i*in2r - in1r*in2i)/d;
+ if (npy_fabs@c@(in2r) >= npy_fabs@c@(in2i)) {
+ const @type@ rat = in2i/in2r;
+ const @type@ scl = 1.0@c@/(in2r + in2i*rat);
+ ((@type@ *)op1)[0] = (in1r + in1i*rat)*scl;
+ ((@type@ *)op1)[1] = (in1i - in1r*rat)*scl;
+ }
+ else {
+ const @type@ rat = in2r/in2i;
+ const @type@ scl = 1.0@c@/(in2i + in2r*rat);
+ ((@type@ *)op1)[0] = (in1r*rat + in1i)*scl;
+ ((@type@ *)op1)[1] = (in1i*rat - in1r)*scl;
+ }
}
}
@@ -1279,9 +1298,16 @@ C@TYPE@_floor_divide(char **args, intp *dimensions, intp *steps, void *NPY_UNUSE
const @type@ in1i = ((@type@ *)ip1)[1];
const @type@ in2r = ((@type@ *)ip2)[0];
const @type@ in2i = ((@type@ *)ip2)[1];
- @type@ d = in2r*in2r + in2i*in2i;
- ((@type@ *)op1)[0] = npy_floor@c@((in1r*in2r + in1i*in2i)/d);
- ((@type@ *)op1)[1] = 0;
+ if (npy_fabs@c@(in2r) >= npy_fabs@c@(in2i)) {
+ const @type@ rat = in2i/in2r;
+ ((@type@ *)op1)[0] = npy_floor@c@((in1r + in1i*rat)/(in2r + in2i*rat));
+ ((@type@ *)op1)[1] = 0;
+ }
+ else {
+ const @type@ rat = in2r/in2i;
+ ((@type@ *)op1)[0] = npy_floor@c@((in1r*rat + in1i)/(in2i + in2r*rat));
+ ((@type@ *)op1)[1] = 0;
+ }
}
}
diff --git a/numpy/core/src/umath/loops.h b/numpy/core/src/umath/loops.h
index 9de4c5893..bf33ea88c 100644
--- a/numpy/core/src/umath/loops.h
+++ b/numpy/core/src/umath/loops.h
@@ -1527,6 +1527,9 @@ NPY_NO_EXPORT void
FLOAT_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+FLOAT_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
+
NPY_NO_EXPORT void
FLOAT_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
@@ -1668,6 +1671,9 @@ NPY_NO_EXPORT void
DOUBLE_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+DOUBLE_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
+
NPY_NO_EXPORT void
DOUBLE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
@@ -1809,6 +1815,9 @@ NPY_NO_EXPORT void
LONGDOUBLE_signbit(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+LONGDOUBLE_copysign(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
+
NPY_NO_EXPORT void
LONGDOUBLE_maximum(char **args, intp *dimensions, intp *steps, void *NPY_UNUSED(func));
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 97fcc8124..94152ccb7 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -239,6 +239,122 @@ static char *_types_msg = "function not supported for these types, " \
"and can't coerce safely to supported types";
/*
+ * This function analyzes the input arguments
+ * and determines an appropriate __array_prepare__ function to call
+ * for the outputs.
+ *
+ * If an output argument is provided, then it is wrapped
+ * with its own __array_prepare__ not with the one determined by
+ * the input arguments.
+ *
+ * if the provided output argument is already an ndarray,
+ * the wrapping function is None (which means no wrapping will
+ * be done --- not even PyArray_Return).
+ *
+ * A NULL is placed in output_wrap for outputs that
+ * should just have PyArray_Return called.
+ */
+static void
+_find_array_prepare(PyObject *args, PyObject **output_wrap, int nin, int nout)
+{
+ Py_ssize_t nargs;
+ int i;
+ int np = 0;
+ PyObject *with_wrap[NPY_MAXARGS], *wraps[NPY_MAXARGS];
+ PyObject *obj, *wrap = NULL;
+
+ nargs = PyTuple_GET_SIZE(args);
+ for (i = 0; i < nin; i++) {
+ obj = PyTuple_GET_ITEM(args, i);
+ if (PyArray_CheckExact(obj) || PyArray_IsAnyScalar(obj)) {
+ continue;
+ }
+ wrap = PyObject_GetAttrString(obj, "__array_prepare__");
+ if (wrap) {
+ if (PyCallable_Check(wrap)) {
+ with_wrap[np] = obj;
+ wraps[np] = wrap;
+ ++np;
+ }
+ else {
+ Py_DECREF(wrap);
+ wrap = NULL;
+ }
+ }
+ else {
+ PyErr_Clear();
+ }
+ }
+ if (np > 0) {
+ /* If we have some wraps defined, find the one of highest priority */
+ wrap = wraps[0];
+ if (np > 1) {
+ double maxpriority = PyArray_GetPriority(with_wrap[0],
+ PyArray_SUBTYPE_PRIORITY);
+ for (i = 1; i < np; ++i) {
+ double priority = PyArray_GetPriority(with_wrap[i],
+ PyArray_SUBTYPE_PRIORITY);
+ if (priority > maxpriority) {
+ maxpriority = priority;
+ Py_DECREF(wrap);
+ wrap = wraps[i];
+ }
+ else {
+ Py_DECREF(wraps[i]);
+ }
+ }
+ }
+ }
+
+ /*
+ * Here wrap is the wrapping function determined from the
+ * input arrays (could be NULL).
+ *
+ * For all the output arrays decide what to do.
+ *
+ * 1) Use the wrap function determined from the input arrays
+ * This is the default if the output array is not
+ * passed in.
+ *
+ * 2) Use the __array_prepare__ method of the output object.
+ * This is special cased for
+ * exact ndarray so that no PyArray_Return is
+ * done in that case.
+ */
+ for (i = 0; i < nout; i++) {
+ int j = nin + i;
+ int incref = 1;
+ output_wrap[i] = wrap;
+ if (j < nargs) {
+ obj = PyTuple_GET_ITEM(args, j);
+ if (obj == Py_None) {
+ continue;
+ }
+ if (PyArray_CheckExact(obj)) {
+ output_wrap[i] = Py_None;
+ }
+ else {
+ PyObject *owrap = PyObject_GetAttrString(obj,
+ "__array_prepare__");
+ incref = 0;
+ if (!(owrap) || !(PyCallable_Check(owrap))) {
+ Py_XDECREF(owrap);
+ owrap = wrap;
+ incref = 1;
+ PyErr_Clear();
+ }
+ output_wrap[i] = owrap;
+ }
+ }
+ if (incref) {
+ Py_XINCREF(output_wrap[i]);
+ }
+ }
+ Py_XDECREF(wrap);
+ return;
+}
+
+/*
* Called for non-NULL user-defined functions.
* The object should be a CObject pointing to a linked-list of functions
* storing the function, data, and signature of all user-defined functions.
@@ -1059,6 +1175,7 @@ construct_arrays(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps,
npy_intp temp_dims[NPY_MAXDIMS];
npy_intp *out_dims;
int out_nd;
+ PyObject *wraparr[NPY_MAXARGS];
/* Check number of arguments */
nargs = PyTuple_Size(args);
@@ -1337,16 +1454,60 @@ construct_arrays(PyUFuncLoopObject *loop, PyObject *args, PyArrayObject **mps,
return -1;
}
- /* Recover mps[i]. */
- if (self->core_enabled) {
- PyArrayObject *ao = mps[i];
- mps[i] = (PyArrayObject *)mps[i]->base;
- Py_DECREF(ao);
- }
+ /* Recover mps[i]. */
+ if (self->core_enabled) {
+ PyArrayObject *ao = mps[i];
+ mps[i] = (PyArrayObject *)mps[i]->base;
+ Py_DECREF(ao);
+ }
}
/*
+ * Use __array_prepare__ on all outputs
+ * if present on one of the input arguments.
+ * If present for multiple inputs:
+ * use __array_prepare__ of input object with largest
+ * __array_priority__ (default = 0.0)
+ *
+ * Exception: we should not wrap outputs for items already
+ * passed in as output-arguments. These items should either
+ * be left unwrapped or wrapped by calling their own __array_prepare__
+ * routine.
+ *
+ * For each output argument, wrap will be either
+ * NULL --- call PyArray_Return() -- default if no output arguments given
+ * None --- array-object passed in don't call PyArray_Return
+ * method --- the __array_prepare__ method to call.
+ */
+ _find_array_prepare(args, wraparr, loop->ufunc->nin, loop->ufunc->nout);
+
+ /* wrap outputs */
+ for (i = 0; i < loop->ufunc->nout; i++) {
+ int j = loop->ufunc->nin+i;
+ PyObject *wrap;
+ wrap = wraparr[i];
+ if (wrap != NULL) {
+ if (wrap == Py_None) {
+ Py_DECREF(wrap);
+ continue;
+ }
+ PyObject *res = PyObject_CallFunction(wrap, "O(OOi)",
+ mps[j], loop->ufunc, args, i);
+ Py_DECREF(wrap);
+ if ((res == NULL) || (res == Py_None)) {
+ if (!PyErr_Occurred()){
+ PyErr_SetString(PyExc_TypeError,
+ "__array_prepare__ must return an ndarray or subclass thereof");
+ }
+ return -1;
+ }
+ Py_DECREF(mps[j]);
+ mps[j] = (PyArrayObject *)res;
+ }
+ }
+
+ /*
* If any of different type, or misaligned or swapped
* then must use buffers
*/
@@ -3827,7 +3988,10 @@ ufunc_repr(PyUFuncObject *self)
}
-/* -------------------------------------------------------- */
+/******************************************************************************
+ *** UFUNC METHODS ***
+ *****************************************************************************/
+
/*
* op.outer(a,b) is equivalent to op(a[:,NewAxis,NewAxis,etc.],b)
@@ -3962,6 +4126,10 @@ static struct PyMethodDef ufunc_methods[] = {
};
+/******************************************************************************
+ *** UFUNC GETSET ***
+ *****************************************************************************/
+
/* construct the string y1,y2,...,yn */
static PyObject *
@@ -4000,7 +4168,8 @@ _typecharfromnum(int num) {
static PyObject *
ufunc_get_doc(PyUFuncObject *self)
{
- /* Put docstring first or FindMethod finds it... could so some
+ /*
+ * Put docstring first or FindMethod finds it... could do some
* introspection on name and nin + nout to automate the first part
* of it the doc string shouldn't need the calling convention
* construct name(x1, x2, ...,[ out1, out2, ...]) __doc__
@@ -4148,65 +4317,68 @@ static PyGetSetDef ufunc_getset[] = {
{NULL, NULL, NULL, NULL, NULL}, /* Sentinel */
};
+
+/******************************************************************************
+ *** UFUNC TYPE OBJECT ***
+ *****************************************************************************/
+
NPY_NO_EXPORT PyTypeObject PyUFunc_Type = {
- PyObject_HEAD_INIT(0)
- 0, /* ob_size */
- "numpy.ufunc", /* tp_name */
- sizeof(PyUFuncObject), /* tp_basicsize */
- 0, /* tp_itemsize */
+#if defined(NPY_PY3K)
+ PyVarObject_HEAD_INIT(NULL, 0)
+#else
+ PyObject_HEAD_INIT(NULL)
+ 0, /* ob_size */
+#endif
+ "numpy.ufunc", /* tp_name */
+ sizeof(PyUFuncObject), /* tp_basicsize */
+ 0, /* tp_itemsize */
/* methods */
- (destructor)ufunc_dealloc, /* tp_dealloc */
- (printfunc)0, /* tp_print */
- (getattrfunc)0, /* tp_getattr */
- (setattrfunc)0, /* tp_setattr */
- (cmpfunc)0, /* tp_compare */
- (reprfunc)ufunc_repr, /* tp_repr */
- 0, /* tp_as_number */
- 0, /* tp_as_sequence */
- 0, /* tp_as_mapping */
- (hashfunc)0, /* tp_hash */
- (ternaryfunc)ufunc_generic_call, /* tp_call */
- (reprfunc)ufunc_repr, /* tp_str */
- 0, /* tp_getattro */
- 0, /* tp_setattro */
- 0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
- NULL, /* tp_doc */ /* was Ufunctype__doc__ */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- ufunc_methods, /* tp_methods */
- 0, /* tp_members */
- ufunc_getset, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
- 0, /* tp_free */
- 0, /* tp_is_gc */
- 0, /* tp_bases */
- 0, /* tp_mro */
- 0, /* tp_cache */
- 0, /* tp_subclasses */
- 0, /* tp_weaklist */
- 0, /* tp_del */
-
-#ifdef COUNT_ALLOCS
- /* these must be last and never explicitly initialized */
- 0, /* tp_allocs */
- 0, /* tp_frees */
- 0, /* tp_maxalloc */
- 0, /* tp_prev */
- 0, /* *tp_next */
+ (destructor)ufunc_dealloc, /* tp_dealloc */
+ 0, /* tp_print */
+ 0, /* tp_getattr */
+ 0, /* tp_setattr */
+#if defined(NPY_PY3K)
+ 0, /* tp_reserved */
+#else
+ 0, /* tp_compare */
#endif
+ (reprfunc)ufunc_repr, /* tp_repr */
+ 0, /* tp_as_number */
+ 0, /* tp_as_sequence */
+ 0, /* tp_as_mapping */
+ 0, /* tp_hash */
+ (ternaryfunc)ufunc_generic_call, /* tp_call */
+ (reprfunc)ufunc_repr, /* tp_str */
+ 0, /* tp_getattro */
+ 0, /* tp_setattro */
+ 0, /* tp_as_buffer */
+ Py_TPFLAGS_DEFAULT, /* tp_flags */
+ 0, /* tp_doc */
+ 0, /* tp_traverse */
+ 0, /* tp_clear */
+ 0, /* tp_richcompare */
+ 0, /* tp_weaklistoffset */
+ 0, /* tp_iter */
+ 0, /* tp_iternext */
+ ufunc_methods, /* tp_methods */
+ 0, /* tp_members */
+ ufunc_getset, /* tp_getset */
+ 0, /* tp_base */
+ 0, /* tp_dict */
+ 0, /* tp_descr_get */
+ 0, /* tp_descr_set */
+ 0, /* tp_dictoffset */
+ 0, /* tp_init */
+ 0, /* tp_alloc */
+ 0, /* tp_new */
+ 0, /* tp_free */
+ 0, /* tp_is_gc */
+ 0, /* tp_bases */
+ 0, /* tp_mro */
+ 0, /* tp_cache */
+ 0, /* tp_subclasses */
+ 0, /* tp_weaklist */
+ 0, /* tp_del */
};
/* End of code for ufunc objects */
-/* -------------------------------------------------------- */
diff --git a/numpy/core/tests/test_defmatrix.py b/numpy/core/tests/test_defmatrix.py
index e9f0d9a7f..40728bd29 100644
--- a/numpy/core/tests/test_defmatrix.py
+++ b/numpy/core/tests/test_defmatrix.py
@@ -1,5 +1,6 @@
from numpy.testing import *
from numpy.core import *
+from numpy.core.defmatrix import matrix_power
import numpy as np
class TestCtor(TestCase):
@@ -358,6 +359,15 @@ class TestNewScalarIndexing(TestCase):
assert_array_equal(x[:,[1,0]],x[:,::-1])
assert_array_equal(x[[2,1,0],:],x[::-1,:])
+class TestPower(TestCase):
+ def test_returntype(self):
+ a = array([[0,1],[0,0]])
+ assert type(matrix_power(a, 2)) is ndarray
+ a = mat(a)
+ assert type(matrix_power(a, 2)) is matrix
+
+ def test_list(self):
+ assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 7022ef14d..57f1bd4c6 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -4,6 +4,7 @@ import os
import numpy as np
from numpy.testing import *
from numpy.core import *
+from numpy.core.multiarray_tests import test_neighborhood_iterator, test_neighborhood_iterator_oob
from test_print import in_foreign_locale
@@ -279,6 +280,24 @@ class TestMethods(TestCase):
self.failUnlessRaises(ValueError, lambda: a.transpose(0,1,2))
def test_sort(self):
+ # test ordering for floats and complex containing nans. It is only
+ # necessary to check the lessthan comparison, so sorts that
+ # only follow the insertion sort path are sufficient. We only
+ # test doubles and complex doubles as the logic is the same.
+
+ # check doubles
+ msg = "Test real sort order with nans"
+ a = np.array([np.nan, 1, 0])
+ b = sort(a)
+ assert_equal(b, a[::-1], msg)
+ # check complex
+ msg = "Test complex sort order with nans"
+ a = np.zeros(9, dtype=np.complex128)
+ a.real += [np.nan, np.nan, np.nan, 1, 0, 1, 1, 0, 0]
+ a.imag += [np.nan, 1, 0, np.nan, np.nan, 1, 0, 1, 0]
+ b = sort(a)
+ assert_equal(b, a[::-1], msg)
+
# all c scalar sorts use the same code with different types
# so it suffices to run a quick check with one type. The number
# of sorted items must be greater than ~50 to check the actual
@@ -466,6 +485,33 @@ class TestMethods(TestCase):
a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
assert_equal(a.argsort(kind='m'), r)
+ def test_searchsorted(self):
+ # test for floats and complex containing nans. The logic is the
+ # same for all float types so only test double types for now.
+ # The search sorted routines use the compare functions for the
+ # array type, so this checks if that is consistent with the sort
+ # order.
+
+ # check double
+ a = np.array([np.nan, 1, 0])
+ a = np.array([0, 1, np.nan])
+ msg = "Test real searchsorted with nans, side='l'"
+ b = a.searchsorted(a, side='l')
+ assert_equal(b, np.arange(3), msg)
+ msg = "Test real searchsorted with nans, side='r'"
+ b = a.searchsorted(a, side='r')
+ assert_equal(b, np.arange(1,4), msg)
+ # check double complex
+ a = np.zeros(9, dtype=np.complex128)
+ a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
+ a.imag += [0, 1, 0, 1, np.nan, np.nan, 0, 1, np.nan]
+ msg = "Test complex searchsorted with nans, side='l'"
+ b = a.searchsorted(a, side='l')
+ assert_equal(b, np.arange(9), msg)
+ msg = "Test complex searchsorted with nans, side='r'"
+ b = a.searchsorted(a, side='r')
+ assert_equal(b, np.arange(1,10), msg)
+
def test_flatten(self):
x0 = np.array([[1,2,3],[4,5,6]], np.int32)
x1 = np.array([[[1,2],[3,4]],[[5,6],[7,8]]], np.int32)
@@ -1058,6 +1104,282 @@ class TestChoose(TestCase):
A = np.choose(self.ind, (self.x, self.y2))
assert_equal(A, [[2,2,3],[2,2,3]])
-
+def can_use_decimal():
+ try:
+ from decimal import Decimal
+ return True
+ except ImportError:
+ return False
+
+# TODO: test for multidimensional
+NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
+class TestNeighborhoodIter(TestCase):
+ # Simple, 2d tests
+ def _test_simple2d(self, dt):
+ # Test zero and one padding for simple data type
+ x = np.array([[0, 1], [2, 3]], dtype=dt)
+ r = [np.array([[0, 0, 0], [0, 0, 1]], dtype=dt),
+ np.array([[0, 0, 0], [0, 1, 0]], dtype=dt),
+ np.array([[0, 0, 1], [0, 2, 3]], dtype=dt),
+ np.array([[0, 1, 0], [2, 3, 0]], dtype=dt)]
+ l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [np.array([[1, 1, 1], [1, 0, 1]], dtype=dt),
+ np.array([[1, 1, 1], [0, 1, 1]], dtype=dt),
+ np.array([[1, 0, 1], [1, 2, 3]], dtype=dt),
+ np.array([[0, 1, 1], [2, 3, 1]], dtype=dt)]
+ l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ r = [np.array([[4, 4, 4], [4, 0, 1]], dtype=dt),
+ np.array([[4, 4, 4], [0, 1, 4]], dtype=dt),
+ np.array([[4, 0, 1], [4, 2, 3]], dtype=dt),
+ np.array([[0, 1, 4], [2, 3, 4]], dtype=dt)]
+ l = test_neighborhood_iterator(x, [-1, 0, -1, 1], 4, NEIGH_MODE['constant'])
+ assert_array_equal(l, r)
+
+ def test_simple2d(self):
+ self._test_simple2d(np.float)
+
+ @dec.skipif(not can_use_decimal(),
+ "Skip neighborhood iterator tests for decimal objects " \
+ "(decimal module not available")
+ def test_simple2d_object(self):
+ from decimal import Decimal
+ self._test_simple2d(Decimal)
+
+ def _test_mirror2d(self, dt):
+ x = np.array([[0, 1], [2, 3]], dtype=dt)
+ r = [np.array([[0, 0, 1], [0, 0, 1]], dtype=dt),
+ np.array([[0, 1, 1], [0, 1, 1]], dtype=dt),
+ np.array([[0, 0, 1], [2, 2, 3]], dtype=dt),
+ np.array([[0, 1, 1], [2, 3, 3]], dtype=dt)]
+ l = test_neighborhood_iterator(x, [-1, 0, -1, 1], x[0], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ def test_mirror2d(self):
+ self._test_mirror2d(np.float)
+
+ @dec.skipif(not can_use_decimal(),
+ "Skip neighborhood iterator tests for decimal objects " \
+ "(decimal module not available")
+ def test_mirror2d_object(self):
+ from decimal import Decimal
+ self._test_mirror2d(Decimal)
+
+ # Simple, 1d tests
+ def _test_simple(self, dt):
+ # Test padding with constant values
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = [[0, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 0]]
+ l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [[1, 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 1]]
+ l = test_neighborhood_iterator(x, [-1, 1], x[0], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ r = [[x[4], 1, 2], [1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, x[4]]]
+ l = test_neighborhood_iterator(x, [-1, 1], x[4], NEIGH_MODE['constant'])
+ assert_array_equal(l, r)
+
+ def test_simple_float(self):
+ self._test_simple(np.float)
+
+ @dec.skipif(not can_use_decimal(),
+ "Skip neighborhood iterator tests for decimal objects " \
+ "(decimal module not available")
+ def test_simple_object(self):
+ from decimal import Decimal
+ self._test_simple(Decimal)
+
+ # Test mirror modes
+ def _test_mirror(self, dt):
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = np.array([[2, 1, 1, 2, 3], [1, 1, 2, 3, 4], [1, 2, 3, 4, 5],
+ [2, 3, 4, 5, 5], [3, 4, 5, 5, 4]], dtype=dt)
+ l = test_neighborhood_iterator(x, [-2, 2], x[1], NEIGH_MODE['mirror'])
+ self.failUnless([i.dtype == dt for i in l])
+ assert_array_equal(l, r)
+
+ def test_mirror(self):
+ self._test_mirror(np.float)
+
+ @dec.skipif(not can_use_decimal(),
+ "Skip neighborhood iterator tests for decimal objects " \
+ "(decimal module not available")
+ def test_mirror_object(self):
+ from decimal import Decimal
+ self._test_mirror(Decimal)
+
+ # Circular mode
+ def _test_circular(self, dt):
+ x = np.linspace(1, 5, 5).astype(dt)
+ r = np.array([[4, 5, 1, 2, 3], [5, 1, 2, 3, 4], [1, 2, 3, 4, 5],
+ [2, 3, 4, 5, 1], [3, 4, 5, 1, 2]], dtype=dt)
+ l = test_neighborhood_iterator(x, [-2, 2], x[0], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+ def test_circular(self):
+ self._test_circular(np.float)
+
+ @dec.skipif(not can_use_decimal(),
+ "Skip neighborhood iterator tests for decimal objects " \
+ "(decimal module not available")
+ def test_circular_object(self):
+ from decimal import Decimal
+ self._test_circular(Decimal)
+
+# Test stacking neighborhood iterators
+class TestStackedNeighborhoodIter(TestCase):
+ # Simple, 1d test: stacking 2 constant-padded neigh iterators
+ def test_simple_const(self):
+ dt = np.float64
+ # Test zero and one padding for simple data type
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0], dtype=dt),
+ np.array([0], dtype=dt),
+ np.array([1], dtype=dt),
+ np.array([2], dtype=dt),
+ np.array([3], dtype=dt),
+ np.array([0], dtype=dt),
+ np.array([0], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-2, 4], NEIGH_MODE['zero'],
+ [0, 0], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ r = [np.array([1, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 1], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
+ [-1, 1], NEIGH_MODE['one'])
+ assert_array_equal(l, r)
+
+ # 2nd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
+ # mirror padding
+ def test_simple_mirror(self):
+ dt = np.float64
+ # Stacking zero on top of mirror
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 1], dtype=dt),
+ np.array([1, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 3], dtype=dt),
+ np.array([3, 3, 0], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['mirror'],
+ [-1, 1], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
+ [-2, 0], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero: 2nd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 3], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
+ [0, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero: 3rd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 0, 0, 1, 2], dtype=dt),
+ np.array([0, 0, 1, 2, 3], dtype=dt),
+ np.array([0, 1, 2, 3, 0], dtype=dt),
+ np.array([1, 2, 3, 0, 0], dtype=dt),
+ np.array([2, 3, 0, 0, 3], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
+ [-2, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+ # 3rd simple, 1d test: stacking 2 neigh iterators, mixing const padding and
+ # circular padding
+ def test_simple_circular(self):
+ dt = np.float64
+        # Stacking zero on top of circular
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 3, 1], dtype=dt),
+ np.array([3, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 1], dtype=dt),
+ np.array([3, 1, 0], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['circular'],
+ [-1, 1], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+        # Stacking circular on top of zero
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt),
+ np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
+ [-2, 0], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+        # Stacking circular on top of zero: 2nd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([0, 1, 2], dtype=dt),
+ np.array([1, 2, 3], dtype=dt),
+ np.array([2, 3, 0], dtype=dt),
+ np.array([3, 0, 0], dtype=dt),
+ np.array([0, 0, 1], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
+ [0, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+        # Stacking circular on top of zero: 3rd
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([3, 0, 0, 1, 2], dtype=dt),
+ np.array([0, 0, 1, 2, 3], dtype=dt),
+ np.array([0, 1, 2, 3, 0], dtype=dt),
+ np.array([1, 2, 3, 0, 0], dtype=dt),
+ np.array([2, 3, 0, 0, 1], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [-1, 3], NEIGH_MODE['zero'],
+ [-2, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
+
+ # 4th simple, 1d test: stacking 2 neigh iterators, but with lower iterator
+ # being strictly within the array
+ def test_simple_strict_within(self):
+ dt = np.float64
+ # Stacking zero on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 0], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
+ [-1, 2], NEIGH_MODE['zero'])
+ assert_array_equal(l, r)
+
+ # Stacking mirror on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 3], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
+ [-1, 2], NEIGH_MODE['mirror'])
+ assert_array_equal(l, r)
+
+    # Stacking circular on top of zero, first neighborhood strictly inside the
+ # array
+ x = np.array([1, 2, 3], dtype=dt)
+ r = [np.array([1, 2, 3, 1], dtype=dt)]
+ l = test_neighborhood_iterator_oob(x, [1, 1], NEIGH_MODE['zero'],
+ [-1, 2], NEIGH_MODE['circular'])
+ assert_array_equal(l, r)
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 832b2893f..206c06e66 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -896,19 +896,20 @@ class _TestCorrelate(TestCase):
def test_float(self):
self._setup(np.float)
- z = np.correlate(self.x, self.y, 'full')
- assert_array_almost_equal(z, self.z1)
- z = np.correlate(self.y, self.x, 'full')
+ z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z1)
+ z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
+ assert_array_almost_equal(z, self.z2)
def test_object(self):
self._setup(Decimal)
- z = np.correlate(self.x, self.y, 'full')
- assert_array_almost_equal(z, self.z1)
- z = np.correlate(self.y, self.x, 'full')
+ z = np.correlate(self.x, self.y, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, self.z1)
+ z = np.correlate(self.y, self.x, 'full', old_behavior=self.old_behavior)
+ assert_array_almost_equal(z, self.z2)
class TestCorrelate(_TestCorrelate):
+ old_behavior = True
def _setup(self, dt):
# correlate uses an unconventional definition so that correlate(a, b)
# == correlate(b, a), so force the corresponding outputs to be the same
@@ -916,6 +917,7 @@ class TestCorrelate(_TestCorrelate):
_TestCorrelate._setup(self, dt)
self.z2 = self.z1
+ @dec.deprecated()
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
y = np.array([-1, -2j, 3+1j], dtype=np.complex)
@@ -923,7 +925,16 @@ class TestCorrelate(_TestCorrelate):
z = np.correlate(x, y, 'full')
assert_array_almost_equal(z, r_z)
-class TestAcorrelate(_TestCorrelate):
+ @dec.deprecated()
+ def test_float(self):
+ _TestCorrelate.test_float(self)
+
+ @dec.deprecated()
+ def test_object(self):
+ _TestCorrelate.test_object(self)
+
+class TestCorrelateNew(_TestCorrelate):
+ old_behavior = False
def test_complex(self):
x = np.array([1, 2, 3, 4+1j], dtype=np.complex)
y = np.array([-1, -2j, 3+1j], dtype=np.complex)
@@ -932,8 +943,24 @@ class TestAcorrelate(_TestCorrelate):
#assert_array_almost_equal(z, r_z)
r_z = r_z[::-1].conjugate()
- z = np.acorrelate(y, x, 'full')
+ z = np.correlate(y, x, 'full', old_behavior=self.old_behavior)
assert_array_almost_equal(z, r_z)
+class TestArgwhere:
+ def test_2D(self):
+ x = np.arange(6).reshape((2, 3))
+ assert_array_equal(np.argwhere(x > 1),
+ [[0, 2],
+ [1, 0],
+ [1, 1],
+ [1, 2]])
+
+ def test_list(self):
+ assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
+
+ def test_masked_array(self):
+ a = np.ma.array([0, 1, 2, 3], mask=[0, 0, 1, 0])
+ assert_equal(np.argwhere(a), [[1], [3]])
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 4e0bb462b..56ed4dbb1 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -338,13 +338,13 @@ class TestEmptyField(TestCase):
class TestCommonType(TestCase):
def test_scalar_loses1(self):
- res = np.find_common_type(['f4','f4','i4'],['f8'])
+ res = np.find_common_type(['f4','f4','i2'],['f8'])
assert(res == 'f4')
def test_scalar_loses2(self):
res = np.find_common_type(['f4','f4'],['i8'])
assert(res == 'f4')
def test_scalar_wins(self):
- res = np.find_common_type(['f4','f4','i4'],['c8'])
+ res = np.find_common_type(['f4','f4','i2'],['c8'])
assert(res == 'c8')
def test_scalar_wins2(self):
res = np.find_common_type(['u4','i4','i4'],['f4'])
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 94b10edb1..abea0a222 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -10,6 +10,29 @@ class TestDivision(TestCase):
assert_equal(x // 100, [0, 0, 0, 1, -1, -1, -1, -1, -2])
assert_equal(x % 100, [5, 10, 90, 0, 95, 90, 10, 0, 80])
+ def test_division_complex(self):
+ # check that implementation is correct
+ msg = "Complex division implementation check"
+ x = np.array([1. + 1.*1j, 1. + .5*1j, 1. + 2.*1j], dtype=np.complex128)
+ assert_almost_equal(x**2/x, x, err_msg=msg)
+ # check overflow, underflow
+ msg = "Complex division overflow/underflow check"
+ x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
+ y = x**2/x
+ assert_almost_equal(y/x, [1, 1], err_msg=msg)
+
+ def test_floor_division_complex(self):
+ # check that implementation is correct
+ msg = "Complex floor division implementation check"
+ x = np.array([.9 + 1j, -.1 + 1j, .9 + .5*1j, .9 + 2.*1j], dtype=np.complex128)
+ y = np.array([0., -1., 0., 0.], dtype=np.complex128)
+ assert_equal(np.floor_divide(x**2,x), y, err_msg=msg)
+ # check overflow, underflow
+ msg = "Complex floor division overflow/underflow check"
+ x = np.array([1.e+110, 1.e-110], dtype=np.complex128)
+ y = np.floor_divide(x**2, x)
+ assert_equal(y, [1.e+110, 0], err_msg=msg)
+
class TestPower(TestCase):
def test_power_float(self):
x = np.array([1., 2., 3.])
@@ -42,7 +65,7 @@ class TestPower(TestCase):
def assert_complex_equal(x, y):
assert_array_equal(x.real, y.real)
assert_array_equal(x.imag, y.imag)
-
+
for z in [complex(0, np.inf), complex(1, np.inf)]:
z = np.array([z], dtype=np.complex_)
assert_complex_equal(z**1, z)
@@ -87,7 +110,25 @@ class TestLogAddExp2(object):
logxf = np.array(x, dtype=dt)
logyf = np.array(y, dtype=dt)
logzf = np.array(z, dtype=dt)
- assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
+ assert_almost_equal(np.logaddexp2(logxf, logyf), logzf)
+
+ def test_inf(self) :
+ inf = np.inf
+ x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
+ y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
+ z = [inf, inf, inf, -inf, inf, inf, 1, 1]
+ for dt in ['f','d','g'] :
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_equal(np.logaddexp2(logxf, logyf), logzf)
+
+ def test_nan(self):
+ assert np.isnan(np.logaddexp2(np.nan, np.inf))
+ assert np.isnan(np.logaddexp2(np.inf, np.nan))
+ assert np.isnan(np.logaddexp2(np.nan, 0))
+ assert np.isnan(np.logaddexp2(0, np.nan))
+ assert np.isnan(np.logaddexp2(np.nan, np.nan))
class TestLog(TestCase):
def test_log_values(self) :
@@ -130,6 +171,24 @@ class TestLogAddExp(object):
logzf = np.array(z, dtype=dt)
assert_almost_equal(np.logaddexp(logxf, logyf), logzf)
+ def test_inf(self) :
+ inf = np.inf
+ x = [inf, -inf, inf, -inf, inf, 1, -inf, 1]
+ y = [inf, inf, -inf, -inf, 1, inf, 1, -inf]
+ z = [inf, inf, inf, -inf, inf, inf, 1, 1]
+ for dt in ['f','d','g'] :
+ logxf = np.array(x, dtype=dt)
+ logyf = np.array(y, dtype=dt)
+ logzf = np.array(z, dtype=dt)
+ assert_equal(np.logaddexp(logxf, logyf), logzf)
+
+ def test_nan(self):
+ assert np.isnan(np.logaddexp(np.nan, np.inf))
+ assert np.isnan(np.logaddexp(np.inf, np.nan))
+ assert np.isnan(np.logaddexp(np.nan, 0))
+ assert np.isnan(np.logaddexp(0, np.nan))
+ assert np.isnan(np.logaddexp(np.nan, np.nan))
+
class TestLog1p(TestCase):
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
@@ -140,6 +199,94 @@ class TestExpm1(TestCase):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
+class TestHypot(TestCase, object):
+ def test_simple(self):
+ assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
+ assert_almost_equal(ncu.hypot(0, 0), 0)
+
+def assert_hypot_isnan(x, y):
+ assert np.isnan(ncu.hypot(x, y))
+
+def assert_hypot_isinf(x, y):
+ assert np.isinf(ncu.hypot(x, y))
+
+def test_hypot_special_values():
+ yield assert_hypot_isnan, np.nan, np.nan
+ yield assert_hypot_isnan, np.nan, 1
+ yield assert_hypot_isinf, np.nan, np.inf
+ yield assert_hypot_isinf, np.inf, np.nan
+ yield assert_hypot_isinf, np.inf, 0
+ yield assert_hypot_isinf, 0, np.inf
+
+def test_arctan2_special_values():
+ def assert_arctan2_isnan(x, y):
+ assert np.isnan(ncu.arctan2(x, y))
+
+ def assert_arctan2_ispinf(x, y):
+ assert np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) > 0
+
+ def assert_arctan2_isninf(x, y):
+ assert np.isinf(ncu.arctan2(x, y)) and ncu.arctan2(x, y) < 0
+
+ def assert_arctan2_ispzero(x, y):
+ assert ncu.arctan2(x, y) == 0 and not np.signbit(ncu.arctan2(x, y))
+
+ def assert_arctan2_isnzero(x, y):
+ assert ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))
+
+ # atan2(1, 1) returns pi/4.
+ yield assert_almost_equal, ncu.arctan2(1, 1), 0.25 * np.pi
+ yield assert_almost_equal, ncu.arctan2(-1, 1), -0.25 * np.pi
+ yield assert_almost_equal, ncu.arctan2(1, -1), 0.75 * np.pi
+
+ # atan2(+-0, -0) returns +-pi.
+ yield assert_almost_equal, ncu.arctan2(np.PZERO, np.NZERO), np.pi
+ yield assert_almost_equal, ncu.arctan2(np.NZERO, np.NZERO), -np.pi
+ # atan2(+-0, +0) returns +-0.
+ yield assert_arctan2_ispzero, np.PZERO, np.PZERO
+ yield assert_arctan2_isnzero, np.NZERO, np.PZERO
+
+ # atan2(+-0, x) returns +-pi for x < 0.
+ yield assert_almost_equal, ncu.arctan2(np.PZERO, -1), np.pi
+ yield assert_almost_equal, ncu.arctan2(np.NZERO, -1), -np.pi
+
+ # atan2(+-0, x) returns +-0 for x > 0.
+ yield assert_arctan2_ispzero, np.PZERO, 1
+ yield assert_arctan2_isnzero, np.NZERO, 1
+
+ # atan2(y, +-0) returns +pi/2 for y > 0.
+ yield assert_almost_equal, ncu.arctan2(1, np.PZERO), 0.5 * np.pi
+ yield assert_almost_equal, ncu.arctan2(1, np.NZERO), 0.5 * np.pi
+
+ # atan2(y, +-0) returns -pi/2 for y < 0.
+ yield assert_almost_equal, ncu.arctan2(-1, np.PZERO), -0.5 * np.pi
+ yield assert_almost_equal, ncu.arctan2(-1, np.NZERO), -0.5 * np.pi
+
+ # atan2(+-y, -infinity) returns +-pi for finite y > 0.
+ yield assert_almost_equal, ncu.arctan2(1, np.NINF), np.pi
+ yield assert_almost_equal, ncu.arctan2(-1, np.NINF), -np.pi
+
+ # atan2(+-y, +infinity) returns +-0 for finite y > 0.
+ yield assert_arctan2_ispzero, 1, np.inf
+ yield assert_arctan2_isnzero, -1, np.inf
+
+ # atan2(+-infinity, x) returns +-pi/2 for finite x.
+ yield assert_almost_equal, ncu.arctan2( np.inf, 1), 0.5 * np.pi
+ yield assert_almost_equal, ncu.arctan2(-np.inf, 1), -0.5 * np.pi
+
+ # atan2(+-infinity, -infinity) returns +-3*pi/4.
+ yield assert_almost_equal, ncu.arctan2( np.inf, -np.inf), 0.75 * np.pi
+ yield assert_almost_equal, ncu.arctan2(-np.inf, -np.inf), -0.75 * np.pi
+
+ # atan2(+-infinity, +infinity) returns +-pi/4.
+ yield assert_almost_equal, ncu.arctan2( np.inf, np.inf), 0.25 * np.pi
+ yield assert_almost_equal, ncu.arctan2(-np.inf, np.inf), -0.25 * np.pi
+
+ # atan2(nan, x) returns nan for any x, including inf
+ yield assert_arctan2_isnan, np.nan, np.inf
+ yield assert_arctan2_isnan, np.inf, np.nan
+ yield assert_arctan2_isnan, np.nan, np.nan
+
class TestMaximum(TestCase):
def test_reduce_complex(self):
assert_equal(np.maximum.reduce([1,2j]),1)
@@ -337,6 +484,38 @@ class TestSpecialMethods(TestCase):
a = A()
self.failUnlessRaises(RuntimeError, ncu.maximum, a, a)
+ def test_default_prepare(self):
+ class with_wrap(object):
+ __array_priority__ = 10
+ def __array__(self):
+ return np.zeros(1)
+ def __array_wrap__(self, arr, context):
+ return arr
+ a = with_wrap()
+ x = ncu.minimum(a, a)
+ assert_equal(x, np.zeros(1))
+ assert_equal(type(x), np.ndarray)
+
+ def test_prepare(self):
+ class with_prepare(np.ndarray):
+ __array_priority__ = 10
+ def __array_prepare__(self, arr, context):
+ # make sure we can return a new
+ return np.array(arr).view(type=with_prepare)
+ a = np.array(1).view(type=with_prepare)
+ x = np.add(a, a)
+ assert_equal(x, np.array(2))
+ assert_equal(type(x), with_prepare)
+
+ def test_failing_prepare(self):
+ class A(object):
+ def __array__(self):
+ return np.zeros(1)
+ def __array_prepare__(self, arr, context=None):
+ raise RuntimeError
+ a = A()
+ self.failUnlessRaises(RuntimeError, ncu.maximum, a, a)
+
def test_array_with_context(self):
class A(object):
def __array__(self, dtype=None, context=None):
@@ -637,6 +816,13 @@ def _check_branch_cut(f, x0, dx, re_sign=1, im_sign=-1, sig_zero_ok=False,
assert np.all(np.absolute(y0.real - ym.real*re_sign) < atol), (y0, ym)
assert np.all(np.absolute(y0.imag - ym.imag*im_sign) < atol), (y0, ym)
+def test_copysign():
+ assert np.copysign(1, -1) == -1
+ assert 1 / np.copysign(0, -1) < 0
+ assert 1 / np.copysign(0, 1) > 0
+ assert np.signbit(np.copysign(np.nan, -1))
+ assert not np.signbit(np.copysign(np.nan, 1))
+
def test_pos_nan():
"""Check np.nan is a positive nan."""
assert np.signbit(np.nan) == 0
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index 10dec7373..4ed08d7f6 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -7,6 +7,7 @@ import ccompiler
import unixccompiler
from info import __doc__
+from npy_pkg_config import *
try:
import __config__
diff --git a/numpy/distutils/command/__init__.py b/numpy/distutils/command/__init__.py
index dfe81d542..87546aeee 100644
--- a/numpy/distutils/command/__init__.py
+++ b/numpy/distutils/command/__init__.py
@@ -7,7 +7,7 @@ __revision__ = "$Id: __init__.py,v 1.3 2005/05/16 11:08:49 pearu Exp $"
distutils_all = [ 'build_py',
'clean',
- 'install_lib',
+ 'install_clib',
'install_scripts',
'bdist',
'bdist_dumb',
@@ -26,6 +26,7 @@ __all__ = ['build',
'install',
'install_data',
'install_headers',
+ 'install_lib',
'bdist_rpm',
'sdist',
] + distutils_all
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 9f6be52eb..88fa809c7 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -3,6 +3,7 @@
import os
from glob import glob
+import shutil
from distutils.command.build_clib import build_clib as old_build_clib
from distutils.errors import DistutilsSetupError, DistutilsError, \
DistutilsFileError
@@ -27,11 +28,15 @@ class build_clib(old_build_clib):
user_options = old_build_clib.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
+ ('inplace', 'i', 'Build in-place'),
]
+ boolean_options = old_build_clib.boolean_options + ['inplace']
+
def initialize_options(self):
old_build_clib.initialize_options(self)
self.fcompiler = None
+ self.inplace = 0
return
def have_f_sources(self):
@@ -94,6 +99,14 @@ class build_clib(old_build_clib):
self.build_libraries(self.libraries)
+ if self.inplace:
+ for l in self.distribution.installed_libraries:
+ libname = self.compiler.library_filename(l.name)
+ source = os.path.join(self.build_clib, libname)
+ target = os.path.join(l.target_dir, libname)
+ self.mkpath(l.target_dir)
+ shutil.copy(source, target)
+
def get_source_files(self):
self.check_library_list(self.libraries)
filenames = []
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index 2b114d4a7..4d11d033d 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -57,8 +57,20 @@ class build_ext (old_build_ext):
self.run_command('build_src')
if self.distribution.has_c_libraries():
- self.run_command('build_clib')
- build_clib = self.get_finalized_command('build_clib')
+ if self.inplace:
+ if self.distribution.have_run.get('build_clib'):
+ log.warn('build_clib already run, it is too late to ' \
+ 'ensure in-place build of build_clib')
+ else:
+ build_clib = self.distribution.get_command_obj('build_clib')
+ build_clib.inplace = 1
+ build_clib.ensure_finalized()
+ build_clib.run()
+ self.distribution.have_run['build_clib'] = 1
+
+ else:
+ self.run_command('build_clib')
+ build_clib = self.get_finalized_command('build_clib')
self.library_dirs.append(build_clib.build_clib)
else:
build_clib = None
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py
index 4ba3f0a9a..818bd52fd 100644
--- a/numpy/distutils/command/build_src.py
+++ b/numpy/distutils/command/build_src.py
@@ -5,6 +5,7 @@ import os
import re
import sys
import shlex
+import copy
from distutils.command import build_ext
from distutils.dep_util import newer_group, newer
@@ -22,10 +23,29 @@ except ImportError:
#import numpy.f2py
from numpy.distutils import log
from numpy.distutils.misc_util import fortran_ext_match, \
- appendpath, is_string, is_sequence
+ appendpath, is_string, is_sequence, get_cmd
from numpy.distutils.from_template import process_file as process_f_file
from numpy.distutils.conv_template import process_file as process_c_file
+def subst_vars(target, source, d):
+ """Substitute any occurrence of @foo@ by d['foo'] from source file into
+ target."""
+ var = re.compile('@([a-zA-Z_]+)@')
+ fs = open(source, 'r')
+ try:
+ ft = open(target, 'w')
+ try:
+ for l in fs.readlines():
+ m = var.search(l)
+ if m:
+ ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
+ else:
+ ft.write(l)
+ finally:
+ ft.close()
+ finally:
+ fs.close()
+
class build_src(build_ext.build_ext):
description = "build sources from SWIG, F2PY files or a function"
@@ -125,6 +145,7 @@ class build_src(build_ext.build_ext):
setattr(self, c, v)
def run(self):
+ log.info("build_src")
if not (self.extensions or self.libraries):
return
self.build_sources()
@@ -147,6 +168,7 @@ class build_src(build_ext.build_ext):
self.build_extension_sources(ext)
self.build_data_files_sources()
+ self.build_npy_pkg_config()
def build_data_files_sources(self):
if not self.data_files:
@@ -183,6 +205,61 @@ class build_src(build_ext.build_ext):
raise TypeError(repr(data))
self.data_files[:] = new_data_files
+
+ def _build_npy_pkg_config(self, info, gd):
+ import shutil
+ template, install_dir, subst_dict = info
+ template_dir = os.path.dirname(template)
+ for k, v in gd.items():
+ subst_dict[k] = v
+
+ if self.inplace == 1:
+ generated_dir = os.path.join(template_dir, install_dir)
+ else:
+ generated_dir = os.path.join(self.build_src, template_dir,
+ install_dir)
+ generated = os.path.basename(os.path.splitext(template)[0])
+ generated_path = os.path.join(generated_dir, generated)
+ if not os.path.exists(generated_dir):
+ os.makedirs(generated_dir)
+
+ subst_vars(generated_path, template, subst_dict)
+
+ # Where to install relatively to install prefix
+ full_install_dir = os.path.join(template_dir, install_dir)
+ return full_install_dir, generated_path
+
+ def build_npy_pkg_config(self):
+ log.info('build_src: building npy-pkg config files')
+
+ # XXX: another ugly workaround to circumvent distutils brain damage. We
+ # need the install prefix here, but finalizing the options of the
+ # install command when only building sources cause error. Instead, we
+ # copy the install command instance, and finalize the copy so that it
+ # does not disrupt how distutils want to do things when with the
+ # original install command instance.
+ install_cmd = copy.copy(get_cmd('install'))
+ if not install_cmd.finalized == 1:
+ install_cmd.finalize_options()
+ build_npkg = False
+ gd = {}
+ if hasattr(install_cmd, 'install_libbase'):
+ top_prefix = install_cmd.install_libbase
+ build_npkg = True
+ elif self.inplace == 1:
+ top_prefix = '.'
+ build_npkg = True
+
+ if build_npkg:
+ for pkg, infos in self.distribution.installed_pkg_config.items():
+ pkg_path = self.distribution.package_dir[pkg]
+ prefix = os.path.join(os.path.abspath(top_prefix), pkg_path)
+ d = {'prefix': prefix}
+ for info in infos:
+ install_dir, generated = self._build_npy_pkg_config(info, d)
+ self.distribution.data_files.append((install_dir,
+ [generated]))
+
def build_py_modules_sources(self):
if not self.py_modules:
return
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index 00821d260..dbf8e77a1 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -166,6 +166,35 @@ int main()
return self.try_compile(body, headers, include_dirs)
+ def check_type(self, type_name, headers=None, include_dirs=None,
+ library_dirs=None):
+ """Check type availability. Return True if the type can be compiled,
+ False otherwise"""
+ self._check_compiler()
+
+ # First check the type can be compiled
+ body = r"""
+int main() {
+ if ((%(name)s *) 0)
+ return 0;
+ if (sizeof (%(name)s))
+ return 0;
+}
+""" % {'name': type_name}
+
+ st = False
+ try:
+ try:
+ self._compile(body % {'type': type_name},
+ headers, include_dirs, 'c')
+ st = True
+ except distutils.errors.CompileError, e:
+ st = False
+ finally:
+ self._clean()
+
+ return st
+
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
"""Check size of a given type."""
self._check_compiler()
diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py
index 87b549da8..099ad5c16 100644
--- a/numpy/distutils/command/install.py
+++ b/numpy/distutils/command/install.py
@@ -10,6 +10,9 @@ from distutils.file_util import write_file
class install(old_install):
+ # Always run install_clib - the command is cheap, so no need to bypass it
+ sub_commands = old_install.sub_commands + [('install_clib', lambda x: True)]
+
def finalize_options (self):
old_install.finalize_options(self)
self.install_lib = self.install_libbase
diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py
new file mode 100644
index 000000000..638d4beac
--- /dev/null
+++ b/numpy/distutils/command/install_clib.py
@@ -0,0 +1,37 @@
+import os
+from distutils.core import Command
+from distutils.ccompiler import new_compiler
+from numpy.distutils.misc_util import get_cmd
+
+class install_clib(Command):
+ description = "Command to install installable C libraries"
+
+ user_options = []
+
+ def initialize_options(self):
+ self.install_dir = None
+ self.outfiles = []
+
+ def finalize_options(self):
+ self.set_undefined_options('install', ('install_lib', 'install_dir'))
+
+ def run (self):
+ build_clib_cmd = get_cmd("build_clib")
+ build_dir = build_clib_cmd.build_clib
+
+ # We need the compiler to get the library name -> filename association
+ if not build_clib_cmd.compiler:
+ compiler = new_compiler(compiler=None)
+ compiler.customize(self.distribution)
+ else:
+ compiler = build_clib_cmd.compiler
+
+ for l in self.distribution.installed_libraries:
+ target_dir = os.path.join(self.install_dir, l.target_dir)
+ name = compiler.library_filename(l.name)
+ source = os.path.join(build_dir, name)
+ self.mkpath(target_dir)
+ self.outfiles.append(self.copy_file(source, target_dir)[0])
+
+ def get_outputs(self):
+ return self.outfiles
diff --git a/numpy/distutils/command/scons.py b/numpy/distutils/command/scons.py
index f733dc4b2..28fb5cb03 100644
--- a/numpy/distutils/command/scons.py
+++ b/numpy/distutils/command/scons.py
@@ -10,7 +10,7 @@ from numpy.distutils.ccompiler import CCompiler
from numpy.distutils.fcompiler import FCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils import log
-from numpy.distutils.misc_util import is_bootstrapping
+from numpy.distutils.misc_util import is_bootstrapping, get_cmd
def get_scons_build_dir():
"""Return the top path where everything produced by scons will be put.
@@ -45,14 +45,34 @@ def get_scons_local_path():
from numscons import get_scons_path
return get_scons_path()
-def get_distutils_libdir(cmd, pkg):
- """Returns the path where distutils install libraries, relatively to the
- scons build directory."""
+def _get_top_dir(pkg):
+ # XXX: this mess is necessary because scons is launched per package, and
+ # has no knowledge outside its build dir, which is package dependent. If
+ # one day numscons does not launch one process/package, this will be
+ # unnecessary.
from numscons import get_scons_build_dir
from numscons.core.utils import pkg_to_path
scdir = pjoin(get_scons_build_dir(), pkg_to_path(pkg))
n = scdir.count(os.sep)
- return pjoin(os.sep.join([os.pardir for i in range(n+1)]), cmd.build_lib)
+ return os.sep.join([os.pardir for i in range(n+1)])
+
+def get_distutils_libdir(cmd, pkg):
+ """Returns the path where distutils install libraries, relatively to the
+ scons build directory."""
+ return pjoin(_get_top_dir(pkg), cmd.build_lib)
+
+def get_distutils_clibdir(cmd, pkg):
+ """Returns the path where distutils put pure C libraries."""
+ return pjoin(_get_top_dir(pkg), cmd.build_temp)
+
+def get_distutils_install_prefix(pkg, inplace):
+ """Returns the installation path for the current package."""
+ from numscons.core.utils import pkg_to_path
+ if inplace == 1:
+ return pkg_to_path(pkg)
+ else:
+ install_cmd = get_cmd('install').get_finalized_command('install')
+ return pjoin(install_cmd.install_libbase, pkg_to_path(pkg))
def get_python_exec_invoc():
"""This returns the python executable from which this file is invocated."""
@@ -352,28 +372,26 @@ class scons(old_build_ext):
"this package " % str(e))
try:
- minver = "0.9.3"
- try:
- # version_info was added in 0.10.0
- from numscons import version_info
- except ImportError:
- from numscons import get_version
- if get_version() < minver:
- raise ValueError()
+ minver = [0, 10, 2]
+ # version_info was added in 0.10.0
+ from numscons import version_info
+ # Stupid me used string instead of numbers in version_info in
+ # dev versions of 0.10.0
+ if isinstance(version_info[0], str):
+ raise ValueError("Numscons %s or above expected " \
+ "(detected 0.10.0)" % str(minver))
+ if version_info[:3] < minver:
+ raise ValueError("Numscons %s or above expected (got %s) "
+ % (str(minver), str(version_info)))
except ImportError:
raise RuntimeError("You need numscons >= %s to build numpy "\
"with numscons (imported numscons path " \
"is %s)." % (minver, numscons.__file__))
- except ValueError:
- raise RuntimeError("You need numscons >= %s to build numpy "\
- "with numscons (detected %s )" \
- % (minver, get_version()))
else:
# nothing to do, just leave it here.
return
- print "is bootstrapping ? %s" % is_bootstrapping()
# XXX: when a scons script is missing, scons only prints warnings, and
# does not return a failure (status is 0). We have to detect this from
# distutils (this cannot work for recursive scons builds...)
@@ -422,6 +440,10 @@ class scons(old_build_ext):
# pdirname(sconscript))))
cmd.append('distutils_libdir=%s' %
protect_path(get_distutils_libdir(self, pkg_name)))
+ cmd.append('distutils_clibdir=%s' %
+ protect_path(get_distutils_clibdir(self, pkg_name)))
+ prefix = get_distutils_install_prefix(pkg_name, self.inplace)
+ cmd.append('distutils_install_prefix=%s' % protect_path(prefix))
if not self._bypass_distutils_cc:
cmd.append('cc_opt=%s' % self.scons_compiler)
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
index f8a01c797..097df6fac 100644
--- a/numpy/distutils/conv_template.py
+++ b/numpy/distutils/conv_template.py
@@ -168,13 +168,13 @@ def parse_loop_header(loophead) :
nsub = size
elif nsub != size :
msg = "Mismatch in number of values:\n%s = %s" % (name, vals)
- raise ValueError, msg
+ raise ValueError(msg)
names.append((name,vals))
# generate list of dictionaries, one for each template iteration
dlist = []
if nsub is None :
- raise ValueError, "No substitution variables found"
+ raise ValueError("No substitution variables found")
for i in range(nsub) :
tmp = {}
for name,vals in names :
@@ -192,8 +192,8 @@ def parse_string(astr, env, level, line) :
try :
val = env[name]
except KeyError, e :
- msg = 'line %d: %s'%(line, e)
- raise ValueError, msg
+ msg = 'line %d: no definition of key "%s"'%(line, name)
+ raise ValueError(msg)
return val
code = [lineno]
@@ -213,7 +213,7 @@ def parse_string(astr, env, level, line) :
envlist = parse_loop_header(head)
except ValueError, e :
msg = "line %d: %s" % (newline, e)
- raise ValueError, msg
+ raise ValueError(msg)
for newenv in envlist :
newenv.update(env)
newcode = parse_string(text, newenv, newlevel, newline)
@@ -261,7 +261,7 @@ def process_file(source):
try:
code = process_str(''.join(lines))
except ValueError, e:
- raise ValueError, '"%s", %s' % (sourcefile, e)
+ raise ValueError('In "%s" loop at %s' % (sourcefile, e))
return '#line 1 "%s"\n%s' % (sourcefile, code)
@@ -299,5 +299,5 @@ if __name__ == "__main__":
try:
writestr = process_str(allstr)
except ValueError, e:
- raise ValueError, "file %s, %s" % (file, e)
+ raise ValueError("In %s loop at %s" % (file, e))
outfile.write(writestr)
diff --git a/numpy/distutils/core.py b/numpy/distutils/core.py
index 20fc3eac1..8481640bd 100644
--- a/numpy/distutils/core.py
+++ b/numpy/distutils/core.py
@@ -24,7 +24,8 @@ from numpy.distutils.extension import Extension
from numpy.distutils.numpy_distribution import NumpyDistribution
from numpy.distutils.command import config, config_compiler, \
build, build_py, build_ext, build_clib, build_src, build_scripts, \
- sdist, install_data, install_headers, install, bdist_rpm, scons
+ sdist, install_data, install_headers, install, bdist_rpm, scons, \
+ install_clib
from numpy.distutils.misc_util import get_data_files, is_sequence, is_string
numpy_cmdclass = {'build': build.build,
@@ -40,6 +41,7 @@ numpy_cmdclass = {'build': build.build,
'scons': scons.scons,
'install_data': install_data.install_data,
'install_headers': install_headers.install_headers,
+ 'install_clib': install_clib.install_clib,
'install': install.install,
'bdist_rpm': bdist_rpm.bdist_rpm,
}
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 8e25d1cb0..3c1da28da 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -2,6 +2,8 @@ import re
import os
import sys
import warnings
+import platform
+from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.cpuinfo import cpu
from numpy.distutils.fcompiler import FCompiler
@@ -19,6 +21,16 @@ _R_ARCHS = {"ppc": r"^Target: (powerpc-.*)$",
"x86_64": r"^Target: (i686-.*)$",
"ppc64": r"^Target: (powerpc-.*)$",}
+# XXX: handle cross compilation
+def is_win64():
+ return sys.platform == "win32" and platform.architecture()[0] == "64bit"
+
+if is_win64():
+ #_EXTRAFLAGS = ["-fno-leading-underscore"]
+ _EXTRAFLAGS = []
+else:
+ _EXTRAFLAGS = []
+
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77',)
@@ -220,10 +232,10 @@ class Gnu95FCompiler(GnuFCompiler):
executables = {
'version_cmd' : ["<F90>", "--version"],
'compiler_f77' : [None, "-Wall", "-ffixed-form",
- "-fno-second-underscore"],
- 'compiler_f90' : [None, "-Wall", "-fno-second-underscore"],
+ "-fno-second-underscore"] + _EXTRAFLAGS,
+ 'compiler_f90' : [None, "-Wall", "-fno-second-underscore"] + _EXTRAFLAGS,
'compiler_fix' : [None, "-Wall", "-ffixed-form",
- "-fno-second-underscore"],
+ "-fno-second-underscore"] + _EXTRAFLAGS,
'linker_so' : ["<F90>", "-Wall"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
@@ -291,6 +303,13 @@ class Gnu95FCompiler(GnuFCompiler):
i = opt.index("gcc")
opt.insert(i+1, "mingwex")
opt.insert(i+1, "mingw32")
+ # XXX: fix this mess, does not work for mingw
+ if is_win64():
+ c_compiler = self.c_compiler
+ if c_compiler and c_compiler.compiler_type == "msvc":
+ return []
+ else:
+ raise NotImplementedError("Only MS compiler supported with gfortran on win64")
return opt
def get_target(self):
@@ -303,12 +322,19 @@ class Gnu95FCompiler(GnuFCompiler):
return m.group(1)
return ""
+ def get_flags_opt(self):
+ if is_win64():
+ return ['-O0']
+ else:
+ return GnuFCompiler.get_flags_opt(self)
def _can_target(cmd, arch):
"""Return true is the command supports the -arch flag for the given
architecture."""
newcmd = cmd[:]
newcmd.extend(["-arch", arch, "-v"])
- st, out = exec_command(" ".join(newcmd))
+ p = Popen(newcmd, stderr=STDOUT, stdout=PIPE)
+ st = p.communicate()
+ out = p.stdout
if st == 0:
for line in out.splitlines():
m = re.search(_R_ARCHS[arch], line)
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index b3928015c..f72827332 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -122,7 +122,7 @@ class IntelItaniumFCompiler(IntelFCompiler):
compiler_aliases = ()
description = 'Intel Fortran Compiler for Itanium apps'
- version_match = intel_version_match('Itanium')
+ version_match = intel_version_match('Itanium|IA-64')
#Intel(R) Fortran Itanium(R) Compiler for Itanium(R)-based applications
#Version 9.1    Build 20060928 Package ID: l_fc_c_9.1.039
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index ecd60375e..b44e7db30 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -23,7 +23,13 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'get_script_files', 'get_lib_source_files', 'get_data_files',
'dot_join', 'get_frame', 'minrelpath','njoin',
'is_sequence', 'is_string', 'as_list', 'gpaths', 'get_language',
- 'quote_args', 'get_build_architecture']
+ 'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info']
+
+class InstallableLib:
+ def __init__(self, name, build_info, target_dir):
+ self.name = name
+ self.build_info = build_info
+ self.target_dir = target_dir
def quote_args(args):
# don't used _nt_quote_args as it does not check if
@@ -589,8 +595,9 @@ def get_frame(level=0):
class Configuration(object):
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
- 'libraries', 'headers', 'scripts', 'py_modules', 'scons_data']
- _dict_keys = ['package_dir']
+ 'libraries', 'headers', 'scripts', 'py_modules', 'scons_data',
+ 'installed_libraries']
+ _dict_keys = ['package_dir', 'installed_pkg_config']
_extra_keys = ['name', 'version']
numpy_include_dirs = []
@@ -693,9 +700,15 @@ class Configuration(object):
self.setup_name = setup_name
def todict(self):
- """Return configuration distionary suitable for passing
- to distutils.core.setup() function.
"""
+ Return a dictionary compatible with the keyword arguments of distutils
+ setup function.
+
+ Example
+ -------
+ >>> setup(\**config.todict()).
+ """
+
self._optimize_data_files()
d = {}
known_keys = self.list_keys + self.dict_keys + self.extra_keys
@@ -728,6 +741,7 @@ class Configuration(object):
raise ValueError,'Unknown option: '+key
def get_distribution(self):
+ """Return the distutils distribution object for self."""
from numpy.distutils.core import get_distribution
return get_distribution()
@@ -793,7 +807,17 @@ class Configuration(object):
caller_level = 1):
"""Return list of subpackage configurations.
- '*' in subpackage_name is handled as a wildcard.
+ Parameters
+ ----------
+ subpackage_name: str,None
+ Name of the subpackage to get the configuration. '*' in
+ subpackage_name is handled as a wildcard.
+ subpackage_path: str
+ If None, then the path is assumed to be the local path plus the
+ subpackage_name. If a setup.py file is not found in the
+ subpackage_path, then a default configuration is used.
+ parent_name: str
+ Parent name.
"""
if subpackage_name is None:
if subpackage_path is None:
@@ -841,8 +865,22 @@ class Configuration(object):
def add_subpackage(self,subpackage_name,
subpackage_path=None,
standalone = False):
- """Add subpackage to configuration.
+ """Add a sub-package to the current Configuration instance.
+
+ This is useful in a setup.py script for adding sub-packages to a
+ package.
+
+ Parameters
+ ----------
+ subpackage_name: str
+ name of the subpackage
+ subpackage_path: str
+ if given, the subpackage path such as the subpackage is in
+ subpackage_path / subpackage_name. If None, the subpackage is
+ assumed to be located in the local path / subpackage_name.
+ standalone: bool
"""
+
if standalone:
parent_name = None
else:
@@ -869,11 +907,24 @@ class Configuration(object):
def add_data_dir(self,data_path):
"""Recursively add files under data_path to data_files list.
- Argument can be either
- - 2-sequence (<datadir suffix>,<path to data directory>)
- - path to data directory where python datadir suffix defaults
- to package dir.
+ Recursively add files under data_path to the list of data_files to be
+ installed (and distributed). The data_path can be either a relative
+ path-name, or an absolute path-name, or a 2-tuple where the first
+ argument shows where in the install directory the data directory
+ should be installed to.
+
+ Parameters
+ ----------
+ data_path: seq,str
+ Argument can be either
+
+ * 2-sequence (<datadir suffix>,<path to data directory>)
+ * path to data directory where python datadir suffix defaults
+ to package dir.
+
+ Notes
+ -----
Rules for installation paths:
foo/bar -> (foo/bar, foo/bar) -> parent/foo/bar
(gun, foo/bar) -> parent/gun
@@ -883,6 +934,30 @@ class Configuration(object):
/foo/bar -> (bar, /foo/bar) -> parent/bar
(gun, /foo/bar) -> parent/gun
(fun/*/gun/*, sun/foo/bar) -> parent/fun/foo/gun/bar
+
+ Examples
+ --------
+ For example suppose the source directory contains fun/foo.dat and
+ fun/bar/car.dat::
+
+ >>> self.add_data_dir('fun')
+ >>> self.add_data_dir(('sun', 'fun'))
+ >>> self.add_data_dir(('gun', '/full/path/to/fun'))
+
+ Will install data-files to the locations::
+
+ <package install directory>/
+ fun/
+ foo.dat
+ bar/
+ car.dat
+ sun/
+ foo.dat
+ bar/
+ car.dat
+ gun/
+ foo.dat
+ car.dat
"""
if is_sequence(data_path):
d, data_path = data_path
@@ -960,24 +1035,90 @@ class Configuration(object):
def add_data_files(self,*files):
"""Add data files to configuration data_files.
- Argument(s) can be either
- - 2-sequence (<datadir prefix>,<path to data file(s)>)
- - paths to data files where python datadir prefix defaults
- to package dir.
+
+ Parameters
+ ----------
+ files: sequence
+ Argument(s) can be either
+
+ * 2-sequence (<datadir prefix>,<path to data file(s)>)
+ * paths to data files where python datadir prefix defaults
+ to package dir.
+
+ Notes
+ -----
+ The form of each element of the files sequence is very flexible
+ allowing many combinations of where to get the files from the package
+ and where they should ultimately be installed on the system. The most
+ basic usage is for an element of the files argument sequence to be a
+ simple filename. This will cause that file from the local path to be
+ installed to the installation path of the self.name package (package
+ path). The file argument can also be a relative path in which case the
+ entire relative path will be installed into the package directory.
+ Finally, the file can be an absolute path name in which case the file
+ will be found at the absolute path name but installed to the package
+ path.
+
+ This basic behavior can be augmented by passing a 2-tuple in as the
+ file argument. The first element of the tuple should specify the
+ relative path (under the package install directory) where the
+ remaining sequence of files should be installed to (it has nothing to
+ do with the file-names in the source distribution). The second element
+ of the tuple is the sequence of files that should be installed. The
+ files in this sequence can be filenames, relative paths, or absolute
+ paths. For absolute paths the file will be installed in the top-level
+ package installation directory (regardless of the first argument).
+ Filenames and relative path names will be installed in the package
+ install directory under the path name given as the first element of
+ the tuple.
Rules for installation paths:
- file.txt -> (., file.txt)-> parent/file.txt
- foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
- /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
- *.txt -> parent/a.txt, parent/b.txt
- foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
- */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
- (sun, file.txt) -> parent/sun/file.txt
- (sun, bar/file.txt) -> parent/sun/file.txt
- (sun, /foo/bar/file.txt) -> parent/sun/file.txt
- (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
- (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
- (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+
+ #. file.txt -> (., file.txt)-> parent/file.txt
+ #. foo/file.txt -> (foo, foo/file.txt) -> parent/foo/file.txt
+ #. /foo/bar/file.txt -> (., /foo/bar/file.txt) -> parent/file.txt
+ #. *.txt -> parent/a.txt, parent/b.txt
+ #. foo/*.txt -> parent/foo/a.txt, parent/foo/b.txt
+ #. */*.txt -> (*, */*.txt) -> parent/c/a.txt, parent/d/b.txt
+ #. (sun, file.txt) -> parent/sun/file.txt
+ #. (sun, bar/file.txt) -> parent/sun/file.txt
+ #. (sun, /foo/bar/file.txt) -> parent/sun/file.txt
+ #. (sun, *.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun, bar/*.txt) -> parent/sun/a.txt, parent/sun/b.txt
+ #. (sun/*, */*.txt) -> parent/sun/c/a.txt, parent/d/b.txt
+
+ An additional feature is that the path to a data-file can actually be
+ a function that takes no arguments and returns the actual path(s) to
+ the data-files. This is useful when the data files are generated while
+ building the package.
+
+ Examples
+ --------
+ Add files to the list of data_files to be included with the package.
+
+ >>> self.add_data_files('foo.dat',
+ ('fun', ['gun.dat', 'nun/pun.dat', '/tmp/sun.dat']),
+ 'bar/cat.dat',
+ '/full/path/to/can.dat')
+
+ will install these data files to::
+
+ <package install directory>/
+ foo.dat
+ fun/
+ gun.dat
+ nun/
+ pun.dat
+ sun.dat
+ bar/
+ cat.dat
+ can.dat
+
+ where <package install directory> is the package (or sub-package)
+ directory such as '/usr/lib/python2.4/site-packages/mypackage' ('C:
+ \\Python2.4 \\Lib \\site-packages \\mypackage') or
+ '/usr/lib/python2.4/site-packages/mypackage/mysubpackage' ('C:
+ \\Python2.4 \\Lib \\site-packages \\mypackage \\mysubpackage').
"""
if len(files)>1:
@@ -1047,6 +1188,10 @@ class Configuration(object):
def add_include_dirs(self,*paths):
"""Add paths to configuration include directories.
+
+ Add the given sequence of paths to the beginning of the include_dirs
+ list. This list will be visible to all extension modules of the
+ current package.
"""
include_dirs = self.paths(paths)
dist = self.get_distribution()
@@ -1061,10 +1206,21 @@ class Configuration(object):
def add_headers(self,*files):
"""Add installable headers to configuration.
- Argument(s) can be either
- - 2-sequence (<includedir suffix>,<path to header file(s)>)
- - path(s) to header file(s) where python includedir suffix will default
- to package name.
+
+ Add the given sequence of files to the beginning of the headers list.
+ By default, headers will be installed under <python-
+ include>/<self.name.replace('.','/')>/ directory. If an item of files
+ is a tuple, then its first argument specifies the actual installation
+ location relative to the <python-include> path.
+
+ Parameters
+ ----------
+ files: str, seq
+ Argument(s) can be either:
+
+ * 2-sequence (<includedir suffix>,<path to header file(s)>)
+ * path(s) to header file(s) where python includedir suffix will
+ default to package name.
"""
headers = []
for path in files:
@@ -1082,6 +1238,13 @@ class Configuration(object):
def paths(self,*paths,**kws):
"""Apply glob to paths and prepend local_path if needed.
+
+ Applies glob.glob(...) to each path in the sequence (if needed) and
+ pre-pends the local_path if needed. Because this is called on all
+ source lists, this allows wildcard characters to be specified in lists
+ of sources for extension modules and libraries and scripts and allows
+ path-names be relative to the source directory.
+
"""
include_non_existing = kws.get('include_non_existing',True)
return gpaths(paths,
@@ -1099,14 +1262,48 @@ class Configuration(object):
def add_extension(self,name,sources,**kw):
"""Add extension to configuration.
- Keywords:
- include_dirs, define_macros, undef_macros,
- library_dirs, libraries, runtime_library_dirs,
- extra_objects, extra_compile_args, extra_link_args,
- export_symbols, swig_opts, depends, language,
- f2py_options, module_dirs
- extra_info - dict or list of dict of keywords to be
- appended to keywords.
+ Create and add an Extension instance to the ext_modules list. This
+ method also takes the following optional keyword arguments that are
+ passed on to the Extension constructor.
+
+ Parameters
+ ----------
+ name: str
+ name of the extension
+ sources: seq
+ list of the sources. The list of sources may contain functions
+ (called source generators) which must take an extension instance
+ and a build directory as inputs and return a source file or list of
+ source files or None. If None is returned then no sources are
+ generated. If the Extension instance has no sources after
+ processing all source generators, then no extension module is
+ built.
+ include_dirs:
+ define_macros:
+ undef_macros:
+ library_dirs:
+ libraries:
+ runtime_library_dirs:
+ extra_objects:
+ extra_compile_args:
+ extra_link_args:
+ export_symbols:
+ swig_opts:
+ depends:
+ The depends list contains paths to files or directories that the
+ sources of the extension module depend on. If any path in the
+ depends list is newer than the extension module, then the module
+ will be rebuilt.
+ language:
+ f2py_options:
+ module_dirs:
+ extra_info: dict,list
+ dict or list of dict of keywords to be appended to keywords.
+
+ Notes
+ -----
+ The self.paths(...) method is applied to all lists that may contain
+ paths.
"""
ext_args = copy.copy(kw)
ext_args['name'] = dot_join(self.name,name)
@@ -1164,14 +1361,38 @@ class Configuration(object):
def add_library(self,name,sources,**build_info):
"""Add library to configuration.
- Valid keywords for build_info:
- depends
- macros
- include_dirs
- extra_compiler_args
- f2py_options
- language
+ Parameters
+ ----------
+ name: str
+ name of the extension
+ sources: seq
+ list of the sources. The list of sources may contain functions
+ (called source generators) which must take an extension instance
+ and a build directory as inputs and return a source file or list of
+ source files or None. If None is returned then no sources are
+ generated. If the Extension instance has no sources after
+ processing all source generators, then no extension module is
+ built.
+ build_info: dict
+ The following keys are allowed:
+
+ * depends
+ * macros
+ * include_dirs
+ * extra_compiler_args
+ * f2py_options
+ * language
"""
+ self._add_library(name, sources, None, build_info)
+
+ dist = self.get_distribution()
+ if dist is not None:
+ self.warn('distutils distribution has been initialized,'\
+ ' it may be too late to add a library '+ name)
+
+ def _add_library(self, name, sources, install_dir, build_info):
+ """Common implementation for add_library and add_installed_library. Do
+ not use directly"""
build_info = copy.copy(build_info)
name = name #+ '__OF__' + self.name
build_info['sources'] = sources
@@ -1183,12 +1404,116 @@ class Configuration(object):
self._fix_paths_dict(build_info)
- self.libraries.append((name,build_info))
+ # Add to libraries list so that it is build with build_clib
+ self.libraries.append((name, build_info))
+
+ def add_installed_library(self, name, sources, install_dir, build_info=None):
+ """Similar to add_library, but the corresponding library is installed.
+
+ Most C libraries are only used to build python extensions, but
+ libraries built through this method will be installed so that they can
+ be reused by third-party. install_dir is relative to the current
+ subpackage.
+
+ Parameters
+ ----------
+ name: str
+ name of the installed library
+ sources: seq
+ list of source files of the library
+ install_dir: str
+ path where to install the library (relative to the current
+ sub-package)
+
+ See also
+ --------
+ add_library, add_npy_pkg_config, get_info
+
+ Notes
+ -----
+ The best way to encode the necessary options to link against those C
+ libraries is to use a libname.ini file, and use get_info to retrieve
+ that information (see the add_npy_pkg_config method for more
+ information).
+ """
+ if not build_info:
+ build_info = {}
+
+ install_dir = os.path.join(self.package_path, install_dir)
+ self._add_library(name, sources, install_dir, build_info)
+ self.installed_libraries.append(InstallableLib(name, build_info, install_dir))
+
+ def add_npy_pkg_config(self, template, install_dir, subst_dict=None):
+ """Generate a npy-pkg config file from the template, and install it in
+ given install directory, using subst_dict for variable substitution.
+
+ Parameters
+ ----------
+ template: str
+ the path of the template, relative to the current package path
+ install_dir: str
+ where to install the npy-pkg config file, relative to the current
+ package path
+ subst_dict: dict (None by default)
+ if given, any string of the form @key@ will be replaced by
+ subst_dict[key] in the template file when installed. The install
+ prefix is always available through the variable @prefix@, since the
+ install prefix is not easy to get reliably from setup.py.
+
+ See also
+ --------
+ add_installed_library, get_info
+
+ Notes
+ -----
+ This works for both standard installs and in-place builds, i.e. the
+ @prefix@ refers to the source directory for in-place builds.
+
+ Examples
+ --------
+ config.add_npy_pkg_config('foo.ini.in', 'lib', {'foo': bar})
+
+ Assuming the foo.ini.in file has the following content::
+
+ [meta]
+ Name=@foo@
+ Version=1.0
+ Description=dummy description
+
+ [default]
+ Cflags=-I@prefix@/include
+ Libs=
+
+ The generated file will have the following content::
+
+ [meta]
+ Name=bar
+ Version=1.0
+ Description=dummy description
+
+ [default]
+ Cflags=-Iprefix_dir/include
+ Libs=
+
+ and will be installed as foo.ini in the 'lib' subpath.
+ """
+ if subst_dict is None:
+ subst_dict = {}
+ basename = os.path.splitext(template)[0]
+ template = os.path.join(self.package_path, template)
+
+ if self.installed_pkg_config.has_key(self.name):
+ self.installed_pkg_config[self.name].append((template, install_dir,
+ subst_dict))
+ else:
+ self.installed_pkg_config[self.name] = [(template, install_dir,
+ subst_dict)]
- dist = self.get_distribution()
- if dist is not None:
- self.warn('distutils distribution has been initialized,'\
- ' it may be too late to add a library '+ name)
+ def add_scons_installed_library(self, name, install_dir):
+ """Add an scons-built installable library to distutils.
+ """
+ install_dir = os.path.join(self.package_path, install_dir)
+ self.installed_libraries.append(InstallableLib(name, {}, install_dir))
def add_sconscript(self, sconscript, subpackage_path=None,
standalone = False, pre_hook = None,
@@ -1240,6 +1565,10 @@ class Configuration(object):
def add_scripts(self,*files):
"""Add scripts to configuration.
+
+ Add the sequence of files to the beginning of the scripts list.
+ Scripts will be installed under the <prefix>/bin/ directory.
+
"""
scripts = self.paths(files)
dist = self.get_distribution()
@@ -1287,6 +1616,9 @@ class Configuration(object):
return s
def get_config_cmd(self):
+ """
+ Returns the numpy.distutils config command instance.
+ """
cmd = get_cmd('config')
cmd.ensure_finalized()
cmd.dump_source = 0
@@ -1298,14 +1630,24 @@ class Configuration(object):
return cmd
def get_build_temp_dir(self):
+ """
+ Return a path to a temporary directory where temporary files should be
+ placed.
+ """
cmd = get_cmd('build')
cmd.ensure_finalized()
return cmd.build_temp
def have_f77c(self):
"""Check for availability of Fortran 77 compiler.
+
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
+
+ Notes
+ -----
+ Returns True if a Fortran 77 compiler is available (i.e. a simple
+ Fortran 77 program could be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
@@ -1317,8 +1659,14 @@ class Configuration(object):
def have_f90c(self):
"""Check for availability of Fortran 90 compiler.
+
Use it inside source generating function to ensure that
setup distribution instance has been initialized.
+
+ Notes
+ -----
+ Returns True if a Fortran 90 compiler is available (i.e. a simple
+ Fortran 90 program could be compiled successfully).
"""
simple_fortran_subroutine = '''
subroutine simple
@@ -1378,6 +1726,16 @@ class Configuration(object):
def get_version(self, version_file=None, version_variable=None):
"""Try to get version string of a package.
+
+ Return a version string of the current package or None if the version
+ information could not be detected.
+
+ Notes
+ -----
+ This method scans files named
+ __version__.py, <packagename>_version.py, version.py, and
+ __svn_version__.py for string variables version, __version\__, and
+ <packagename>_version, until a version number is found.
"""
version = getattr(self,'version',None)
if version is not None:
@@ -1431,11 +1789,20 @@ class Configuration(object):
return version
def make_svn_version_py(self, delete=True):
- """Generate package __svn_version__.py file from SVN revision number,
+ """Appends a data function to the data_files list that will generate
+ __svn_version__.py file to the current package directory.
+
+ Generate package __svn_version__.py file from SVN revision number,
it will be removed after python exits but will be available
when sdist, etc commands are executed.
+ Notes
+ -----
If __svn_version__.py existed before, nothing is done.
+
+ This is
+ intended for working with source directories that are in an SVN
+ repository.
"""
target = njoin(self.local_path,'__svn_version__.py')
revision = self._get_svn_revision(self.local_path)
@@ -1467,6 +1834,10 @@ class Configuration(object):
def make_config_py(self,name='__config__'):
"""Generate package __config__.py file containing system_info
information used during building the package.
+
+ This file is installed to the
+ package installation directory.
+
"""
self.py_modules.append((self.name,name,generate_config_py))
@@ -1478,6 +1849,9 @@ class Configuration(object):
def get_info(self,*names):
"""Get resources information.
+
+ Return information (from system_info.get_info) for all of the names in
+ the argument list in a single dictionary.
"""
from system_info import get_info, dict_append
info_dict = {}
@@ -1507,6 +1881,90 @@ def get_numpy_include_dirs():
# else running numpy/core/setup.py
return include_dirs
+def get_npy_pkg_dir():
+ """Return the path of the npy-pkg-config directory."""
+ # XXX: import here for bootstrapping reasons
+ import numpy
+ d = os.path.join(os.path.dirname(numpy.__file__),
+ 'core', 'lib', 'npy-pkg-config')
+ return d
+
+def get_pkg_info(pkgname, dirs=None):
+ """Given a clib package name, returns an info dict with the necessary
+ options to use the clib.
+
+ Parameters
+ ----------
+ pkgname: str
+ name of the package (should match the name of the .ini file, without
+ the extension, e.g. foo for the file foo.ini)
+ dirs: seq {None}
+ if given, should be a sequence of additional directories where to look
+ for npy-pkg-config files. Those directories are searched prior to the
+ numpy one.
+
+ Note
+ ----
+ Raise a numpy.distutils.PkgNotFound exception if the package is not
+ found.
+
+ See Also
+ --------
+ add_npy_pkg_info, add_installed_library, get_info
+ """
+ from numpy.distutils.npy_pkg_config import read_config
+
+ if dirs:
+ dirs.append(get_npy_pkg_dir())
+ else:
+ dirs = [get_npy_pkg_dir()]
+ return read_config(pkgname, dirs)
+
+def get_info(pkgname, dirs=None):
+ """Given a clib package name, returns an info dict with the necessary
+ options to use the clib.
+
+ Parameters
+ ----------
+ pkgname: str
+ name of the package (should match the name of the .ini file, without
+ the extension, e.g. foo for the file foo.ini)
+ dirs: seq {None}
+ if given, should be a sequence of additional directories where to look
+ for npy-pkg-config files. Those directories are searched prior to the
+ numpy one.
+
+ Note
+ ----
+ Raise a numpy.distutils.PkgNotFound exception if the package is not
+ found.
+
+ See Also
+ --------
+ add_npy_pkg_info, add_installed_library, get_pkg_info
+
+ Example
+ -------
+ To get the necessary information for the npymath library from NumPy:
+
+ >>> npymath_info = get_info('npymath')
+ >>> config.add_extension('foo', sources=['foo.c'], extra_info=npymath_info)
+ """
+ from numpy.distutils.npy_pkg_config import parse_flags
+ pkg_info = get_pkg_info(pkgname, dirs)
+
+ # Translate LibraryInfo instance into a build_info dict
+ info = parse_flags(pkg_info.cflags())
+ for k, v in parse_flags(pkg_info.libs()).items():
+ info[k].extend(v)
+
+ # add_extension's extra_info argument expects define_macros, not macros
+ info['define_macros'] = info['macros']
+ del info['macros']
+ del info['ignored']
+
+ return info
+
def is_bootstrapping():
import __builtin__
try:
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
new file mode 100644
index 000000000..2e42ee63a
--- /dev/null
+++ b/numpy/distutils/npy_pkg_config.py
@@ -0,0 +1,306 @@
+from ConfigParser import SafeConfigParser, NoOptionError
+import re
+import os
+import shlex
+
+__all__ = ['FormatError', 'PkgNotFound', 'LibraryInfo', 'VariableSet',
+ 'read_config', 'parse_flags']
+
+_VAR = re.compile('\$\{([a-zA-Z0-9_-]+)\}')
+
+class FormatError(IOError):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+class PkgNotFound(IOError):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg
+
+def parse_flags(line):
+ lexer = shlex.shlex(line)
+ lexer.whitespace_split = True
+
+ d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
+ 'macros': [], 'ignored': []}
+ def next_token(t):
+ if t.startswith('-I'):
+ if len(t) > 2:
+ d['include_dirs'].append(t[2:])
+ else:
+ t = lexer.get_token()
+ d['include_dirs'].append(t)
+ elif t.startswith('-L'):
+ if len(t) > 2:
+ d['library_dirs'].append(t[2:])
+ else:
+ t = lexer.get_token()
+ d['library_dirs'].append(t)
+ elif t.startswith('-l'):
+ d['libraries'].append(t[2:])
+ elif t.startswith('-D'):
+ d['macros'].append(t[2:])
+ else:
+ d['ignored'].append(t)
+ return lexer.get_token()
+
+ t = lexer.get_token()
+ while t:
+ t = next_token(t)
+
+ return d
+
+class LibraryInfo(object):
+ def __init__(self, name, description, version, sections, vars, requires=None):
+ self.name = name
+ self.description = description
+ if requires:
+ self.requires = requires
+ else:
+ self.requires = []
+ self.version = version
+ self._sections = sections
+ self.vars = vars
+
+ def sections(self):
+ return self._sections.keys()
+
+ def cflags(self, section="default"):
+ return self.vars.interpolate(self._sections[section]['cflags'])
+
+ def libs(self, section="default"):
+ return self.vars.interpolate(self._sections[section]['libs'])
+
+ def __str__(self):
+ m = ['Name: %s' % self.name]
+ m.append('Description: %s' % self.description)
+ if self.requires:
+ m.append('Requires:')
+ else:
+ m.append('Requires: %s' % ",".join(self.requires))
+ m.append('Version: %s' % self.version)
+
+ return "\n".join(m)
+
+class VariableSet(object):
+ def __init__(self, d):
+ self._raw_data = dict([(k, v) for k, v in d.items()])
+
+ self._re = {}
+ self._re_sub = {}
+
+ self._init_parse()
+
+ def _init_parse(self):
+ for k, v in self._raw_data.items():
+ self._init_parse_var(k, v)
+
+ def _init_parse_var(self, name, value):
+ self._re[name] = re.compile(r'\$\{%s\}' % name)
+ self._re_sub[name] = value
+
+ def interpolate(self, value):
+ # Brute force: we keep interpolating until there is no '${var}' anymore
+ # or until interpolated string is equal to input string
+ def _interpolate(value):
+ for k in self._re.keys():
+ value = self._re[k].sub(self._re_sub[k], value)
+ return value
+ while _VAR.search(value):
+ nvalue = _interpolate(value)
+ if nvalue == value:
+ break
+ value = nvalue
+
+ return value
+
+ def variables(self):
+ return self._raw_data.keys()
+
+ # Emulate a dict to set/get variables values
+ def __getitem__(self, name):
+ return self._raw_data[name]
+
+ def __setitem__(self, name, value):
+ self._raw_data[name] = value
+ self._init_parse_var(name, value)
+
+def parse_meta(config):
+ if not config.has_section('meta'):
+ raise FormatError("No meta section found !")
+
+ d = {}
+ for name, value in config.items('meta'):
+ d[name] = value
+
+ for k in ['name', 'description', 'version']:
+ if not d.has_key(k):
+ raise FormatError("Option %s (section [meta]) is mandatory, "
+ "but not found" % k)
+
+ if not d.has_key('requires'):
+ d['requires'] = []
+
+ return d
+
+def parse_variables(config):
+ if not config.has_section('variables'):
+ raise FormatError("No variables section found !")
+
+ d = {}
+
+ for name, value in config.items("variables"):
+ d[name] = value
+
+ return VariableSet(d)
+
+def parse_sections(config):
+ return meta_d, r
+
+def pkg_to_filename(pkg_name):
+ return "%s.ini" % pkg_name
+
+def parse_config(filename, dirs=None):
+ if dirs:
+ filenames = [os.path.join(d, filename) for d in dirs]
+ else:
+ filenames = [filename]
+
+ config = SafeConfigParser()
+ n = config.read(filenames)
+ if not len(n) >= 1:
+ raise PkgNotFound("Could not find file(s) %s" % str(filenames))
+
+ # Parse meta and variables sections
+ meta = parse_meta(config)
+
+ vars = {}
+ if config.has_section('variables'):
+ for name, value in config.items("variables"):
+ vars[name] = value.replace("\\", "\\\\")
+
+ # Parse "normal" sections
+ secs = [s for s in config.sections() if not s in ['meta', 'variables']]
+ sections = {}
+
+ requires = {}
+ for s in secs:
+ d = {}
+ if config.has_option(s, "requires"):
+ requires[s] = config.get(s, 'requires')
+
+ for name, value in config.items(s):
+ d[name] = value
+ sections[s] = d
+
+ return meta, vars, sections, requires
+
+def _read_config_imp(filenames, dirs=None):
+ def _read_config(f):
+ meta, vars, sections, reqs = parse_config(f, dirs)
+ # recursively add sections and variables of required libraries
+ for rname, rvalue in reqs.items():
+ nmeta, nvars, nsections, nreqs = _read_config(pkg_to_filename(rvalue))
+
+ # Update var dict for variables not in 'top' config file
+ for k, v in nvars.items():
+ if not vars.has_key(k):
+ vars[k] = v
+
+ # Update sec dict
+ for oname, ovalue in nsections[rname].items():
+ sections[rname][oname] += ' %s' % ovalue
+
+ return meta, vars, sections, reqs
+
+ meta, vars, sections, reqs = _read_config(filenames)
+
+ return LibraryInfo(name=meta["name"], description=meta["description"],
+ version=meta["version"], sections=sections, vars=VariableSet(vars))
+
+# Trivial cache to cache LibraryInfo instances creation. To be really
+# efficient, the cache should be handled in read_config, since a same file can
+# be parsed many time outside LibraryInfo creation, but I doubt this will be a
+# problem in practice
+_CACHE = {}
+def read_config(pkgname, dirs=None):
+ try:
+ return _CACHE[pkgname]
+ except KeyError:
+ v = _read_config_imp(pkg_to_filename(pkgname), dirs)
+ _CACHE[pkgname] = v
+ return v
+
+# TODO:
+# - implements version comparison (modversion + atleast)
+
+# pkg-config simple emulator - useful for debugging, and maybe later to query
+# the system
+if __name__ == '__main__':
+ import sys
+ from optparse import OptionParser
+ import glob
+
+ parser = OptionParser()
+ parser.add_option("--cflags", dest="cflags", action="store_true",
+ help="output all preprocessor and compiler flags")
+ parser.add_option("--libs", dest="libs", action="store_true",
+ help="output all linker flags")
+ parser.add_option("--use-section", dest="section",
+ help="use this section instead of default for options")
+ parser.add_option("--version", dest="version", action="store_true",
+ help="output version")
+ parser.add_option("--atleast-version", dest="min_version",
+ help="Minimal version")
+ parser.add_option("--list-all", dest="list_all", action="store_true",
+ help="List all packages found in the search path")
+ parser.add_option("--define-variable", dest="define_variable",
+ help="Replace variable with the given value")
+
+ (options, args) = parser.parse_args(sys.argv)
+
+ if len(args) < 2:
+ raise ValueError("Expect package name on the command line:")
+
+ if options.list_all:
+ files = glob.glob("*.ini")
+ for f in files:
+ info = read_config(f)
+ print "%s\t%s - %s" % (info.name, info.name, info.description)
+
+ pkg_name = args[1]
+ import os
+ d = os.environ.get('NPY_PKG_CONFIG_PATH')
+ if d:
+ info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.', d])
+ else:
+ info = read_config(pkg_name, ['numpy/core/lib/npy-pkg-config', '.'])
+
+ if options.section:
+ section = options.section
+ else:
+ section = "default"
+
+ if options.define_variable:
+ m = re.search('([\S]+)=([\S]+)', options.define_variable)
+ if not m:
+ raise ValueError("--define-variable option should be of " \
+ "the form --define-variable=foo=bar")
+ else:
+ name = m.group(1)
+ value = m.group(2)
+ info.vars[name] = value
+
+ if options.cflags:
+ print info.cflags(section)
+ if options.libs:
+ print info.libs(section)
+ if options.version:
+ print info.version
+ if options.min_version:
+ print info.version >= options.min_version
diff --git a/numpy/distutils/numpy_distribution.py b/numpy/distutils/numpy_distribution.py
index 681b8b316..4424e34cd 100644
--- a/numpy/distutils/numpy_distribution.py
+++ b/numpy/distutils/numpy_distribution.py
@@ -7,6 +7,10 @@ class NumpyDistribution(Distribution):
def __init__(self, attrs = None):
# A list of (sconscripts, pre_hook, post_hook, src, parent_names)
self.scons_data = []
+ # A list of installable libraries
+ self.installed_libraries = []
+ # A dict of pkg_config files to generate/install
+ self.installed_pkg_config = {}
Distribution.__init__(self, attrs)
def has_scons_scripts(self):
diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py
new file mode 100644
index 000000000..5553aa878
--- /dev/null
+++ b/numpy/distutils/tests/test_npy_pkg_config.py
@@ -0,0 +1,96 @@
+import os
+from tempfile import mkstemp
+
+from numpy.testing import *
+from numpy.distutils.npy_pkg_config import read_config, parse_flags
+
+simple = """\
+[meta]
+Name = foo
+Description = foo lib
+Version = 0.1
+
+[default]
+cflags = -I/usr/include
+libs = -L/usr/lib
+"""
+simple_d = {'cflags': '-I/usr/include', 'libflags': '-L/usr/lib',
+ 'version': '0.1', 'name': 'foo'}
+
+simple_variable = """\
+[meta]
+Name = foo
+Description = foo lib
+Version = 0.1
+
+[variables]
+prefix = /foo/bar
+libdir = ${prefix}/lib
+includedir = ${prefix}/include
+
+[default]
+cflags = -I${includedir}
+libs = -L${libdir}
+"""
+simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
+ 'version': '0.1', 'name': 'foo'}
+
+class TestLibraryInfo(TestCase):
+ def test_simple(self):
+ fd, filename = mkstemp('foo.ini')
+ try:
+ pkg = os.path.splitext(filename)[0]
+ try:
+ os.write(fd, simple)
+ finally:
+ os.close(fd)
+
+ out = read_config(pkg)
+ self.failUnless(out.cflags() == simple_d['cflags'])
+ self.failUnless(out.libs() == simple_d['libflags'])
+ self.failUnless(out.name == simple_d['name'])
+ self.failUnless(out.version == simple_d['version'])
+ finally:
+ os.remove(filename)
+
+ def test_simple_variable(self):
+ fd, filename = mkstemp('foo.ini')
+ try:
+ pkg = os.path.splitext(filename)[0]
+ try:
+ os.write(fd, simple_variable)
+ finally:
+ os.close(fd)
+
+ out = read_config(pkg)
+ self.failUnless(out.cflags() == simple_variable_d['cflags'])
+ self.failUnless(out.libs() == simple_variable_d['libflags'])
+ self.failUnless(out.name == simple_variable_d['name'])
+ self.failUnless(out.version == simple_variable_d['version'])
+
+ out.vars['prefix'] = '/Users/david'
+ self.failUnless(out.cflags() == '-I/Users/david/include')
+ finally:
+ os.remove(filename)
+
+class TestParseFlags(TestCase):
+ def test_simple_cflags(self):
+ d = parse_flags("-I/usr/include")
+ self.failUnless(d['include_dirs'] == ['/usr/include'])
+
+ d = parse_flags("-I/usr/include -DFOO")
+ self.failUnless(d['include_dirs'] == ['/usr/include'])
+ self.failUnless(d['macros'] == ['FOO'])
+
+ d = parse_flags("-I /usr/include -DFOO")
+ self.failUnless(d['include_dirs'] == ['/usr/include'])
+ self.failUnless(d['macros'] == ['FOO'])
+
+ def test_simple_lflags(self):
+ d = parse_flags("-L/usr/lib -lfoo -L/usr/lib -lbar")
+ self.failUnless(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ self.failUnless(d['libraries'] == ['foo', 'bar'])
+
+ d = parse_flags("-L /usr/lib -lfoo -L/usr/lib -lbar")
+ self.failUnless(d['library_dirs'] == ['/usr/lib', '/usr/lib'])
+ self.failUnless(d['libraries'] == ['foo', 'bar'])
diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py
index 22e353b0e..154c74621 100644
--- a/numpy/doc/constants.py
+++ b/numpy/doc/constants.py
@@ -64,9 +64,9 @@ add_newdoc('numpy', 'NAN',
See Also
--------
- isnan : Shows which elements are Not a Number.
- isfinite : Shows which elements are finite (not one of
- Not a Number, positive infinity and negative infinity)
+ isnan: Shows which elements are Not a Number.
+
+ isfinite: Shows which elements are finite (not one of Not a Number, positive infinity and negative infinity)
Notes
-----
@@ -182,8 +182,8 @@ add_newdoc('numpy', 'NaN',
--------
isnan : Shows which elements are Not a Number.
- isfinite : Shows which elements are finite (not one of
- Not a Number, positive infinity and negative infinity)
+
+ isfinite : Shows which elements are finite (not one of Not a Number, positive infinity and negative infinity)
Notes
-----
diff --git a/numpy/doc/creation.py b/numpy/doc/creation.py
index 133765678..d57c7c261 100644
--- a/numpy/doc/creation.py
+++ b/numpy/doc/creation.py
@@ -76,7 +76,7 @@ generally will not do for arbitrary start, stop, and step values.
indices() will create a set of arrays (stacked as a one-higher dimensioned
array), one per dimension with each representing variation in that dimension.
-An examples illustrates much better than a verbal description: ::
+An example illustrates much better than a verbal description: ::
>>> np.indices((3,3))
array([[[0, 0, 0], [1, 1, 1], [2, 2, 2]], [[0, 1, 2], [0, 1, 2], [0, 1, 2]]])
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index bbd44f8ee..de0338060 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -11,59 +11,133 @@ Gerard-Marchant - http://www.scipy.org/Subclasses.
Introduction
------------
-Subclassing ndarray is relatively simple, but you will need to
-understand some behavior of ndarrays to understand some minor
-complications to subclassing. There are examples at the bottom of the
-page, but you will probably want to read the background to understand
-why subclassing works as it does.
+
+Subclassing ndarray is relatively simple, but it has some complications
+compared to other Python objects. On this page we explain the machinery
+that allows you to subclass ndarray, and the implications for
+implementing a subclass.
ndarrays and object creation
============================
-The creation of ndarrays is complicated by the need to return views of
-ndarrays, that are also ndarrays. For example:
+
+Subclassing ndarray is complicated by the fact that new instances of
+ndarray classes can come about in three different ways. These are:
+
+#. Explicit constructor call - as in ``MySubClass(params)``. This is
+ the usual route to Python instance creation.
+#. View casting - casting an existing ndarray as a given subclass
+#. New from template - creating a new instance from a template
+ instance. Examples include returning slices from a subclassed array,
+ creating return types from ufuncs, and copying arrays. See
+ :ref:`new-from-template` for more details
+
+The last two are characteristics of ndarrays - in order to support
+things like array slicing. The complications of subclassing ndarray are
+due to the mechanisms numpy has to support these latter two routes of
+instance creation.
+
+.. _view-casting:
+
+View casting
+------------
+
+*View casting* is the standard ndarray mechanism by which you take an
+ndarray of any subclass, and return a view of the array as another
+(specified) subclass:
>>> import numpy as np
+>>> # create a completely useless ndarray subclass
+>>> class C(np.ndarray): pass
+>>> # create a standard ndarray
>>> arr = np.zeros((3,))
->>> type(arr)
-<type 'numpy.ndarray'>
->>> v = arr[1:]
->>> type(v)
-<type 'numpy.ndarray'>
->>> v is arr
+>>> # take a view of it, as our useless subclass
+>>> c_arr = arr.view(C)
+>>> type(c_arr)
+<class 'C'>
+
+.. _new-from-template:
+
+Creating new from template
+--------------------------
+
+New instances of an ndarray subclass can also come about by a very
+similar mechanism to :ref:`view-casting`, when numpy finds it needs to
+create a new instance from a template instance. The most obvious place
+this has to happen is when you are taking slices of subclassed arrays.
+For example:
+
+>>> v = c_arr[1:]
+>>> type(v) # the view is of type 'C'
+<class 'C'>
+>>> v is c_arr # but it's a new instance
False
-So, when we take a view (here a slice) from the ndarray, we return a
-new ndarray, that points to the data in the original. When we
-subclass ndarray, taking a view (such as a slice) needs to return an
-object of our own class. There is machinery to do this, but it is
-this machinery that makes subclassing slightly non-standard.
+The slice is a *view* onto the original ``c_arr`` data. So, when we
+take a view from the ndarray, we return a new ndarray, of the same
+class, that points to the data in the original.
-To allow subclassing, and views of subclasses, ndarray uses the
-ndarray ``__new__`` method for the main work of object initialization,
-rather then the more usual ``__init__`` method.
+There are other points in the use of ndarrays where we need such views,
+such as copying arrays (``c_arr.copy()``), creating ufunc output arrays
+(see also :ref:`array-wrap`), and reducing methods (like
+``c_arr.mean()``).
-``__new__`` and ``__init__``
-============================
+Relationship of view casting and new-from-template
+--------------------------------------------------
+
+These paths both use the same machinery. We make the distinction here,
+because they result in different input to your methods. Specifically,
+:ref:`view-casting` means you have created a new instance of your array
+type from any potential subclass of ndarray. :ref:`new-from-template`
+means you have created a new instance of your class from a pre-existing
+instance, allowing you - for example - to copy across attributes that
+are particular to your subclass.
+
+Implications for subclassing
+----------------------------
-``__new__`` is a standard python method, and, if present, is called
-before ``__init__`` when we create a class instance. Consider the
-following::
+If we subclass ndarray, we need to deal not only with explicit
+construction of our array type, but also :ref:`view-casting` or
+:ref:`new-from-template`. Numpy has the machinery to do this, and it is
+this machinery that makes subclassing slightly non-standard.
+
+There are two aspects to the machinery that ndarray uses to support
+views and new-from-template in subclasses.
+
+The first is the use of the ``ndarray.__new__`` method for the main work
+of object initialization, rather than the more usual ``__init__``
+method. The second is the use of the ``__array_finalize__`` method to
+allow subclasses to clean up after the creation of views and new
+instances from templates.
+
+A brief Python primer on ``__new__`` and ``__init__``
+=====================================================
+
+``__new__`` is a standard Python method, and, if present, is called
+before ``__init__`` when we create a class instance. See the `python
+__new__ documentation
+<http://docs.python.org/reference/datamodel.html#object.__new__>`_ for more detail.
+
+For example, consider the following Python code:
+
+.. testcode::
class C(object):
def __new__(cls, *args):
+ print 'Cls in __new__:', cls
print 'Args in __new__:', args
return object.__new__(cls, *args)
+
def __init__(self, *args):
+ print 'type(self) in __init__:', type(self)
print 'Args in __init__:', args
- C('hello')
-
-The code gives the following output::
+meaning that we get:
- cls is: <class '__main__.C'>
- Args in __new__: ('hello',)
- self is : <__main__.C object at 0xb7dc720c>
- Args in __init__: ('hello',)
+>>> c = C('hello')
+Cls in __new__: <class 'C'>
+Args in __new__: ('hello',)
+type(self) in __init__: <class 'C'>
+Args in __init__: ('hello',)
When we call ``C('hello')``, the ``__new__`` method gets its own class
as first argument, and the passed argument, which is the string
@@ -79,34 +153,29 @@ done in the ``__new__`` method.
Why use ``__new__`` rather than just the usual ``__init__``? Because
in some cases, as for ndarray, we want to be able to return an object
-of some other class. Consider the following::
+of some other class. Consider the following:
- class C(object):
- def __new__(cls, *args):
- print 'cls is:', cls
- print 'Args in __new__:', args
- return object.__new__(cls, *args)
- def __init__(self, *args):
- print 'self is :', self
- print 'Args in __init__:', args
+.. testcode::
class D(C):
def __new__(cls, *args):
print 'D cls is:', cls
print 'D args in __new__:', args
return C.__new__(C, *args)
- def __init__(self, *args):
- print 'D self is :', self
- print 'D args in __init__:', args
- D('hello')
+ def __init__(self, *args):
+ # we never get here
+ print 'In D __init__'
-which gives::
+meaning that:
- D cls is: <class '__main__.D'>
- D args in __new__: ('hello',)
- cls is: <class '__main__.C'>
- Args in __new__: ('hello',)
+>>> obj = D('hello')
+D cls is: <class 'D'>
+D args in __new__: ('hello',)
+Cls in __new__: <class 'C'>
+Args in __new__: ('hello',)
+>>> type(obj)
+<class 'C'>
The definition of ``C`` is the same as before, but for ``D``, the
``__new__`` method returns an instance of class ``C`` rather than
@@ -133,21 +202,107 @@ this way, in its standard methods for taking views, but the ndarray
why not call ``obj = subdtype.__new__(...`` then? Because we may not
have a ``__new__`` method with the same call signature).
-So, when creating a new view object of our subclass, we need to be
-able to set any extra attributes from the original object of our
-class. This is the role of the ``__array_finalize__`` method of
-ndarray. ``__array_finalize__`` is called from within the
-ndarray machinery, each time we create an ndarray of our own class,
-and passes in the new view object, created as above, as well as the
-old object from which the view has been taken. In it we can take any
-attributes from the old object and put then into the new view object,
-or do any other related processing. Now we are ready for a simple
-example.
+The role of ``__array_finalize__``
+==================================
+
+``__array_finalize__`` is the mechanism that numpy provides to allow
+subclasses to handle the various ways that new instances get created.
+
+Remember that subclass instances can come about in these three ways:
+
+#. explicit constructor call (``obj = MySubClass(params)``). This will
+ call the usual sequence of ``MySubClass.__new__`` then (if it exists)
+ ``MySubClass.__init__``.
+#. :ref:`view-casting`
+#. :ref:`new-from-template`
+
+Our ``MySubClass.__new__`` method only gets called in the case of the
+explicit constructor call, so we can't rely on ``MySubClass.__new__`` or
+``MySubClass.__init__`` to deal with the view casting and
+new-from-template. It turns out that ``MySubClass.__array_finalize__``
+*does* get called for all three methods of object creation, so this is
+where our object creation housekeeping usually goes.
+
+* For the explicit constructor call, our subclass will need to create a
+ new ndarray instance of its own class. In practice this means that
+ we, the authors of the code, will need to make a call to
+ ``ndarray.__new__(MySubClass,...)``, or do view casting of an existing
+ array (see below)
+* For view casting and new-from-template, the equivalent of
+ ``ndarray.__new__(MySubClass,...`` is called, at the C level.
+
+The arguments that ``__array_finalize__`` receives differ for the three
+methods of instance creation above.
+
+The following code allows us to look at the call sequences and arguments:
+
+.. testcode::
+
+ import numpy as np
+
+ class C(np.ndarray):
+ def __new__(cls, *args, **kwargs):
+ print 'In __new__ with class %s' % cls
+ return np.ndarray.__new__(cls, *args, **kwargs)
+
+ def __init__(self, *args, **kwargs):
+ # in practice you probably will not need or want an __init__
+ # method for your subclass
+ print 'In __init__ with class %s' % self.__class__
+
+ def __array_finalize__(self, obj):
+ print 'In array_finalize:'
+ print ' self type is %s' % type(self)
+ print ' obj type is %s' % type(obj)
+
+
+Now:
+
+>>> # Explicit constructor
+>>> c = C((10,))
+In __new__ with class <class 'C'>
+In array_finalize:
+ self type is <class 'C'>
+ obj type is <type 'NoneType'>
+In __init__ with class <class 'C'>
+>>> # View casting
+>>> a = np.arange(10)
+>>> cast_a = a.view(C)
+In array_finalize:
+ self type is <class 'C'>
+ obj type is <type 'numpy.ndarray'>
+>>> # Slicing (example of new-from-template)
+>>> cv = c[:1]
+In array_finalize:
+ self type is <class 'C'>
+ obj type is <class 'C'>
+
+The signature of ``__array_finalize__`` is::
+
+ def __array_finalize__(self, obj):
+
+``ndarray.__new__`` passes ``__array_finalize__`` the new object, of our
+own class (``self``) as well as the object from which the view has been
+taken (``obj``). As you can see from the output above, the ``self`` is
+always a newly created instance of our subclass, and the type of ``obj``
+differs for the three instance creation methods:
+
+* When called from the explicit constructor, ``obj`` is ``None``
+* When called from view casting, ``obj`` can be an instance of any
+ subclass of ndarray, including our own.
+* When called in new-from-template, ``obj`` is another instance of our
+ own subclass, that we might use to update the new ``self`` instance.
+
+Because ``__array_finalize__`` is the only method that always sees new
+instances being created, it is the sensible place to fill in instance
+defaults for new object attributes, among other tasks.
+
+This may be clearer with an example.
Simple example - adding an extra attribute to ndarray
-----------------------------------------------------
-::
+.. testcode::
import numpy as np
@@ -156,45 +311,79 @@ Simple example - adding an extra attribute to ndarray
def __new__(subtype, shape, dtype=float, buffer=None, offset=0,
strides=None, order=None, info=None):
# Create the ndarray instance of our type, given the usual
- # input arguments. This will call the standard ndarray
- # constructor, but return an object of our type
+ # ndarray input arguments. This will call the standard
+ # ndarray constructor, but return an object of our type.
+ # It also triggers a call to InfoArray.__array_finalize__
obj = np.ndarray.__new__(subtype, shape, dtype, buffer, offset, strides,
order)
- # add the new attribute to the created instance
+ # set the new 'info' attribute to the value passed
obj.info = info
# Finally, we must return the newly created object:
return obj
- def __array_finalize__(self,obj):
- # reset the attribute from passed original object
+ def __array_finalize__(self, obj):
+ # ``self`` is a new object resulting from
+ # ndarray.__new__(InfoArray, ...), therefore it only has
+ # attributes that the ndarray.__new__ constructor gave it -
+ # i.e. those of a standard ndarray.
+ #
+ # We could have got to the ndarray.__new__ call in 3 ways:
+ # From an explicit constructor - e.g. InfoArray():
+ # obj is None
+ # (we're in the middle of the InfoArray.__new__
+ # constructor, and self.info will be set when we return to
+ # InfoArray.__new__)
+ if obj is None: return
+        # From view casting - e.g. arr.view(InfoArray):
+        #    obj is arr
+        #    (type(obj) can be InfoArray)
+        # From new-from-template - e.g. infoarr[:3]
+ # type(obj) is InfoArray
+ #
+ # Note that it is here, rather than in the __new__ method,
+ # that we set the default value for 'info', because this
+ # method sees all creation of default objects - with the
+ # InfoArray.__new__ constructor, but also with
+ # arr.view(InfoArray).
self.info = getattr(obj, 'info', None)
# We do not need to return anything
- obj = InfoArray(shape=(3,), info='information')
- print type(obj)
- print obj.info
- v = obj[1:]
- print type(v)
- print v.info
-which gives::
-
- <class '__main__.InfoArray'>
- information
- <class '__main__.InfoArray'>
- information
-
-This class isn't very useful, because it has the same constructor as
-the bare ndarray object, including passing in buffers and shapes and
-so on. We would probably prefer to be able to take an already formed
-ndarray from the usual numpy calls to ``np.array`` and return an
+Using the object looks like this:
+
+ >>> obj = InfoArray(shape=(3,)) # explicit constructor
+ >>> type(obj)
+ <class 'InfoArray'>
+ >>> obj.info is None
+ True
+ >>> obj = InfoArray(shape=(3,), info='information')
+ >>> obj.info
+ 'information'
+ >>> v = obj[1:] # new-from-template - here - slicing
+ >>> type(v)
+ <class 'InfoArray'>
+ >>> v.info
+ 'information'
+ >>> arr = np.arange(10)
+ >>> cast_arr = arr.view(InfoArray) # view casting
+ >>> type(cast_arr)
+ <class 'InfoArray'>
+ >>> cast_arr.info is None
+ True
+
+This class isn't very useful, because it has the same constructor as the
+bare ndarray object, including passing in buffers and shapes and so on.
+We would probably prefer the constructor to be able to take an already
+formed ndarray from the usual numpy calls to ``np.array`` and return an
object.
Slightly more realistic example - attribute added to existing array
-------------------------------------------------------------------
-Here is a class (with thanks to Pierre GM for the original example),
-that takes array that already exists, casts as our type, and adds an
-extra attribute::
+
+Here is a class that takes a standard ndarray that already exists, casts
+as our type, and adds an extra attribute.
+
+.. testcode::
import numpy as np
@@ -209,77 +398,154 @@ extra attribute::
# Finally, we must return the newly created object:
return obj
- def __array_finalize__(self,obj):
- # reset the attribute from passed original object
+ def __array_finalize__(self, obj):
+ # see InfoArray.__array_finalize__ for comments
+ if obj is None: return
self.info = getattr(obj, 'info', None)
- # We do not need to return anything
- arr = np.arange(5)
- obj = RealisticInfoArray(arr, info='information')
- print type(obj)
- print obj.info
- v = obj[1:]
- print type(v)
- print v.info
-which gives::
+So:
- <class '__main__.RealisticInfoArray'>
- information
- <class '__main__.RealisticInfoArray'>
- information
+ >>> arr = np.arange(5)
+ >>> obj = RealisticInfoArray(arr, info='information')
+ >>> type(obj)
+ <class 'RealisticInfoArray'>
+ >>> obj.info
+ 'information'
+ >>> v = obj[1:]
+ >>> type(v)
+ <class 'RealisticInfoArray'>
+ >>> v.info
+ 'information'
+
+.. _array-wrap:
``__array_wrap__`` for ufuncs
------------------------------
+-------------------------------------------------------
-Let's say you have an instance ``obj`` of your new subclass,
-``RealisticInfoArray``, and you pass it into a ufunc with another
-array::
+``__array_wrap__`` gets called at the end of numpy ufuncs and other numpy
+functions, to allow a subclass to set the type of the return value
+and update attributes and metadata. Let's show how this works with an example.
+First we make the same subclass as above, but with a different name and
+some print statements:
- arr = np.arange(5)
- ret = np.multiply.outer(arr, obj)
+.. testcode::
-When a numpy ufunc is called on a subclass of ndarray, the
-__array_wrap__ method is called to transform the result into a new
-instance of the subclass. By default, __array_wrap__ will call
-__array_finalize__, and the attributes will be inherited.
+ import numpy as np
-By defining a specific __array_wrap__ method for our subclass, we can
-tweak the output. The __array_wrap__ method requires one argument, the
-object on which the ufunc is applied, and an optional parameter
-*context*. This parameter is returned by some ufuncs as a 3-element
-tuple: (name of the ufunc, argument of the ufunc, domain of the
-ufunc). See the masked array subclass for an implementation.
+ class MySubClass(np.ndarray):
-Extra gotchas - custom __del__ methods and ndarray.base
--------------------------------------------------------
-One of the problems that ndarray solves is that of memory ownership of
-ndarrays and their views. Consider the case where we have created an
-ndarray, ``arr`` and then taken a view with ``v = arr[1:]``. If we
-then do ``del v``, we need to make sure that the ``del`` does not
-delete the memory pointed to by the view, because we still need it for
-the original ``arr`` object. Numpy therefore keeps track of where the
-data came from for a particular array or view, with the ``base`` attribute::
+ def __new__(cls, input_array, info=None):
+ obj = np.asarray(input_array).view(cls)
+ obj.info = info
+ return obj
- import numpy as np
+ def __array_finalize__(self, obj):
+ print 'In __array_finalize__:'
+ print ' self is %s' % repr(self)
+ print ' obj is %s' % repr(obj)
+ if obj is None: return
+ self.info = getattr(obj, 'info', None)
- # A normal ndarray, that owns its own data
- arr = np.zeros((4,))
- # In this case, base is None
- assert arr.base is None
- # We take a view
- v1 = arr[1:]
- # base now points to the array that it derived from
- assert v1.base is arr
- # Take a view of a view
- v2 = v1[1:]
- # base points to the view it derived from
- assert v2.base is v1
-
-The assertions all succeed in this case. In general, if the array
-owns its own memory, as for ``arr`` in this case, then ``arr.base``
-will be None - there are some exceptions to this - see the numpy book
-for more details.
+ def __array_wrap__(self, out_arr, context=None):
+ print 'In __array_wrap__:'
+ print ' self is %s' % repr(self)
+ print ' arr is %s' % repr(out_arr)
+ # then just call the parent
+ return np.ndarray.__array_wrap__(self, out_arr, context)
+
+We run a ufunc on an instance of our new array:
+
+>>> obj = MySubClass(np.arange(5), info='spam')
+In __array_finalize__:
+ self is MySubClass([0, 1, 2, 3, 4])
+ obj is array([0, 1, 2, 3, 4])
+>>> arr2 = np.arange(5)+1
+>>> ret = np.add(arr2, obj)
+In __array_wrap__:
+ self is MySubClass([0, 1, 2, 3, 4])
+ arr is array([1, 3, 5, 7, 9])
+In __array_finalize__:
+ self is MySubClass([1, 3, 5, 7, 9])
+ obj is MySubClass([0, 1, 2, 3, 4])
+>>> ret
+MySubClass([1, 3, 5, 7, 9])
+>>> ret.info
+'spam'
+
+Note that the ufunc (``np.add``) has called the ``__array_wrap__`` method of the
+input with the highest ``__array_priority__`` value, in this case
+``MySubClass.__array_wrap__``, with arguments ``self`` as ``obj``, and
+``out_arr`` as the (ndarray) result of the addition. In turn, the
+default ``__array_wrap__`` (``ndarray.__array_wrap__``) has cast the
+result to class ``MySubClass``, and called ``__array_finalize__`` -
+hence the copying of the ``info`` attribute. This has all happened at the C level.
+
+But, we could do anything we wanted:
+
+.. testcode::
+
+ class SillySubClass(np.ndarray):
+
+ def __array_wrap__(self, arr, context=None):
+ return 'I lost your data'
+
+>>> arr1 = np.arange(5)
+>>> obj = arr1.view(SillySubClass)
+>>> arr2 = np.arange(5)
+>>> ret = np.multiply(obj, arr2)
+>>> ret
+'I lost your data'
+
+So, by defining a specific ``__array_wrap__`` method for our subclass,
+we can tweak the output from ufuncs. The ``__array_wrap__`` method
+requires ``self``, then an argument - which is the result of the ufunc -
+and an optional parameter *context*. This parameter is returned by some
+ufuncs as a 3-element tuple: (name of the ufunc, argument of the ufunc,
+domain of the ufunc). ``__array_wrap__`` should return an instance of
+its containing class. See the masked array subclass for an
+implementation.
+
+In addition to ``__array_wrap__``, which is called on the way out of the
+ufunc, there is also an ``__array_prepare__`` method which is called on
+the way into the ufunc, after the output arrays are created but before any
+computation has been performed. The default implementation does nothing
+but pass through the array. ``__array_prepare__`` should not attempt to
+access the array data or resize the array, it is intended for setting the
+output array type, updating attributes and metadata, and performing any
+checks based on the input that may be desired before computation begins.
+Like ``__array_wrap__``, ``__array_prepare__`` must return an ndarray or
+subclass thereof or raise an error.
+
+Extra gotchas - custom ``__del__`` methods and ndarray.base
+-----------------------------------------------------------
+
+One of the problems that ndarray solves is keeping track of memory
+ownership of ndarrays and their views. Consider the case where we have
+created an ndarray, ``arr`` and have taken a slice with ``v = arr[1:]``.
+The two objects are looking at the same memory. Numpy keeps track of
+where the data came from for a particular array or view, with the
+``base`` attribute:
+
+>>> # A normal ndarray, that owns its own data
+>>> arr = np.zeros((4,))
+>>> # In this case, base is None
+>>> arr.base is None
+True
+>>> # We take a view
+>>> v1 = arr[1:]
+>>> # base now points to the array that it derived from
+>>> v1.base is arr
+True
+>>> # Take a view of a view
+>>> v2 = v1[1:]
+>>> # base points to the view it derived from
+>>> v2.base is v1
+True
+
+In general, if the array owns its own memory, as for ``arr`` in this
+case, then ``arr.base`` will be None - there are some exceptions to this
+- see the numpy book for more details.
The ``base`` attribute is useful in being able to tell whether we have
a view or the original array. This in turn can be useful if we need
@@ -289,4 +555,5 @@ the original array is deleted, but not the views. For an example of
how this can work, have a look at the ``memmap`` class in
``numpy.core``.
+
"""
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 8f4b979c5..9901eb11d 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -1942,9 +1942,9 @@ def _kind_func(string):
if string[0] in "'\"":
string = string[1:-1]
if real16pattern.match(string):
- return 16
- elif real8pattern.match(string):
return 8
+ elif real8pattern.match(string):
+ return 4
return 'kind('+string+')'
def _selected_int_kind_func(r):
diff --git a/numpy/fft/fftpack.py b/numpy/fft/fftpack.py
index 5fd56246e..fd973a123 100644
--- a/numpy/fft/fftpack.py
+++ b/numpy/fft/fftpack.py
@@ -141,7 +141,6 @@ def fft(a, n=None, axis=-1):
1.14383329e-17 +1.22460635e-16j,
-1.64863782e-15 +1.77635684e-15j])
-
>>> from numpy.fft import fft, fftfreq
>>> import matplotlib.pyplot as plt
>>> t = np.arange(256)
@@ -591,7 +590,6 @@ def fftn(a, s=None, axes=None):
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
-
>>> from numpy import meshgrid, pi, arange, sin, cos, log, abs
>>> from numpy.fft import fftn, fftshift
>>> import matplotlib.pyplot as plt
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index c5e7822f2..b8ae9a9f3 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -3,40 +3,36 @@ Set operations for 1D numeric arrays based on sorting.
:Contains:
ediff1d,
- unique1d,
+ unique,
intersect1d,
- intersect1d_nu,
setxor1d,
- setmember1d,
- setmember1d_nu,
+ in1d,
union1d,
setdiff1d
+:Deprecated:
+ unique1d,
+ intersect1d_nu,
+ setmember1d
+
:Notes:
-All functions work best with integer numerical arrays on input (e.g. indices).
-For floating point arrays, innacurate results may appear due to usual round-off
+For floating point arrays, inaccurate results may appear due to usual round-off
and floating point comparison issues.
-Except unique1d, union1d and intersect1d_nu, all functions expect inputs with
-unique elements. Speed could be gained in some operations by an implementaion of
-sort(), that can provide directly the permutation vectors, avoiding thus calls
-to argsort().
+Speed could be gained in some operations by an implementation of
+sort(), that can provide directly the permutation vectors, avoiding
+thus calls to argsort().
-Run _test_unique1d_speed() to compare performance of numpy.unique1d() and
-numpy.unique() - it should be the same.
-
-To do: Optionally return indices analogously to unique1d for all functions.
-
-created: 01.11.2005
-last revision: 07.01.2007
+To do: Optionally return indices analogously to unique for all functions.
:Author: Robert Cimrman
"""
__all__ = ['ediff1d', 'unique1d', 'intersect1d', 'intersect1d_nu', 'setxor1d',
- 'setmember1d', 'setmember1d_nu', 'union1d', 'setdiff1d']
+ 'setmember1d', 'union1d', 'setdiff1d', 'unique', 'in1d']
import numpy as np
+from numpy.lib.utils import deprecate_with_doc
def ediff1d(ary, to_end=None, to_begin=None):
"""
@@ -50,7 +46,7 @@ def ediff1d(ary, to_end=None, to_begin=None):
If provided, this number will be tacked onto the end of the returned
differences.
to_begin : number, optional
- If provided, this number will be taked onto the beginning of the
+ If provided, this number will be tacked onto the beginning of the
returned differences.
Returns
@@ -73,26 +69,26 @@ def ediff1d(ary, to_end=None, to_begin=None):
arrays.append(to_end)
if len(arrays) != 1:
- # We'll save ourselves a copy of a potentially large array in the common
- # case where neither to_begin or to_end was given.
+ # We'll save ourselves a copy of a potentially large array in
+ # the common case where neither to_begin or to_end was given.
ed = np.hstack(arrays)
return ed
-def unique1d(ar1, return_index=False, return_inverse=False):
+def unique(ar, return_index=False, return_inverse=False):
"""
Find the unique elements of an array.
Parameters
----------
- ar1 : array_like
+ ar : array_like
This array will be flattened if it is not already 1-D.
return_index : bool, optional
If True, also return the indices against `ar1` that result in the
unique array.
return_inverse : bool, optional
If True, also return the indices against the unique array that
- result in `ar1`.
+ result in `ar`.
Returns
-------
@@ -112,17 +108,17 @@ def unique1d(ar1, return_index=False, return_inverse=False):
Examples
--------
- >>> np.unique1d([1, 1, 2, 2, 3, 3])
+ >>> np.unique([1, 1, 2, 2, 3, 3])
array([1, 2, 3])
>>> a = np.array([[1, 1], [2, 3]])
- >>> np.unique1d(a)
+ >>> np.unique(a)
array([1, 2, 3])
Reconstruct the input from unique values:
- >>> np.unique1d([1,2,6,4,2,3,2], return_index=True)
+ >>> np.unique([1,2,6,4,2,3,2], return_index=True)
>>> x = [1,2,6,4,2,3,2]
- >>> u, i = np.unique1d(x, return_inverse=True)
+ >>> u, i = np.unique(x, return_inverse=True)
>>> u
array([1, 2, 3, 4, 6])
>>> i
@@ -131,14 +127,15 @@ def unique1d(ar1, return_index=False, return_inverse=False):
[1, 2, 6, 4, 2, 3, 2]
"""
- if return_index:
- import warnings
- warnings.warn("The order of the output arguments for "
- "`return_index` has changed. Before, "
- "the output was (indices, unique_arr), but "
- "has now been reversed to be more consistent.")
+ try:
+ ar = ar.flatten()
+ except AttributeError:
+ if not return_inverse and not return_index:
+ items = sorted(set(ar))
+ return np.asarray(items)
+ else:
+ ar = np.asanyarray(ar).flatten()
- ar = np.asanyarray(ar1).flatten()
if ar.size == 0:
if return_inverse and return_index:
return ar, np.empty(0, np.bool), np.empty(0, np.bool)
@@ -166,44 +163,18 @@ def unique1d(ar1, return_index=False, return_inverse=False):
flag = np.concatenate(([True], ar[1:] != ar[:-1]))
return ar[flag]
-def intersect1d(ar1, ar2):
- """
- Intersection returning repeated or unique elements common to both arrays.
-
- Parameters
- ----------
- ar1,ar2 : array_like
- Input arrays.
-
- Returns
- -------
- out : ndarray, shape(N,)
- Sorted 1D array of common elements with repeating elements.
-
- See Also
- --------
- intersect1d_nu : Returns only unique common elements.
- numpy.lib.arraysetops : Module with a number of other functions for
- performing set operations on arrays.
-
- Examples
- --------
- >>> np.intersect1d([1,3,3],[3,1,1])
- array([1, 1, 3, 3])
-
- """
- aux = np.concatenate((ar1,ar2))
- aux.sort()
- return aux[aux[1:] == aux[:-1]]
-def intersect1d_nu(ar1, ar2):
+def intersect1d(ar1, ar2, assume_unique=False):
"""
Intersection returning unique elements common to both arrays.
Parameters
----------
- ar1,ar2 : array_like
+ ar1, ar2 : array_like
Input arrays.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
Returns
-------
@@ -212,34 +183,34 @@ def intersect1d_nu(ar1, ar2):
See Also
--------
- intersect1d : Returns repeated or unique common elements.
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
Examples
--------
- >>> np.intersect1d_nu([1,3,3],[3,1,1])
+ >>> np.intersect1d([1,3,3], [3,1,1])
array([1, 3])
"""
- # Might be faster than unique1d( intersect1d( ar1, ar2 ) )?
- aux = np.concatenate((unique1d(ar1), unique1d(ar2)))
+ if not assume_unique:
+ # Might be faster than unique( intersect1d( ar1, ar2 ) )?
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ aux = np.concatenate( (ar1, ar2) )
aux.sort()
return aux[aux[1:] == aux[:-1]]
-def setxor1d(ar1, ar2):
+def setxor1d(ar1, ar2, assume_unique=False):
"""
- Set exclusive-or of 1D arrays with unique elements.
-
- Use unique1d() to generate arrays with only unique elements to use as
- inputs to this function.
+ Set exclusive-or of two 1D arrays.
Parameters
----------
- ar1 : array_like
- Input array.
- ar2 : array_like
- Input array.
+ ar1, ar2 : array_like
+ Input arrays.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
Returns
-------
@@ -252,7 +223,11 @@ def setxor1d(ar1, ar2):
performing set operations on arrays.
"""
- aux = np.concatenate((ar1, ar2))
+ if not assume_unique:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+
+ aux = np.concatenate( (ar1, ar2) )
if aux.size == 0:
return aux
@@ -263,98 +238,68 @@ def setxor1d(ar1, ar2):
flag2 = flag[1:] == flag[:-1]
return aux[flag2]
-def setmember1d(ar1, ar2):
+def in1d(ar1, ar2, assume_unique=False):
"""
- Return a boolean array set True where first element is in second array.
-
- Boolean array is the shape of `ar1` containing True where the elements
- of `ar1` are in `ar2` and False otherwise.
+ Test whether each element of an array is also present in a second array.
- Use unique1d() to generate arrays with only unique elements to use as
- inputs to this function.
+ Returns a boolean array the same length as `ar1` that is True
+ where an element of `ar1` is in `ar2` and False otherwise.
Parameters
----------
- ar1 : array_like
- Input array.
- ar2 : array_like
- Input array.
+ ar1, ar2 : array_like
+ Input arrays.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
Returns
-------
mask : ndarray, bool
The values `ar1[mask]` are in `ar2`.
-
See Also
--------
- setmember1d_nu : Works for arrays with non-unique elements.
numpy.lib.arraysetops : Module with a number of other functions for
performing set operations on arrays.
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
Examples
--------
>>> test = np.arange(5)
>>> states = [0, 2]
- >>> mask = np.setmember1d(test,states)
+    >>> mask = np.in1d(test, states)
>>> mask
array([ True, False, True, False, False], dtype=bool)
>>> test[mask]
array([0, 2])
"""
- # We need this to be a stable sort, so always use 'mergesort' here. The
- # values from the first array should always come before the values from the
- # second array.
- ar = np.concatenate( (ar1, ar2 ) )
+ if not assume_unique:
+ ar1, rev_idx = np.unique(ar1, return_inverse=True)
+ ar2 = np.unique(ar2)
+
+ ar = np.concatenate( (ar1, ar2) )
+ # We need this to be a stable sort, so always use 'mergesort'
+ # here. The values from the first array should always come before
+ # the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
equal_adj = (sar[1:] == sar[:-1])
flag = np.concatenate( (equal_adj, [False] ) )
-
indx = order.argsort(kind='mergesort')[:len( ar1 )]
- return flag[indx]
-
-def setmember1d_nu(ar1, ar2):
- """
- Return a boolean array set True where first element is in second array.
-
- Boolean array is the shape of `ar1` containing True where the elements
- of `ar1` are in `ar2` and False otherwise.
-
- Unlike setmember1d(), this version works also for arrays with duplicate
- values. It uses setmember1d() internally. For arrays with unique
- entries it is slower than calling setmember1d() directly.
-
- Parameters
- ----------
- ar1 : array_like
- Input array.
- ar2 : array_like
- Input array.
- Returns
- -------
- mask : ndarray, bool
- The values `ar1[mask]` are in `ar2`.
-
- See Also
- --------
- setmember1d : Faster for arrays with unique elements.
- numpy.lib.arraysetops : Module with a number of other functions for
- performing set operations on arrays.
-
- """
- unique_ar1, rev_idx = np.unique1d(ar1, return_inverse=True)
- mask = np.setmember1d(unique_ar1, np.unique1d(ar2))
- return mask[rev_idx]
+ if assume_unique:
+ return flag[indx]
+ else:
+ return flag[indx][rev_idx]
def union1d(ar1, ar2):
"""
- Union of 1D arrays with unique elements.
-
- Use unique1d() to generate arrays with only unique elements to use as
- inputs to this function.
+ Union of two 1D arrays.
Parameters
----------
@@ -374,14 +319,11 @@ def union1d(ar1, ar2):
performing set operations on arrays.
"""
- return unique1d( np.concatenate( (ar1, ar2) ) )
+ return unique( np.concatenate( (ar1, ar2) ) )
-def setdiff1d(ar1, ar2):
+def setdiff1d(ar1, ar2, assume_unique=False):
"""
- Set difference of 1D arrays with unique elements.
-
- Use unique1d() to generate arrays with only unique elements to use as
- inputs to this function.
+ Set difference of two 1D arrays.
Parameters
----------
@@ -389,6 +331,9 @@ def setdiff1d(ar1, ar2):
Input array.
ar2 : array_like
Input comparison array.
+ assume_unique : bool
+ If True, the input arrays are both assumed to be unique, which
+ can speed up the calculation. Default is False.
Returns
-------
@@ -401,8 +346,80 @@ def setdiff1d(ar1, ar2):
performing set operations on arrays.
"""
- aux = setmember1d(ar1,ar2)
+ if not assume_unique:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ aux = in1d(ar1, ar2, assume_unique=True)
if aux.size == 0:
return aux
else:
return np.asarray(ar1)[aux == 0]
+
+@deprecate_with_doc('')
+def unique1d(ar1, return_index=False, return_inverse=False):
+ """
+ This function is deprecated. Use unique() instead.
+ """
+ if return_index:
+ import warnings
+ warnings.warn("The order of the output arguments for "
+ "`return_index` has changed. Before, "
+ "the output was (indices, unique_arr), but "
+ "has now been reversed to be more consistent.")
+
+ ar = np.asanyarray(ar1).flatten()
+ if ar.size == 0:
+ if return_inverse and return_index:
+ return ar, np.empty(0, np.bool), np.empty(0, np.bool)
+ elif return_inverse or return_index:
+ return ar, np.empty(0, np.bool)
+ else:
+ return ar
+
+ if return_inverse or return_index:
+ perm = ar.argsort()
+ aux = ar[perm]
+ flag = np.concatenate(([True], aux[1:] != aux[:-1]))
+ if return_inverse:
+ iflag = np.cumsum(flag) - 1
+ iperm = perm.argsort()
+ if return_index:
+ return aux[flag], perm[flag], iflag[iperm]
+ else:
+ return aux[flag], iflag[iperm]
+ else:
+ return aux[flag], perm[flag]
+
+ else:
+ ar.sort()
+ flag = np.concatenate(([True], ar[1:] != ar[:-1]))
+ return ar[flag]
+
+@deprecate_with_doc('')
+def intersect1d_nu(ar1, ar2):
+ """
+ This function is deprecated. Use intersect1d()
+ instead.
+ """
+ # Might be faster than unique1d( intersect1d( ar1, ar2 ) )?
+ aux = np.concatenate((unique1d(ar1), unique1d(ar2)))
+ aux.sort()
+ return aux[aux[1:] == aux[:-1]]
+
+@deprecate_with_doc('')
+def setmember1d(ar1, ar2):
+ """
+ This function is deprecated. Use in1d(assume_unique=True)
+ instead.
+ """
+ # We need this to be a stable sort, so always use 'mergesort' here. The
+ # values from the first array should always come before the values from the
+ # second array.
+ ar = np.concatenate( (ar1, ar2 ) )
+ order = ar.argsort(kind='mergesort')
+ sar = ar[order]
+ equal_adj = (sar[1:] == sar[:-1])
+ flag = np.concatenate( (equal_adj, [False] ) )
+
+ indx = order.argsort(kind='mergesort')[:len( ar1 )]
+ return flag[indx]
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index 0cef1c4d2..503d43647 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -28,6 +28,18 @@ def fv(rate, nper, pmt, pv, when='end'):
"""
Compute the future value.
+ Given:
+ * a present value, `pv`
+ * an interest `rate` compounded once per period, of which
+ there are
+ * `nper` total
+ * a (fixed) payment, `pmt`, paid either
+ * at the beginning (`when` = {'begin', 1}) or the end
+ (`when` = {'end', 0}) of each period
+
+ Return:
+ the value at the end of the `nper` periods
+
Parameters
----------
rate : scalar or array_like of shape(M, )
@@ -61,6 +73,17 @@ def fv(rate, nper, pmt, pv, when='end'):
fv + pv + pmt * nper == 0
+ References
+ ----------
+ .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ Open Document Format for Office Applications (OpenDocument)v1.2,
+ Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
+ Pre-Draft 12. Organization for the Advancement of Structured Information
+ Standards (OASIS). Billerica, MA, USA. [ODT Document].
+ Available:
+ http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
+ OpenDocument-formula-20090508.odt
+
Examples
--------
What is the future value after 10 years of saving $100 now, with
@@ -94,6 +117,19 @@ def pmt(rate, nper, pv, fv=0, when='end'):
"""
Compute the payment against loan principal plus interest.
+ Given:
+ * a present value, `pv` (e.g., an amount borrowed)
+ * a future value, `fv` (e.g., 0)
+ * an interest `rate` compounded once per period, of which
+ there are
+ * `nper` total
+ * and (optional) specification of whether payment is made
+ at the beginning (`when` = {'begin', 1}) or the end
+ (`when` = {'end', 0}) of each period
+
+ Return:
+ the (fixed) periodic payment.
+
Parameters
----------
rate : array_like
@@ -102,8 +138,8 @@ def pmt(rate, nper, pv, fv=0, when='end'):
Number of compounding periods
pv : array_like
Present value
- fv : array_like
- Future value
+ fv : array_like (optional)
+ Future value (default = 0)
when : {{'begin', 1}, {'end', 0}}, {string, int}
When payments are due ('begin' (1) or 'end' (0))
@@ -117,7 +153,7 @@ def pmt(rate, nper, pv, fv=0, when='end'):
Notes
-----
- The payment ``pmt`` is computed by solving the equation::
+ The payment is computed by solving the equation::
fv +
pv*(1 + rate)**nper +
@@ -127,16 +163,37 @@ def pmt(rate, nper, pv, fv=0, when='end'):
fv + pv + pmt * nper == 0
+ for ``pmt``.
+
+ Note that computing a monthly mortgage payment is only
+ one use for this function. For example, pmt returns the
+ periodic deposit one must make to achieve a specified
+ future balance given an initial deposit, a fixed,
+ periodically compounded interest rate, and the total
+ number of periods.
+
+ References
+ ----------
+ .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ Open Document Format for Office Applications (OpenDocument) v1.2,
+ Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
+ Pre-Draft 12. Organization for the Advancement of Structured Information
+ Standards (OASIS). Billerica, MA, USA. [ODT Document].
+ Available:
+ http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
+ OpenDocument-formula-20090508.odt
+
Examples
--------
- What would the monthly payment need to be to pay off a $200,000 loan in 15
+ What is the monthly payment needed to pay off a $200,000 loan in 15
years at an annual interest rate of 7.5%?
>>> np.pmt(0.075/12, 12*15, 200000)
-1854.0247200054619
- In order to pay-off (i.e. have a future-value of 0) the $200,000 obtained
- today, a monthly payment of $1,854.02 would be required.
+ In order to pay-off (i.e., have a future-value of 0) the $200,000 obtained
+ today, a monthly payment of $1,854.02 would be required. Note that this
+ example illustrates usage of `fv` having a default value of 0.
"""
when = _convert_when(when)
@@ -282,6 +339,18 @@ def pv(rate, nper, pmt, fv=0.0, when='end'):
"""
Compute the present value.
+ Given:
+ * a future value, `fv`
+ * an interest `rate` compounded once per period, of which
+ there are
+ * `nper` total
+ * a (fixed) payment, `pmt`, paid either
+ * at the beginning (`when` = {'begin', 1}) or the end
+ (`when` = {'end', 0}) of each period
+
+ Return:
+ the value now
+
Parameters
----------
rate : array_like
@@ -302,7 +371,7 @@ def pv(rate, nper, pmt, fv=0.0, when='end'):
Notes
-----
- The present value ``pv`` is computed by solving the equation::
+ The present value is computed by solving the equation::
fv +
pv*(1 + rate)**nper +
@@ -312,6 +381,45 @@ def pv(rate, nper, pmt, fv=0.0, when='end'):
fv + pv + pmt * nper = 0
+ for `pv`, which is then returned.
+
+ References
+ ----------
+ .. [WRW] Wheeler, D. A., E. Rathke, and R. Weir (Eds.) (2009, May).
+ Open Document Format for Office Applications (OpenDocument) v1.2,
+ Part 2: Recalculated Formula (OpenFormula) Format - Annotated Version,
+ Pre-Draft 12. Organization for the Advancement of Structured Information
+ Standards (OASIS). Billerica, MA, USA. [ODT Document].
+ Available:
+ http://www.oasis-open.org/committees/documents.php?wg_abbrev=office-formula
+ OpenDocument-formula-20090508.odt
+
+ Examples
+ --------
+ What is the present value (e.g., the initial investment)
+ of an investment that needs to total $15692.93
+ after 10 years of saving $100 every month? Assume the
+ interest rate is 5% (annually) compounded monthly.
+
+ >>> np.pv(0.05/12, 10*12, -100, 15692.93)
+ -100.00067131625819
+
+ By convention, the negative sign represents cash flow out
+ (i.e., money not available today). Thus, to end up with
+ $15,692.93 in 10 years saving $100 a month at 5% annual
+ interest, one's initial deposit should also be $100.
+
+ If any input is array_like, ``pv`` returns an array of equal shape.
+ Let's compare different interest rates in the example above:
+
+ >>> a = np.array((0.05, 0.04, 0.03))/12
+ >>> np.pv(a, 10*12, -100, 15692.93)
+ array([ -100.00067132, -649.26771385, -1273.78633713])
+
+ So, to end up with the same $15692.93 under the same $100 per month
+ "savings plan," for annual interest rates of 4% and 3%, one would
+ need initial investments of $649.27 and $1273.79, respectively.
+
"""
when = _convert_when(when)
rate, nper, pmt, fv, when = map(np.asarray, [rate, nper, pmt, fv, when])
@@ -391,24 +499,54 @@ def irr(values):
"""
Return the Internal Rate of Return (IRR).
- This is the rate of return that gives a net present value of 0.0.
+ This is the "average" periodically compounded rate of return
+ that gives a net present value of 0.0; for a more complete explanation,
+ see Notes below.
Parameters
----------
values : array_like, shape(N,)
- Input cash flows per time period. At least the first value would be
- negative to represent the investment in the project.
+ Input cash flows per time period. By convention, net "deposits"
+ are negative and net "withdrawals" are positive. Thus, for example,
+ at least the first element of `values`, which represents the initial
+ investment, will typically be negative.
Returns
-------
out : float
Internal Rate of Return for periodic input values.
+ Notes
+ -----
+ The IRR is perhaps best understood through an example (illustrated
+ using np.irr in the Examples section below). Suppose one invests
+ 100 units and then makes the following withdrawals at regular
+ (fixed) intervals: 39, 59, 55, 20. Assuming the ending value is 0,
+ one's 100 unit investment yields 173 units; however, due to the
+ combination of compounding and the periodic withdrawals, the
+ "average" rate of return is neither simply 0.73/4 nor (1.73)^0.25-1.
+ Rather, it is the solution (for :math:`r`) of the equation:
+
+ .. math:: -100 + \\frac{39}{1+r} + \\frac{59}{(1+r)^2}
+ + \\frac{55}{(1+r)^3} + \\frac{20}{(1+r)^4} = 0
+
+ In general, for `values` :math:`= [v_0, v_1, ... v_M]`,
+ irr is the solution of the equation: [G]_
+
+ .. math:: \\sum_{t=0}^M{\\frac{v_t}{(1+irr)^{t}}} = 0
+
+ References
+ ----------
+ .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
+ Addison-Wesley, 2003, pg. 348.
+
Examples
--------
>>> np.irr([-100, 39, 59, 55, 20])
0.2809484211599611
+ (Compare with the Example given for numpy.lib.financial.npv)
+
"""
res = np.roots(values[::-1])
# Find the root(s) between 0 and 1
@@ -430,8 +568,14 @@ def npv(rate, values):
rate : scalar
The discount rate.
values : array_like, shape(M, )
- The values of the time series of cash flows. Must be the same
- increment as the `rate`.
+ The values of the time series of cash flows. The (fixed) time
+ interval between cash flow "events" must be the same as that
+ for which `rate` is given (i.e., if `rate` is per year, then
+ precisely a year is understood to elapse between each cash flow
+ event). By convention, investments or "deposits" are negative,
+ income or "withdrawals" are positive; `values` must begin with
+ the initial investment, thus `values[0]` will typically be
+ negative.
Returns
-------
@@ -440,9 +584,21 @@ def npv(rate, values):
Notes
-----
- Returns the result of:
+ Returns the result of: [G]_
+
+ .. math :: \\sum_{t=0}^M{\\frac{values_t}{(1+rate)^{t}}}
+
+ References
+ ----------
+ .. [G] L. J. Gitman, "Principles of Managerial Finance, Brief," 3rd ed.,
+ Addison-Wesley, 2003, pg. 346.
- .. math :: \\sum_{t=1}^M{\\frac{values_t}{(1+rate)^{t}}}
+ Examples
+ --------
+ >>> np.npv(0.281,[-100, 39, 59, 55, 20])
+ -0.0066187288356340801
+
+ (Compare with the Example given for numpy.lib.financial.irr)
"""
values = np.asarray(values)
@@ -456,7 +612,7 @@ def mirr(values, finance_rate, reinvest_rate):
----------
values : array_like
Cash flows (must contain at least one positive and one negative value)
- or nan is returned.
+ or nan is returned. The first value is considered a sunk cost at time zero.
finance_rate : scalar
Interest rate paid on the cash flows
reinvest_rate : scalar
@@ -469,13 +625,13 @@ def mirr(values, finance_rate, reinvest_rate):
"""
- values = np.asarray(values)
+ values = np.asarray(values, dtype=np.double)
+ n = values.size
pos = values > 0
neg = values < 0
- if not (pos.size > 0 and neg.size > 0):
+ if not (pos.any() and neg.any()):
return np.nan
+ numer = np.abs(npv(reinvest_rate, values*pos))*(1 + reinvest_rate)
+ denom = np.abs(npv(finance_rate, values*neg))*(1 + finance_rate)
+ return (numer/denom)**(1.0/(n - 1))*(1 + reinvest_rate) - 1
- n = pos.size + neg.size
- numer = -npv(reinvest_rate, values[pos])*((1+reinvest_rate)**n)
- denom = npv(finance_rate, values[neg])*(1+finance_rate)
- return (numer / denom)**(1.0/(n-1)) - 1
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index b493801df..663c3d2ef 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -3,7 +3,7 @@ __all__ = ['logspace', 'linspace',
'select', 'piecewise', 'trim_zeros',
'copy', 'iterable',
'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp',
- 'unique', 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
+ 'extract', 'place', 'nansum', 'nanmax', 'nanargmax',
'nanargmin', 'nanmin', 'vectorize', 'asarray_chkfinite', 'average',
'histogram', 'histogramdd', 'bincount', 'digitize', 'cov',
'corrcoef', 'msort', 'median', 'sinc', 'hamming', 'hanning',
@@ -28,6 +28,7 @@ from numpy.lib.twodim_base import diag
from _compiled_base import _insert, add_docstring
from _compiled_base import digitize, bincount, interp as compiled_interp
from arraysetops import setdiff1d
+from utils import deprecate_with_doc
import numpy as np
#end Fernando's utilities
@@ -377,11 +378,11 @@ def histogram(a, bins=10, range=None, normed=False, weights=None, new=None):
n = np.diff(n)
- if normed is False:
- return n, bins
- elif normed is True:
+ if normed:
db = array(np.diff(bins), float)
return n/(n*db).sum(), bins
+ else:
+ return n, bins
def histogramdd(sample, bins=10, range=None, normed=False, weights=None):
@@ -717,9 +718,9 @@ def piecewise(x, condlist, funclist, *args, **kw):
Parameters
----------
- x : (N,) ndarray
+ x : ndarray
The input domain.
- condlist : list of M (N,)-shaped boolean arrays
+ condlist : list of bool arrays
Each boolean array corresponds to a function in `funclist`. Wherever
`condlist[i]` is True, `funclist[i](x)` is used as the output value.
@@ -727,24 +728,24 @@ def piecewise(x, condlist, funclist, *args, **kw):
and should therefore be of the same shape as `x`.
The length of `condlist` must correspond to that of `funclist`.
- If one extra function is given, i.e. if the length of `funclist` is
- M+1, then that extra function is the default value, used wherever
- all conditions are false.
- funclist : list of M or M+1 callables, f(x,*args,**kw), or values
+ If one extra function is given, i.e. if
+ ``len(funclist) - len(condlist) == 1``, then that extra function
+ is the default value, used wherever all conditions are false.
+ funclist : list of callables, f(x,*args,**kw), or scalars
Each function is evaluated over `x` wherever its corresponding
condition is True. It should take an array as input and give an array
or a scalar value as output. If, instead of a callable,
- a value is provided then a constant function (``lambda x: value``) is
+ a scalar is provided then a constant function (``lambda x: scalar``) is
assumed.
args : tuple, optional
Any further arguments given to `piecewise` are passed to the functions
- upon execution, i.e., if called ``piecewise(...,...,1,'a')``, then
- each function is called as ``f(x,1,'a')``.
- kw : dictionary, optional
+ upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then
+ each function is called as ``f(x, 1, 'a')``.
+ kw : dict, optional
Keyword arguments used in calling `piecewise` are passed to the
functions upon execution, i.e., if called
- ``piecewise(...,...,lambda=1)``, then each function is called as
- ``f(x,lambda=1)``.
+ ``piecewise(..., ..., lambda=1)``, then each function is called as
+ ``f(x, lambda=1)``.
Returns
-------
@@ -754,6 +755,11 @@ def piecewise(x, condlist, funclist, *args, **kw):
as defined by the boolean arrays in `condlist`. Portions not covered
by any condition have undefined values.
+
+ See Also
+ --------
+ choose, select, where
+
Notes
-----
This is similar to choose or select, except that functions are
@@ -773,8 +779,8 @@ def piecewise(x, condlist, funclist, *args, **kw):
--------
Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``.
- >>> x = np.arange(6) - 2.5 # x runs from -2.5 to 2.5 in steps of 1
- >>> np.piecewise(x, [x < 0, x >= 0.5], [-1,1])
+ >>> x = np.arange(6) - 2.5
+ >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
array([-1., -1., -1., 1., 1., 1.])
Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for
@@ -836,39 +842,35 @@ def select(condlist, choicelist, default=0):
Parameters
----------
- condlist : list of N boolean arrays of length M
- The conditions C_0 through C_(N-1) which determine
- from which vector the output elements are taken.
- choicelist : list of N arrays of length M
- Th vectors V_0 through V_(N-1), from which the output
- elements are chosen.
+ condlist : list of bool ndarrays
+ The list of conditions which determine from which array in `choicelist`
+ the output elements are taken. When multiple conditions are satisfied,
+ the first one encountered in `condlist` is used.
+ choicelist : list of ndarrays
+ The list of arrays from which the output elements are taken. It has
+ to be of the same length as `condlist`.
+ default : scalar, optional
+ The element inserted in `output` when all conditions evaluate to False.
Returns
-------
- output : 1-dimensional array of length M
- The output at position m is the m-th element of the first
- vector V_n for which C_n[m] is non-zero. Note that the
- output depends on the order of conditions, since the
- first satisfied condition is used.
-
- Notes
- -----
- Equivalent to:
- ::
+ output : ndarray
+ The output at position m is the m-th element of the array in
+ `choicelist` where the m-th element of the corresponding array in
+ `condlist` is True.
- output = []
- for m in range(M):
- output += [V[m] for V,C in zip(values,cond) if C[m]]
- or [default]
+ See Also
+ --------
+ where : Return elements from one of two arrays depending on condition.
+ take, choose, compress, diag, diagonal
Examples
--------
- >>> t = np.arange(10)
- >>> s = np.arange(10)*100
- >>> condlist = [t == 4, t > 5]
- >>> choicelist = [s, t]
+ >>> x = np.arange(10)
+ >>> condlist = [x<3, x>5]
+ >>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
- array([ 0, 0, 0, 0, 400, 0, 6, 7, 8, 9])
+ array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
"""
n = len(condlist)
@@ -960,11 +962,17 @@ def gradient(f, *varargs):
Examples
--------
- >>> np.gradient(np.array([[1,1],[3,4]]))
- [array([[ 2., 3.],
- [ 2., 3.]]),
- array([[ 0., 0.],
- [ 1., 1.]])]
+ >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
+ >>> np.gradient(x)
+ array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+ >>> np.gradient(x, 2)
+ array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
+
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
+ [array([[ 2., 2., -1.],
+ [ 2., 2., -1.]]),
+ array([[ 1. , 2.5, 4. ],
+ [ 1. , 1. , 1. ]])]
"""
N = len(f.shape) # number of dimensions
@@ -1026,7 +1034,11 @@ def gradient(f, *varargs):
def diff(a, n=1, axis=-1):
"""
- Calculate the nth order discrete difference along given axis.
+ Calculate the n-th order discrete difference along given axis.
+
+ The first order difference is given by ``out[n] = a[n+1] - a[n]`` along
+ the given axis, higher order differences are calculated by using `diff`
+ recursively.
Parameters
----------
@@ -1035,26 +1047,31 @@ def diff(a, n=1, axis=-1):
n : int, optional
The number of times values are differenced.
axis : int, optional
- The axis along which the difference is taken.
+ The axis along which the difference is taken, default is the last axis.
Returns
-------
out : ndarray
- The `n` order differences. The shape of the output is the same as `a`
- except along `axis` where the dimension is `n` less.
+ The `n` order differences. The shape of the output is the same as `a`
+ except along `axis` where the dimension is smaller by `n`.
+
+ See Also
+ --------
+ gradient, ediff1d
Examples
--------
- >>> x = np.array([0,1,3,9,5,10])
+ >>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
- array([ 1, 2, 6, -4, 5])
- >>> np.diff(x,n=2)
- array([ 1, 4, -10, 9])
- >>> x = np.array([[1,3,6,10],[0,5,6,8]])
+ array([ 1, 2, 3, -7])
+ >>> np.diff(x, n=2)
+ array([ 1, 1, -10])
+
+ >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
- [5, 1, 2]])
- >>> np.diff(x,axis=0)
+ [5, 1, 2]])
+ >>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
"""
@@ -1201,15 +1218,34 @@ def unwrap(p, discont=pi, axis=-1):
----------
p : array_like
Input array.
- discont : float
- Maximum discontinuity between values.
- axis : integer
- Axis along which unwrap will operate.
+ discont : float, optional
+ Maximum discontinuity between values, default is ``pi``.
+ axis : int, optional
+ Axis along which unwrap will operate, default is the last axis.
Returns
-------
out : ndarray
- Output array
+ Output array.
+
+ See Also
+ --------
+ rad2deg, deg2rad
+
+ Notes
+ -----
+ If the discontinuity in `p` is smaller than ``pi``, but larger than
+ `discont`, no unwrapping is done because taking the 2*pi complement
+ would only make the discontinuity larger.
+
+ Examples
+ --------
+ >>> phase = np.linspace(0, np.pi, num=5)
+ >>> phase[3:] += np.pi
+ >>> phase
+ array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
+ >>> np.unwrap(phase)
+ array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
"""
p = asarray(p)
@@ -1310,31 +1346,11 @@ import sys
if sys.hexversion < 0x2040000:
from sets import Set as set
+@deprecate_with_doc('')
def unique(x):
"""
- Return the sorted, unique elements of an array or sequence.
-
- Parameters
- ----------
- x : ndarray or sequence
- Input array.
-
- Returns
- -------
- y : ndarray
- The sorted, unique elements are returned in a 1-D array.
-
- Examples
- --------
- >>> np.unique([1, 1, 2, 2, 3, 3])
- array([1, 2, 3])
- >>> a = np.array([[1, 1], [2, 3]])
- >>> np.unique(a)
- array([1, 2, 3])
-
- >>> np.unique([True, True, False])
- array([False, True], dtype=bool)
-
+ This function is deprecated. Use numpy.lib.arraysetops.unique()
+ instead.
"""
try:
tmp = x.flatten()
@@ -1365,53 +1381,64 @@ def extract(condition, arr):
See Also
--------
- take, put, putmask
+ take, put, putmask, compress
Examples
--------
- >>> arr = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12]])
+ >>> arr = np.arange(12).reshape((3, 4))
>>> arr
- array([[ 1, 2, 3, 4],
- [ 5, 6, 7, 8],
- [ 9, 10, 11, 12]])
+ array([[ 0, 1, 2, 3],
+ [ 4, 5, 6, 7],
+ [ 8, 9, 10, 11]])
>>> condition = np.mod(arr, 3)==0
>>> condition
- array([[False, False, True, False],
- [False, True, False, False],
- [ True, False, False, True]], dtype=bool)
+ array([[ True, False, False, True],
+ [False, False, True, False],
+ [False, True, False, False]], dtype=bool)
>>> np.extract(condition, arr)
- array([ 3, 6, 9, 12])
+ array([0, 3, 6, 9])
+
If `condition` is boolean:
>>> arr[condition]
- array([ 3, 6, 9, 12])
+ array([0, 3, 6, 9])
"""
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
def place(arr, mask, vals):
"""
- Changes elements of an array based on conditional and input values.
+ Change elements of an array based on conditional and input values.
- Similar to ``putmask(a, mask, vals)`` but the 1D array `vals` has the
- same number of elements as the non-zero values of `mask`. Inverse of
- ``extract``.
+ Similar to ``np.putmask(a, mask, vals)``, the difference is that `place`
+ uses the first N elements of `vals`, where N is the number of True values
+ in `mask`, while `putmask` uses the elements where `mask` is True.
- Sets `a`.flat[n] = `values`\\[n] for each n where `mask`.flat[n] is true.
+ Note that `extract` does the exact opposite of `place`.
Parameters
----------
a : array_like
Array to put data into.
mask : array_like
- Boolean mask array.
- values : array_like, shape(number of non-zero `mask`, )
- Values to put into `a`.
+ Boolean mask array. Must have the same size as `a`.
+ vals : 1-D sequence
+ Values to put into `a`. Only the first N elements are used, where
+ N is the number of True values in `mask`. If `vals` is smaller
+ than N it will be repeated.
See Also
--------
- putmask, put, take
+ putmask, put, take, extract
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> np.place(x, x>2, [44, 55])
+ >>> x
+ array([[ 0, 1, 2],
+ [44, 55, 44]])
"""
return _insert(arr, mask, vals)
@@ -2841,6 +2868,25 @@ def trapz(y, x=None, dx=1.0, axis=-1):
axis : int, optional
Specify the axis.
+ Returns
+ -------
+ out : float
+ Definite integral as approximated by trapezoidal rule.
+
+ Notes
+ -----
+ Image [2]_ illustrates the trapezoidal rule -- the y-axis locations of the
+ points are taken from the `y` array; by default the x-axis distances between
+ points are 1.0, but they can alternatively be provided via the `x` array or
+ the `dx` scalar. The return value equals the combined area under the red lines.
+
+
+ References
+ ----------
+ .. [1] Wikipedia page: http://en.wikipedia.org/wiki/Trapezoidal_rule
+
+ .. [2] Illustration image: http://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png
+
Examples
--------
>>> np.trapz([1,2,3])
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index b8add9ed7..eeb1d37aa 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -3,29 +3,38 @@ __all__ = ['unravel_index',
'ogrid',
'r_', 'c_', 's_',
'index_exp', 'ix_',
- 'ndenumerate','ndindex']
+ 'ndenumerate','ndindex',
+ 'fill_diagonal','diag_indices','diag_indices_from']
import sys
import numpy.core.numeric as _nx
-from numpy.core.numeric import asarray, ScalarType, array
+from numpy.core.numeric import ( asarray, ScalarType, array, alltrue, cumprod,
+ arange )
from numpy.core.numerictypes import find_common_type
import math
import function_base
import numpy.core.defmatrix as matrix
+from function_base import diff
makemat = matrix.matrix
# contributed by Stefan van der Walt
def unravel_index(x,dims):
"""
- Convert a flat index into an index tuple for an array of given shape.
+ Convert a flat index to an index tuple for an array of given shape.
Parameters
----------
x : int
Flattened index.
- dims : shape tuple
- Input shape.
+ dims : tuple of ints
+ Input shape, the shape of an array into which indexing is
+ required.
+
+ Returns
+ -------
+ idx : tuple of ints
+ Tuple of the same shape as `dims`, containing the unraveled index.
Notes
-----
@@ -34,7 +43,7 @@ def unravel_index(x,dims):
Examples
--------
- >>> arr = np.arange(20).reshape(5,4)
+ >>> arr = np.arange(20).reshape(5, 4)
>>> arr
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
@@ -72,21 +81,45 @@ def unravel_index(x,dims):
return tuple(x/dim_prod % dims)
def ix_(*args):
- """ Construct an open mesh from multiple sequences.
+ """
+ Construct an open mesh from multiple sequences.
+
+ This function takes N 1-D sequences and returns N outputs with N
+ dimensions each, such that the shape is 1 in all but one dimension
+ and the dimension with the non-unit shape value cycles through all
+ N dimensions.
- This function takes n 1-d sequences and returns n outputs with n
- dimensions each such that the shape is 1 in all but one dimension and
- the dimension with the non-unit shape value cycles through all n
- dimensions.
+ Using `ix_` one can quickly construct index arrays that will index
+ the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
+ ``[a[1,2] a[1,5] a[3,2] a[3,5]]``.
- Using ix_() one can quickly construct index arrays that will index
- the cross product.
+ Parameters
+ ----------
+ args : 1-D sequences
- a[ix_([1,3,7],[2,5,8])] returns the array
+ Returns
+ -------
+ out : ndarrays
+ N arrays with N dimensions each, with N the number of input
+ sequences. Together these arrays form an open mesh.
+
+ See Also
+ --------
+ ogrid, mgrid, meshgrid
+
+ Examples
+ --------
+ >>> a = np.arange(10).reshape(2, 5)
+ >>> ixgrid = np.ix_([0,1], [2,4])
+ >>> ixgrid
+ (array([[0],
+ [1]]), array([[2, 4]]))
+ >>> print ixgrid[0].shape, ixgrid[1].shape
+ (2, 1) (1, 2)
+ >>> a[ixgrid]
+ array([[2, 4],
+ [7, 9]])
- a[1,2] a[1,5] a[1,8]
- a[3,2] a[3,5] a[3,8]
- a[7,2] a[7,5] a[7,8]
"""
out = []
nd = len(args)
@@ -215,7 +248,11 @@ mgrid.__doc__ = None # set in numpy.add_newdocs
ogrid.__doc__ = None # set in numpy.add_newdocs
class AxisConcatenator(object):
- """Translates slice objects to concatenation along an axis.
+ """
+ Translates slice objects to concatenation along an axis.
+
+ For detailed documentation on usage, see `r_`.
+
"""
def _retval(self, res):
if self.matrix:
@@ -338,11 +375,96 @@ class AxisConcatenator(object):
# in help(r_)
class RClass(AxisConcatenator):
- """Translates slice objects to concatenation along the first axis.
+ """
+ Translates slice objects to concatenation along the first axis.
+
+ This is a simple way to build up arrays quickly. There are two use cases.
+
+ 1. If the index expression contains comma separated arrays, then stack
+ them along their first axis.
+ 2. If the index expression contains slice notation or scalars then create
+ a 1-D array with a range indicated by the slice notation.
+
+ If slice notation is used, the syntax ``start:stop:step`` is equivalent
+ to ``np.arange(start, stop, step)`` inside of the brackets. However, if
+ ``step`` is an imaginary number (i.e. 100j) then its integer portion is
+ interpreted as a number-of-points desired and the start and stop are
+ inclusive. In other words ``start:stop:stepj`` is interpreted as
+ ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
+ After expansion of slice notation, all comma separated sequences are
+ concatenated together.
+
+ Optional character strings placed as the first element of the index
+ expression can be used to change the output. The strings 'r' or 'c' result
+ in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
+ matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
+ (column) matrix is produced. If the result is 2-D then both provide the
+ same matrix result.
+
+ A string integer specifies which axis to stack multiple comma separated
+ arrays along. A string of two comma-separated integers allows indication
+ of the minimum number of dimensions to force each entry into as the
+ second integer (the axis to concatenate along is still the first integer).
+
+ A string with three comma-separated integers allows specification of the
+ axis to concatenate along, the minimum number of dimensions to force the
+ entries to, and which axis should contain the start of the arrays which
+ are less than the specified number of dimensions. In other words the third
+ integer allows you to specify where the 1's should be placed in the shape
+ of the arrays that have their shapes upgraded. By default, they are placed
+ in the front of the shape tuple. The third argument allows you to specify
+ where the start of the array should be instead. Thus, a third argument of
+ '0' would place the 1's at the end of the array shape. Negative integers
+ specify where in the new shape tuple the last dimension of upgraded arrays
+ should be placed, so the default is '-1'.
+
+ Parameters
+ ----------
+ Not a function, so takes no parameters
- For example:
+
+ Returns
+ -------
+ A concatenated ndarray or matrix.
+
+ See Also
+ --------
+ concatenate : Join a sequence of arrays together.
+ c_ : Translates slice objects to concatenation along the second axis.
+
+ Examples
+ --------
>>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
array([1, 2, 3, 0, 0, 4, 5, 6])
+ >>> np.r_[-1:1:6j, [0]*3, 5, 6]
+ array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
+
+ String integers specify the axis to concatenate along or the minimum
+ number of dimensions to force entries into.
+
+ >>> np.r_['-1', a, a] # concatenate along last axis
+ array([[0, 1, 2, 0, 1, 2],
+ [3, 4, 5, 3, 4, 5]])
+ >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
+ array([[1, 2, 3],
+ [4, 5, 6]])
+
+ >>> np.r_['0,2,0', [1,2,3], [4,5,6]]
+ array([[1],
+ [2],
+ [3],
+ [4],
+ [5],
+ [6]])
+ >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
+ array([[1, 4],
+ [2, 5],
+ [3, 6]])
+
+ Using 'r' or 'c' as a first string argument creates a matrix.
+
+ >>> np.r_['r',[1,2,3], [4,5,6]]
+ matrix([[1, 2, 3, 4, 5, 6]])
"""
def __init__(self):
@@ -351,11 +473,21 @@ class RClass(AxisConcatenator):
r_ = RClass()
class CClass(AxisConcatenator):
- """Translates slice objects to concatenation along the second axis.
+ """
+ Translates slice objects to concatenation along the second axis.
+
+ This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
+ useful because of its common occurrence. In particular, arrays will be
+ stacked along their last axis after being upgraded to at least 2-D with
+ 1's post-pended to the shape (column vectors made out of 1-D arrays).
- For example:
+ For detailed documentation, see `r_`.
+
+ Examples
+ --------
>>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
- array([1, 2, 3, 0, 0, 4, 5, 6])
+ array([[1, 2, 3, 0, 0, 4, 5, 6]])
+
"""
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
@@ -373,9 +505,13 @@ class ndenumerate(object):
a : ndarray
Input array.
+ See Also
+ --------
+ ndindex, flatiter
+
Examples
--------
- >>> a = np.array([[1,2],[3,4]])
+ >>> a = np.array([[1, 2], [3, 4]])
>>> for index, x in np.ndenumerate(a):
... print index, x
(0, 0) 1
@@ -388,6 +524,17 @@ class ndenumerate(object):
self.iter = asarray(arr).flat
def next(self):
+ """
+ Standard iterator method, returns the index tuple and array value.
+
+ Returns
+ -------
+ coords : tuple of ints
+ The indices of the current iteration.
+ val : scalar
+ The array element of the current iteration.
+
+ """
return self.iter.coords, self.iter.next()
def __iter__(self):
@@ -399,17 +546,21 @@ class ndindex(object):
An N-dimensional iterator object to index arrays.
Given the shape of an array, an `ndindex` instance iterates over
- the N-dimensional index of the array. At each iteration, the index of the
- last dimension is incremented by one.
+ the N-dimensional index of the array. At each iteration a tuple
+ of indices is returned, the last dimension is iterated over first.
Parameters
----------
- `*args` : integers
- The size of each dimension in the counter.
+ `*args` : ints
+ The size of each dimension of the array.
+
+ See Also
+ --------
+ ndenumerate, flatiter
Examples
--------
- >>> for index in np.ndindex(3,2,1):
+ >>> for index in np.ndindex(3, 2, 1):
... print index
(0, 0, 0)
(0, 1, 0)
@@ -442,9 +593,25 @@ class ndindex(object):
self._incrementone(axis-1)
def ndincr(self):
+ """
+ Increment the multi-dimensional index by one.
+
+ `ndincr` takes care of the "wrapping around" of the axes.
+ It is called by `ndindex.next` and not normally used directly.
+
+ """
self._incrementone(self.nd-1)
def next(self):
+ """
+ Standard iterator method, updates the index and returns the index tuple.
+
+ Returns
+ -------
+ val : tuple of ints
+ Returns a tuple containing the indices of the current iteration.
+
+ """
if (self.index >= self.total):
raise StopIteration
val = tuple(self.ind)
@@ -501,3 +668,167 @@ index_exp = IndexExpression(maketuple=True)
s_ = IndexExpression(maketuple=False)
# End contribution from Konrad.
+
+
+# The following functions complement those in twodim_base, but are
+# applicable to N-dimensions.
+
+def fill_diagonal(a, val):
+ """Fill the main diagonal of the given array of any dimensionality.
+
+ For an array with ndim > 2, the diagonal is the list of locations with
+ indices a[i,i,...,i], all identical.
+
+ This function modifies the input array in-place; it does not return a
+ value.
+
+ This functionality can be obtained via diag_indices(), but internally this
+ version uses a much faster implementation that never constructs the indices
+ and uses simple slicing.
+
+ Parameters
+ ----------
+ a : array, at least 2-dimensional.
+ Array whose diagonal is to be filled, it gets modified in-place.
+
+ val : scalar
+ Value to be written on the diagonal, its type must be compatible with
+ that of the array a.
+
+ See also
+ --------
+ diag_indices, diag_indices_from
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+ >>> a = zeros((3,3),int)
+ >>> fill_diagonal(a,5)
+ >>> a
+ array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]])
+
+ The same function can operate on a 4-d array:
+ >>> a = zeros((3,3,3,3),int)
+ >>> fill_diagonal(a,4)
+
+ We only show a few blocks for clarity:
+ >>> a[0,0]
+ array([[4, 0, 0],
+ [0, 0, 0],
+ [0, 0, 0]])
+ >>> a[1,1]
+ array([[0, 0, 0],
+ [0, 4, 0],
+ [0, 0, 0]])
+ >>> a[2,2]
+ array([[0, 0, 0],
+ [0, 0, 0],
+ [0, 0, 4]])
+
+ """
+ if a.ndim < 2:
+ raise ValueError("array must be at least 2-d")
+ if a.ndim == 2:
+ # Explicit, fast formula for the common case. For 2-d arrays, we
+ # accept rectangular ones.
+ step = a.shape[1] + 1
+ else:
+ # For more than d=2, the strided formula is only valid for arrays with
+ # all dimensions equal, so we check first.
+ if not alltrue(diff(a.shape)==0):
+ raise ValueError("All dimensions of input must be of equal length")
+ step = 1 + (cumprod(a.shape[:-1])).sum()
+
+ # Write the value out into the diagonal.
+ a.flat[::step] = val
+
+
+def diag_indices(n, ndim=2):
+ """Return the indices to access the main diagonal of an array.
+
+ This returns a tuple of indices that can be used to access the main
+ diagonal of an array with ndim (>=2) dimensions and shape (n,n,...,n). For
+ ndim=2 this is the usual diagonal, for ndim>2 this is the set of indices
+ to access A[i,i,...,i] for i=[0..n-1].
+
+ Parameters
+ ----------
+ n : int
+ The size, along each dimension, of the arrays for which the returned
+ indices can be used.
+
+ ndim : int, optional
+ The number of dimensions.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ See also
+ --------
+ diag_indices_from
+
+ Examples
+ --------
+ Create a set of indices to access the diagonal of a (4,4) array:
+ >>> di = diag_indices(4)
+
+ >>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
+ >>> a
+ array([[ 1, 2, 3, 4],
+ [ 5, 6, 7, 8],
+ [ 9, 10, 11, 12],
+ [13, 14, 15, 16]])
+ >>> a[di] = 100
+ >>> a
+ array([[100, 2, 3, 4],
+ [ 5, 100, 7, 8],
+ [ 9, 10, 100, 12],
+ [ 13, 14, 15, 100]])
+
+ Now, we create indices to manipulate a 3-d array:
+ >>> d3 = diag_indices(2,3)
+
+ And use it to set the diagonal of a zeros array to 1:
+ >>> a = zeros((2,2,2),int)
+ >>> a[d3] = 1
+ >>> a
+ array([[[1, 0],
+ [0, 0]],
+
+ [[0, 0],
+ [0, 1]]])
+
+ """
+ idx = arange(n)
+ return (idx,) * ndim
+
+
+def diag_indices_from(arr):
+ """Return the indices to access the main diagonal of an n-dimensional array.
+
+ See diag_indices() for full details.
+
+ Parameters
+ ----------
+ arr : array, at least 2-d
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+
+ if not arr.ndim >= 2:
+ raise ValueError("input array must be at least 2-d")
+ # For more than d=2, the strided formula is only valid for arrays with
+ # all dimensions equal, so we check first.
+ if not alltrue(diff(arr.shape) == 0):
+ raise ValueError("All dimensions of input must be of equal length")
+
+ return diag_indices(arr.shape[0], arr.ndim)
diff --git a/numpy/lib/info.py b/numpy/lib/info.py
index f93234d57..4a781a2ca 100644
--- a/numpy/lib/info.py
+++ b/numpy/lib/info.py
@@ -135,12 +135,11 @@ Set operations for 1D numeric arrays based on sort() function.
================ ===================
ediff1d Array difference (auxiliary function).
-unique1d Unique elements of 1D array.
+unique Unique elements of an array.
intersect1d Intersection of 1D arrays with unique elements.
-intersect1d_nu Intersection of 1D arrays with any elements.
setxor1d Set exclusive-or of 1D arrays with unique elements.
-setmember1d Return an array of shape of ar1 containing 1 where
- the elements of ar1 are in ar2 and 0 otherwise.
+in1d Test whether elements in a 1D array are also present in
+ another array.
union1d Union of 1D arrays with unique elements.
setdiff1d Set difference of 1D arrays with unique elements.
================ ===================
diff --git a/numpy/lib/io.py b/numpy/lib/io.py
index 98d071fab..3a962c7e1 100644
--- a/numpy/lib/io.py
+++ b/numpy/lib/io.py
@@ -118,6 +118,27 @@ class NpzFile(object):
else:
raise KeyError, "%s is not a file in the archive" % key
+
+ def __iter__(self):
+ return iter(self.files)
+
+ def items(self):
+ return [(f, self[f]) for f in self.files]
+
+ def iteritems(self):
+ for f in self.files:
+ yield (f, self[f])
+
+ def keys(self):
+ return self.files
+
+ def iterkeys(self):
+ return self.__iter__()
+
+ def __contains__(self, key):
+ return self.files.__contains__(key)
+
+
def load(file, mmap_mode=None):
"""
Load a pickled, ``.npy``, or ``.npz`` binary file.
@@ -126,6 +147,7 @@ def load(file, mmap_mode=None):
----------
file : file-like object or string
The file to read. It must support ``seek()`` and ``read()`` methods.
+ If the filename extension is ``.gz``, the file is first decompressed.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode
(see `numpy.memmap`). The mode has no effect for pickled or
@@ -146,6 +168,11 @@ def load(file, mmap_mode=None):
IOError
If the input file does not exist or cannot be read.
+ See Also
+ --------
+ save, savez, loadtxt
+ memmap : Create a memory-map to an array stored in a file on disk.
+
Notes
-----
- If the file contains pickle data, then whatever is stored in the
@@ -202,20 +229,20 @@ def load(file, mmap_mode=None):
def save(file, arr):
"""
- Save an array to a binary file in NumPy format.
+ Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
- f : file or string
+ file : file or string
File or filename to which the data is saved. If the filename
does not already have a ``.npy`` extension, it is added.
- x : array_like
- Array data.
+ arr : array_like
+ Array data to be saved.
See Also
--------
- savez : Save several arrays into an .npz compressed archive
- savetxt : Save an array to a file as plain text
+ savez : Save several arrays into a .npz compressed archive
+ savetxt, load
Examples
--------
@@ -225,7 +252,7 @@ def save(file, arr):
>>> x = np.arange(10)
>>> np.save(outfile, x)
- >>> outfile.seek(0)
+ >>> outfile.seek(0) # only necessary in this example (with tempfile)
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
@@ -273,6 +300,12 @@ def savez(file, *args, **kwds):
The .npz file format is a zipped archive of files named after the variables
they contain. Each file contains one variable in .npy format.
+ Examples
+ --------
+ >>> x = np.random.random((3, 3))
+ >>> y = np.zeros((3, 2))
+ >>> np.savez('data', x=x, y=y)
+
"""
# Import is postponed to here since zipfile depends on gzip, an optional
@@ -523,20 +556,20 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, converters=None,
def savetxt(fname, X, fmt='%.18e',delimiter=' '):
"""
- Save an array to file.
+ Save an array to a text file.
Parameters
----------
- fname : filename or a file handle
- If the filename ends in .gz, the file is automatically saved in
- compressed gzip format. The load() command understands gzipped
- files transparently.
+ fname : filename or file handle
+ If the filename ends in ``.gz``, the file is automatically saved in
+ compressed gzip format. `loadtxt` understands gzipped files
+ transparently.
X : array_like
- Data.
- fmt : string or sequence of strings
+ Data to be saved to a text file.
+ fmt : str or sequence of strs
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
- case delimiter is ignored.
+ case `delimiter` is ignored.
delimiter : str
Character separating columns.
@@ -588,15 +621,20 @@ def savetxt(fname, X, fmt='%.18e',delimiter=' '):
``x,X`` : unsigned hexadecimal integer
- This is not an exhaustive specification.
-
+ This explanation of ``fmt`` is not complete, for an exhaustive
+ specification see [1]_.
+ References
+ ----------
+ .. [1] `Format Specification Mini-Language
+ <http://docs.python.org/library/string.html#
+ format-specification-mini-language>`_, Python Documentation.
Examples
--------
- >>> savetxt('test.out', x, delimiter=',') # X is an array
- >>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
- >>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation
+ >>> savetxt('test.out', x, delimiter=',') # X is an array
+ >>> savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
+ >>> savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
@@ -712,15 +750,13 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
Each line past the first `skiprows` ones is split at the `delimiter`
character, and characters following the `comments` character are discarded.
-
-
Parameters
----------
- fname : file or string
- File or filename to read. If the filename extension is `.gz` or `.bz2`,
- the file is first decompressed.
- dtype : data-type
+ fname : {file, string}
+ File or filename to read. If the filename extension is `.gz` or
+ `.bz2`, the file is first decompressed.
+ dtype : dtype
Data type of the resulting array. If this is a flexible data-type,
the resulting array will be 1-dimensional, and each row will be
interpreted as an element of the array. In this case, the number
@@ -729,20 +765,20 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
of the dtype.
If None, the dtypes will be determined by the contents of each
column, individually.
- comments : {string}, optional
+ comments : string, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
- delimiter : {string}, optional
+ delimiter : string, optional
The string used to separate values. By default, any consecutive
whitespace act as delimiter.
- skiprows : {int}, optional
+ skiprows : int, optional
Numbers of lines to skip at the beginning of the file.
converters : {None, dictionary}, optional
A dictionary mapping column number to a function that will convert
values in the column to a number. Converters can also be used to
provide a default value for missing data:
``converters = {3: lambda s: float(s or 0)}``.
- missing : {string}, optional
+ missing : string, optional
A string representing a missing value, irrespective of the column where
it appears (e.g., `'missing'` or `'unused'`).
missing_values : {None, dictionary}, optional
@@ -757,20 +793,21 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a flexible dtype.
If `names` is None, the names of the dtype fields will be used, if any.
- excludelist : {sequence}, optional
+ excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
- deletechars : {string}, optional
- A string combining invalid characters that must be deleted from the names.
+ deletechars : string, optional
+ A string combining invalid characters that must be deleted from the
+ names.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case_sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
- unpack : {bool}, optional
+ unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
- usemask : {bool}, optional
+ usemask : bool, optional
If True, returns a masked array.
If False, return a regular standard array.
@@ -779,23 +816,20 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, skiprows=0,
out : MaskedArray
Data read from the text file.
- Notes
+ See Also
--------
+ numpy.loadtxt : equivalent function when no data is missing.
+
+ Notes
+ -----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
* When the variable are named (either by a flexible dtype or with `names`,
- there must not be any header in the file (else a :exc:ValueError exception
- is raised).
-
- Warnings
- --------
+ there must not be any header in the file (else a :exc:ValueError
+ exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
- See Also
- --------
- numpy.loadtxt : equivalent function when no data is missing.
-
"""
#
if usemask:
@@ -1128,20 +1162,21 @@ def recfromtxt(fname, dtype=None, comments='#', delimiter=None, skiprows=0,
excludelist=None, deletechars=None, case_sensitive=True,
usemask=False):
"""
- Load ASCII data stored in fname and returns a standard recarray (if
+ Load ASCII data stored in fname and return a standard recarray (if
`usemask=False`) or a MaskedRecords (if `usemask=True`).
-
+
Complete description of all the optional input parameters is available in
the docstring of the `genfromtxt` function.
-
+
See Also
--------
numpy.genfromtxt : generic function
- Warnings
- --------
+ Notes
+ -----
* by default, `dtype=None`, which means that the dtype of the output array
will be determined from the data.
+
"""
kwargs = dict(dtype=dtype, comments=comments, delimiter=delimiter,
skiprows=skiprows, converters=converters,
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index 269d332bf..0e1bafa91 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -166,7 +166,8 @@ def _fix_real_abs_gt_1(x):
return x
def sqrt(x):
- """Return the square root of x.
+ """
+ Return the square root of x.
Parameters
----------
@@ -174,12 +175,29 @@ def sqrt(x):
Returns
-------
- array_like output.
+ out : array_like
+
+ Notes
+ -----
+
+ As with numpy.sqrt, this returns the principal square root of x, which is
+ what most people mean when they use square root; the principal square root
+ of x is not just any number z such that z^2 = x.
+
+ For positive numbers, the principal square root is defined as the positive
+ number z such that z^2 = x.
+
+ The principal square root of -1 is i, the principal square root of any
+ negative number -x is defined as i * sqrt(x). For any nonzero complex
+ number, it is defined by using the following branch cut: x = r e^(i t) with
+ r > 0 and -pi < t <= pi. The principal square root is then
+ sqrt(r) e^(i t/2).
Examples
--------
For real, non-negative inputs this works just like numpy.sqrt():
+
>>> np.lib.scimath.sqrt(1)
1.0
@@ -187,33 +205,20 @@ def sqrt(x):
array([ 1., 2.])
But it automatically handles negative inputs:
+
>>> np.lib.scimath.sqrt(-1)
(0.0+1.0j)
>>> np.lib.scimath.sqrt([-1,4])
array([ 0.+1.j, 2.+0.j])
- Notes
- -----
-
- As the numpy.sqrt, this returns the principal square root of x, which is
- what most people mean when they use square root; the principal square root
- of x is not any number z such as z^2 = x.
-
- For positive numbers, the principal square root is defined as the positive
- number z such as z^2 = x.
-
- The principal square root of -1 is i, the principal square root of any
- negative number -x is defined a i * sqrt(x). For any non zero complex
- number, it is defined by using the following branch cut: x = r e^(i t) with
- r > 0 and -pi < t <= pi. The principal square root is then
- sqrt(r) e^(i t/2).
"""
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
def log(x):
- """Return the natural logarithm of x.
+ """
+ Return the natural logarithm of x.
If x contains negative inputs, the answer is computed and returned in the
complex domain.
@@ -224,7 +229,7 @@ def log(x):
Returns
-------
- array_like
+ out : array_like
Examples
--------
@@ -237,12 +242,14 @@ def log(x):
>>> np.lib.scimath.log(-math.exp(1)) == (1+1j*math.pi)
True
+
"""
x = _fix_real_lt_zero(x)
return nx.log(x)
def log10(x):
- """Return the base 10 logarithm of x.
+ """
+ Return the base 10 logarithm of x.
If x contains negative inputs, the answer is computed and returned in the
complex domain.
@@ -253,12 +260,13 @@ def log10(x):
Returns
-------
- array_like
+ out : array_like
Examples
--------
(We set the printing precision so the example can be auto-tested)
+
>>> np.set_printoptions(precision=4)
>>> np.lib.scimath.log10([10**1,10**2])
@@ -267,12 +275,14 @@ def log10(x):
>>> np.lib.scimath.log10([-10**1,-10**2,10**2])
array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ])
+
"""
x = _fix_real_lt_zero(x)
return nx.log10(x)
def logn(n, x):
- """Take log base n of x.
+ """
+ Take log base n of x.
If x contains negative inputs, the answer is computed and returned in the
complex domain.
@@ -283,12 +293,13 @@ def logn(n, x):
Returns
-------
- array_like
+ out : array_like
Examples
--------
(We set the printing precision so the example can be auto-tested)
+
>>> np.set_printoptions(precision=4)
>>> np.lib.scimath.logn(2,[4,8])
@@ -296,13 +307,15 @@ def logn(n, x):
>>> np.lib.scimath.logn(2,[-4,-8,8])
array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ])
+
"""
x = _fix_real_lt_zero(x)
n = _fix_real_lt_zero(n)
return nx.log(x)/nx.log(n)
def log2(x):
- """ Take log base 2 of x.
+ """
+ Take log base 2 of x.
If x contains negative inputs, the answer is computed and returned in the
complex domain.
@@ -313,12 +326,13 @@ def log2(x):
Returns
-------
- array_like
+ out : array_like
Examples
--------
(We set the printing precision so the example can be auto-tested)
+
>>> np.set_printoptions(precision=4)
>>> np.lib.scimath.log2([4,8])
@@ -326,12 +340,14 @@ def log2(x):
>>> np.lib.scimath.log2([-4,-8,8])
array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ])
+
"""
x = _fix_real_lt_zero(x)
return nx.log2(x)
def power(x, p):
- """Return x**p.
+ """
+ Return x**p.
If x contains negative values, it is converted to the complex domain.
@@ -344,11 +360,12 @@ def power(x, p):
Returns
-------
- array_like
+ out : array_like
Examples
--------
(We set the printing precision so the example can be auto-tested)
+
>>> np.set_printoptions(precision=4)
>>> np.lib.scimath.power([2,4],2)
@@ -359,6 +376,7 @@ def power(x, p):
>>> np.lib.scimath.power([-2,4],2)
array([ 4.+0.j, 16.+0.j])
+
"""
x = _fix_real_lt_zero(x)
p = _fix_int_lt_zero(p)
@@ -393,7 +411,8 @@ def arccos(x):
return nx.arccos(x)
def arcsin(x):
- """Compute the inverse sine of x.
+ """
+ Compute the inverse sine of x.
For real x with abs(x)<=1, this returns the principal value.
@@ -410,6 +429,7 @@ def arcsin(x):
Examples
--------
(We set the printing precision so the example can be auto-tested)
+
>>> np.set_printoptions(precision=4)
>>> np.lib.scimath.arcsin(0)
@@ -417,12 +437,14 @@ def arcsin(x):
>>> np.lib.scimath.arcsin([0,1])
array([ 0. , 1.5708])
+
"""
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x)
def arctanh(x):
- """Compute the inverse hyperbolic tangent of x.
+ """
+ Compute the inverse hyperbolic tangent of x.
For real x with abs(x)<=1, this returns the principal value.
@@ -434,7 +456,7 @@ def arctanh(x):
Returns
-------
- array_like
+ out : array_like
Examples
--------
@@ -446,6 +468,7 @@ def arctanh(x):
>>> np.lib.scimath.arctanh([0,2])
array([ 0.0000+0.j , 0.5493-1.5708j])
+
"""
x = _fix_real_abs_gt_1(x)
return nx.arctanh(x)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 19dd54f7a..a5bf4d0ea 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -892,6 +892,19 @@ def dsplit(ary,indices_or_sections):
raise ValueError, 'vsplit only works on arrays of 3 or more dimensions'
return split(ary,indices_or_sections,2)
+def get_array_prepare(*args):
+ """Find the wrapper for the array with the highest priority.
+
+ In case of ties, leftmost wins. If no wrapper is found, return None
+ """
+ wrappers = [(getattr(x, '__array_priority__', 0), -i,
+ x.__array_prepare__) for i, x in enumerate(args)
+ if hasattr(x, '__array_prepare__')]
+ wrappers.sort()
+ if wrappers:
+ return wrappers[-1][-1]
+ return None
+
def get_array_wrap(*args):
"""Find the wrapper for the array with the highest priority.
@@ -975,7 +988,6 @@ def kron(a,b):
True
"""
- wrapper = get_array_wrap(a, b)
b = asanyarray(b)
a = array(a,copy=False,subok=True,ndmin=b.ndim)
ndb, nda = b.ndim, a.ndim
@@ -998,6 +1010,10 @@ def kron(a,b):
axis = nd-1
for _ in xrange(nd):
result = concatenate(result, axis=axis)
+ wrapper = get_array_prepare(a, b)
+ if wrapper is not None:
+ result = wrapper(result)
+ wrapper = get_array_wrap(a, b)
if wrapper is not None:
result = wrapper(result)
return result
@@ -1007,6 +1023,19 @@ def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
+ If `reps` has length ``d``, the result will have dimension of
+ ``max(d, A.ndim)``.
+
+ If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
+ axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
+ or shape (1, 1, 3) for 3-D replication. If this is not the desired
+ behavior, promote `A` to d-dimensions manually before calling this
+ function.
+
+ If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it.
+ Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
+ (1, 1, 2, 2).
+
Parameters
----------
A : array_like
@@ -1017,24 +1046,11 @@ def tile(A, reps):
Returns
-------
c : ndarray
- The output array.
+ The tiled output array.
See Also
--------
- repeat
-
- Notes
- -----
- If `reps` has length d, the result will have dimension of max(d, `A`.ndim).
-
- If `A`.ndim < d, `A` is promoted to be d-dimensional by prepending new
- axes. So a shape (3,) array is promoted to (1,3) for 2-D replication,
- or shape (1,1,3) for 3-D replication. If this is not the desired behavior,
- promote `A` to d-dimensions manually before calling this function.
-
- If `A`.ndim > d, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
- Thus for an `A` of shape (2,3,4,5), a `reps` of (2,2) is treated as
- (1,1,2,2).
+ repeat : Repeat elements of an array.
Examples
--------
@@ -1046,7 +1062,6 @@ def tile(A, reps):
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
- <BLANKLINE>
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 40bc11f6e..92305129a 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -9,39 +9,61 @@ from numpy.lib.arraysetops import *
import warnings
class TestAso(TestCase):
- def test_unique1d( self ):
+ def test_unique( self ):
a = np.array( [5, 7, 1, 2, 1, 5, 7] )
ec = np.array( [1, 2, 5, 7] )
- c = unique1d( a )
+ c = unique( a )
assert_array_equal( c, ec )
warnings.simplefilter('ignore', Warning)
- unique, indices = unique1d( a, return_index=True )
+ vals, indices = unique( a, return_index=True )
warnings.resetwarnings()
ed = np.array( [2, 3, 0, 1] )
- assert_array_equal(unique, ec)
+ assert_array_equal(vals, ec)
assert_array_equal(indices, ed)
- assert_array_equal([], unique1d([]))
+ warnings.simplefilter('ignore', Warning)
+ vals, ind0, ind1 = unique( a, return_index=True,
+ return_inverse=True )
+ warnings.resetwarnings()
+
+ ee = np.array( [2, 3, 0, 1, 0, 2, 3] )
+ assert_array_equal(vals, ec)
+ assert_array_equal(ind0, ed)
+ assert_array_equal(ind1, ee)
+
+ assert_array_equal([], unique([]))
def test_intersect1d( self ):
+ # unique inputs
a = np.array( [5, 7, 1, 2] )
b = np.array( [2, 4, 3, 1, 5] )
ec = np.array( [1, 2, 5] )
- c = intersect1d( a, b )
+ c = intersect1d( a, b, assume_unique=True )
assert_array_equal( c, ec )
+ # non-unique inputs
+ a = np.array( [5, 5, 7, 1, 2] )
+ b = np.array( [2, 1, 4, 3, 3, 1, 5] )
+
+ ed = np.array( [1, 2, 5] )
+ c = intersect1d( a, b )
+ assert_array_equal( c, ed )
+
assert_array_equal([], intersect1d([],[]))
def test_intersect1d_nu( self ):
+ # This should be removed when intersect1d_nu is removed.
a = np.array( [5, 5, 7, 1, 2] )
b = np.array( [2, 1, 4, 3, 3, 1, 5] )
ec = np.array( [1, 2, 5] )
+ warnings.simplefilter('ignore', Warning)
c = intersect1d_nu( a, b )
+ warnings.resetwarnings()
assert_array_equal( c, ec )
assert_array_equal([], intersect1d_nu([],[]))
@@ -83,11 +105,14 @@ class TestAso(TestCase):
assert_array_equal([1],ediff1d(two_elem))
def test_setmember1d( self ):
+ # This should be removed when setmember1d is removed.
a = np.array( [5, 7, 1, 2] )
b = np.array( [2, 4, 3, 1, 5] )
ec = np.array( [True, False, True, True] )
+ warnings.simplefilter('ignore', Warning)
c = setmember1d( a, b )
+ warnings.resetwarnings()
assert_array_equal( c, ec )
a[0] = 8
@@ -102,51 +127,77 @@ class TestAso(TestCase):
assert_array_equal([], setmember1d([],[]))
- def test_setmember1d_nu(self):
+ def test_in1d(self):
+ a = np.array( [5, 7, 1, 2] )
+ b = np.array( [2, 4, 3, 1, 5] )
+
+ ec = np.array( [True, False, True, True] )
+ c = in1d( a, b, assume_unique=True )
+ assert_array_equal( c, ec )
+
+ a[0] = 8
+ ec = np.array( [False, False, True, True] )
+ c = in1d( a, b, assume_unique=True )
+ assert_array_equal( c, ec )
+
+ a[0], a[3] = 4, 8
+ ec = np.array( [True, False, True, False] )
+ c = in1d( a, b, assume_unique=True )
+ assert_array_equal( c, ec )
+
a = np.array([5,4,5,3,4,4,3,4,3,5,2,1,5,5])
b = [2,3,4]
ec = [False, True, False, True, True, True, True, True, True, False,
True, False, False, False]
- c = setmember1d_nu(a, b)
+ c = in1d(a, b)
assert_array_equal(c, ec)
b = b + [5, 5, 4]
ec = [True, True, True, True, True, True, True, True, True, True,
True, False, True, True]
- c = setmember1d_nu(a, b)
+ c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 7, 1, 2])
b = np.array([2, 4, 3, 1, 5])
ec = np.array([True, False, True, True])
- c = setmember1d_nu(a, b)
+ c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 7, 1, 1, 2])
b = np.array([2, 4, 3, 3, 1, 5])
ec = np.array([True, False, True, True, True])
- c = setmember1d_nu(a, b)
+ c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5])
b = np.array([2])
ec = np.array([False])
- c = setmember1d_nu(a, b)
+ c = in1d(a, b)
assert_array_equal(c, ec)
a = np.array([5, 5])
b = np.array([2, 2])
ec = np.array([False, False])
- c = setmember1d_nu(a, b)
+ c = in1d(a, b)
assert_array_equal(c, ec)
- assert_array_equal(setmember1d_nu([], []), [])
+ assert_array_equal(in1d([], []), [])
+
+ def test_in1d_char_array( self ):
+ a = np.array(['a', 'b', 'c','d','e','c','e','b'])
+ b = np.array(['a','c'])
+
+ ec = np.array([True, False, True, False, False, True, False, False])
+ c = in1d(a, b)
+
+ assert_array_equal(c, ec)
def test_union1d( self ):
a = np.array( [5, 4, 7, 1, 2] )
@@ -159,7 +210,7 @@ class TestAso(TestCase):
assert_array_equal([], union1d([],[]))
def test_setdiff1d( self ):
- a = np.array( [6, 5, 4, 7, 1, 2] )
+ a = np.array( [6, 5, 4, 7, 1, 2, 7, 4] )
b = np.array( [2, 4, 3, 3, 2, 1, 5] )
ec = np.array( [6, 7] )
@@ -180,14 +231,6 @@ class TestAso(TestCase):
assert_array_equal(setdiff1d(a,b),np.array(['c']))
def test_manyways( self ):
- nItem = 100
- a = np.fix( nItem / 10 * np.random.random( nItem ) )
- b = np.fix( nItem / 10 * np.random.random( nItem ) )
-
- c1 = intersect1d_nu( a, b )
- c2 = unique1d( intersect1d( a, b ) )
- assert_array_equal( c1, c2 )
-
a = np.array( [5, 7, 1, 2, 8] )
b = np.array( [9, 8, 2, 4, 3, 1, 5] )
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index 1ac14b561..c1d77c517 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -36,13 +36,18 @@ class TestFinancial(TestCase):
117.04, 2)
def test_mirr(self):
- v1 = [-4500,-800,800,800,600,600,800,800,700,3000]
- assert_almost_equal(np.mirr(v1,0.08,0.055),
- 0.0665, 4)
+ val = [-4500,-800,800,800,600,600,800,800,700,3000]
+ assert_almost_equal(np.mirr(val, 0.08, 0.055), 0.0666, 4)
+
+ val = [-120000,39000,30000,21000,37000,46000]
+ assert_almost_equal(np.mirr(val, 0.10, 0.12), 0.126094, 6)
+
+ val = [100,200,-50,300,-200]
+ assert_almost_equal(np.mirr(val, 0.05, 0.06), 0.3428, 4)
+
+ val = [39000,30000,21000,37000,46000]
+ assert_(np.isnan(np.mirr(val, 0.10, 0.12)))
- v2 = [-120000,39000,30000,21000,37000,46000]
- assert_almost_equal(np.mirr(v2,0.10,0.12),
- 0.1344, 4)
def test_unimplemented():
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 47529502d..d7e61799a 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -1,5 +1,8 @@
from numpy.testing import *
-from numpy import array, ones, r_, mgrid, unravel_index
+import numpy as np
+from numpy import ( array, ones, r_, mgrid, unravel_index, zeros, where,
+ ndenumerate, fill_diagonal, diag_indices,
+ diag_indices_from )
class TestUnravelIndex(TestCase):
def test_basic(self):
@@ -62,5 +65,60 @@ class TestConcatenator(TestCase):
assert_array_equal(d[5:,:],c)
+class TestNdenumerate(TestCase):
+ def test_basic(self):
+ a = array([[1,2], [3,4]])
+ assert_equal(list(ndenumerate(a)),
+ [((0,0), 1), ((0,1), 2), ((1,0), 3), ((1,1), 4)])
+
+
+def test_fill_diagonal():
+ a = zeros((3, 3),int)
+ fill_diagonal(a, 5)
+ yield (assert_array_equal, a,
+ array([[5, 0, 0],
+ [0, 5, 0],
+ [0, 0, 5]]))
+
+ # The same function can operate on a 4-d array:
+ a = zeros((3, 3, 3, 3), int)
+ fill_diagonal(a, 4)
+ i = array([0, 1, 2])
+ yield (assert_equal, where(a != 0), (i, i, i, i))
+
+
+def test_diag_indices():
+ di = diag_indices(4)
+ a = array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+ a[di] = 100
+ yield (assert_array_equal, a,
+ array([[100, 2, 3, 4],
+ [ 5, 100, 7, 8],
+ [ 9, 10, 100, 12],
+ [ 13, 14, 15, 100]]))
+
+ # Now, we create indices to manipulate a 3-d array:
+ d3 = diag_indices(2, 3)
+
+ # And use it to set the diagonal of a zeros array to 1:
+ a = zeros((2, 2, 2),int)
+ a[d3] = 1
+ yield (assert_array_equal, a,
+ array([[[1, 0],
+ [0, 0]],
+
+ [[0, 0],
+ [0, 1]]]) )
+
+def test_diag_indices_from():
+ x = np.random.random((4, 4))
+ r, c = diag_indices_from(x)
+ assert_array_equal(r, np.arange(4))
+ assert_array_equal(c, np.arange(4))
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index e5a73a86a..185ceef7c 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -916,5 +916,31 @@ def test_gzip_loadtxt_from_string():
f = gzip.GzipFile(fileobj=s, mode="r")
assert_array_equal(np.loadtxt(f), [1, 2, 3])
+def test_npzfile_dict():
+ s = StringIO.StringIO()
+ x = np.zeros((3, 3))
+ y = np.zeros((3, 3))
+
+ np.savez(s, x=x, y=y)
+ s.seek(0)
+
+ z = np.load(s)
+
+ assert 'x' in z
+ assert 'y' in z
+ assert 'x' in z.keys()
+ assert 'y' in z.keys()
+
+ for f, a in z.iteritems():
+ assert f in ['x', 'y']
+ assert_equal(a.shape, (3, 3))
+
+ assert len(z.items()) == 2
+
+ for f in z:
+ assert f in ['x', 'y']
+
+ assert 'x' in list(z.iterkeys())
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index b8c487962..5abf9aefe 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -48,6 +48,10 @@ class TestRegression(object):
"""Ticket 928."""
assert_raises(ValueError, np.histogramdd, np.ones((1,10)), bins=2**10)
+ def test_ndenumerate_crash(self):
+ """Ticket 1140"""
+ # Shouldn't crash:
+ list(np.ndenumerate(np.array([[]])))
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 32c4ca58e..5d850f9fd 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -3,8 +3,11 @@
"""
from numpy.testing import *
-from numpy import arange, rot90, add, fliplr, flipud, zeros, ones, eye, \
- array, diag, histogram2d, tri
+
+from numpy import ( arange, rot90, add, fliplr, flipud, zeros, ones, eye,
+ array, diag, histogram2d, tri, mask_indices, triu_indices,
+ triu_indices_from, tril_indices, tril_indices_from )
+
import numpy as np
def get_mat(n):
@@ -50,34 +53,68 @@ class TestEye(TestCase):
[1,0,0],
[0,1,0]]))
+ def test_eye_bounds(self):
+ assert_equal(eye(2, 2, 1), [[0, 1], [0, 0]])
+ assert_equal(eye(2, 2, -1), [[0, 0], [1, 0]])
+ assert_equal(eye(2, 2, 2), [[0, 0], [0, 0]])
+ assert_equal(eye(2, 2, -2), [[0, 0], [0, 0]])
+ assert_equal(eye(3, 2, 2), [[0, 0], [0, 0], [0, 0]])
+ assert_equal(eye(3, 2, 1), [[0, 1], [0, 0], [0, 0]])
+ assert_equal(eye(3, 2, -1), [[0, 0], [1, 0], [0, 1]])
+ assert_equal(eye(3, 2, -2), [[0, 0], [0, 0], [1, 0]])
+ assert_equal(eye(3, 2, -3), [[0, 0], [0, 0], [0, 0]])
+
+ def test_strings(self):
+ assert_equal(eye(2, 2, dtype='S3'), [['1', ''], ['', '1']])
+
+ def test_bool(self):
+ assert_equal(eye(2, 2, dtype=bool), [[True, False], [False, True]])
+
class TestDiag(TestCase):
def test_vector(self):
- vals = (100*arange(5)).astype('l')
- b = zeros((5,5))
+ vals = (100 * arange(5)).astype('l')
+ b = zeros((5, 5))
for k in range(5):
- b[k,k] = vals[k]
- assert_equal(diag(vals),b)
- b = zeros((7,7))
+ b[k, k] = vals[k]
+ assert_equal(diag(vals), b)
+ b = zeros((7, 7))
c = b.copy()
for k in range(5):
- b[k,k+2] = vals[k]
- c[k+2,k] = vals[k]
- assert_equal(diag(vals,k=2), b)
- assert_equal(diag(vals,k=-2), c)
+ b[k, k + 2] = vals[k]
+ c[k + 2, k] = vals[k]
+ assert_equal(diag(vals, k=2), b)
+ assert_equal(diag(vals, k=-2), c)
- def test_matrix(self):
- vals = (100*get_mat(5)+1).astype('l')
+ def test_matrix(self, vals=None):
+ if vals is None:
+ vals = (100 * get_mat(5) + 1).astype('l')
b = zeros((5,))
for k in range(5):
b[k] = vals[k,k]
- assert_equal(diag(vals),b)
- b = b*0
+ assert_equal(diag(vals), b)
+ b = b * 0
for k in range(3):
- b[k] = vals[k,k+2]
- assert_equal(diag(vals,2),b[:3])
+ b[k] = vals[k, k + 2]
+ assert_equal(diag(vals, 2), b[:3])
for k in range(3):
- b[k] = vals[k+2,k]
- assert_equal(diag(vals,-2),b[:3])
+ b[k] = vals[k + 2, k]
+ assert_equal(diag(vals, -2), b[:3])
+
+ def test_fortran_order(self):
+ vals = array((100 * get_mat(5) + 1), order='F', dtype='l')
+ self.test_matrix(vals)
+
+ def test_diag_bounds(self):
+ A = [[1, 2], [3, 4], [5, 6]]
+ assert_equal(diag(A, k=2), [])
+ assert_equal(diag(A, k=1), [2])
+ assert_equal(diag(A, k=0), [1, 4])
+ assert_equal(diag(A, k=-1), [3, 6])
+ assert_equal(diag(A, k=-2), [5])
+ assert_equal(diag(A, k=-3), [])
+
+ def test_failure(self):
+ self.failUnlessRaises(ValueError, diag, [[[1]]])
class TestFliplr(TestCase):
def test_basic(self):
@@ -193,5 +230,76 @@ class TestTri(TestCase):
assert_array_equal(tri(3,dtype=bool),out.astype(bool))
+def test_mask_indices():
+ # simple test without offset
+ iu = mask_indices(3, np.triu)
+ a = np.arange(9).reshape(3, 3)
+ yield (assert_array_equal, a[iu], array([0, 1, 2, 4, 5, 8]))
+ # Now with an offset
+ iu1 = mask_indices(3, np.triu, 1)
+ yield (assert_array_equal, a[iu1], array([1, 2, 5]))
+
+
+def test_tril_indices():
+ # indices without and with offset
+ il1 = tril_indices(4)
+ il2 = tril_indices(4, 2)
+
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+
+ # indexing:
+ yield (assert_array_equal, a[il1],
+ array([ 1, 5, 6, 9, 10, 11, 13, 14, 15, 16]) )
+
+ # And for assigning values:
+ a[il1] = -1
+ yield (assert_array_equal, a,
+ array([[-1, 2, 3, 4],
+ [-1, -1, 7, 8],
+ [-1, -1, -1, 12],
+ [-1, -1, -1, -1]]) )
+
+ # These cover almost the whole array (two diagonals right of the main one):
+ a[il2] = -10
+ yield (assert_array_equal, a,
+ array([[-10, -10, -10, 4],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]]) )
+
+
+def test_triu_indices():
+ iu1 = triu_indices(4)
+ iu2 = triu_indices(4, 2)
+
+ a = np.array([[1, 2, 3, 4],
+ [5, 6, 7, 8],
+ [9, 10, 11, 12],
+ [13, 14, 15, 16]])
+
+ # Both for indexing:
+ yield (assert_array_equal, a[iu1],
+ array([1, 2, 3, 4, 6, 7, 8, 11, 12, 16]))
+
+ # And for assigning values:
+ a[iu1] = -1
+ yield (assert_array_equal, a,
+ array([[-1, -1, -1, -1],
+ [ 5, -1, -1, -1],
+ [ 9, 10, -1, -1],
+ [13, 14, 15, -1]]) )
+
+ # These cover almost the whole array (two diagonals right of the main one):
+ a[iu2] = -10
+ yield ( assert_array_equal, a,
+ array([[ -1, -1, -10, -10],
+ [ 5, -1, -1, -10],
+ [ 9, 10, -1, -1],
+ [ 13, 14, 15, -1]]) )
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index f0abf3122..e794d4144 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -3,10 +3,13 @@
"""
__all__ = ['diag','diagflat','eye','fliplr','flipud','rot90','tri','triu',
- 'tril','vander','histogram2d']
+ 'tril','vander','histogram2d','mask_indices',
+ 'tril_indices','tril_indices_from','triu_indices','triu_indices_from',
+ ]
from numpy.core.numeric import asanyarray, equal, subtract, arange, \
- zeros, greater_equal, multiply, ones, asarray
+ zeros, greater_equal, multiply, ones, asarray, alltrue, where, \
+ empty
def fliplr(m):
"""
@@ -195,10 +198,16 @@ def eye(N, M=None, k=0, dtype=float):
[ 0., 0., 0.]])
"""
- if M is None: M = N
- m = equal(subtract.outer(arange(N), arange(M)),-k)
- if m.dtype != dtype:
- m = m.astype(dtype)
+ if M is None:
+ M = N
+ m = zeros((N, M), dtype=dtype)
+ if k >= M:
+ return m
+ if k >= 0:
+ i = k
+ else:
+ i = (-k) * M
+ m[:M-k].flat[i::M+1] = 1
return m
def diag(v, k=0):
@@ -244,28 +253,26 @@ def diag(v, k=0):
"""
v = asarray(v)
s = v.shape
- if len(s)==1:
+ if len(s) == 1:
n = s[0]+abs(k)
res = zeros((n,n), v.dtype)
- if (k>=0):
- i = arange(0,n-k)
- fi = i+k+i*n
+ if k >= 0:
+ i = k
else:
- i = arange(0,n+k)
- fi = i+(i-k)*n
- res.flat[fi] = v
+ i = (-k) * n
+ res[:n-k].flat[i::n+1] = v
return res
- elif len(s)==2:
- N1,N2 = s
+ elif len(s) == 2:
+ if k >= s[1]:
+ return empty(0, dtype=v.dtype)
+ if v.flags.f_contiguous:
+ # faster slicing
+ v, k, s = v.T, -k, s[::-1]
if k >= 0:
- M = min(N1,N2-k)
- i = arange(0,M)
- fi = i+k+i*N2
+ i = k
else:
- M = min(N1+k,N2)
- i = arange(0,M)
- fi = i + (i-k)*N2
- return v.flat[fi]
+ i = (-k) * s[1]
+ return v[:s[1]-k].flat[i::s[1]+1]
else:
raise ValueError, "Input must be 1- or 2-d."
@@ -559,3 +566,233 @@ def histogram2d(x,y, bins=10, range=None, normed=False, weights=None):
bins = [xedges, yedges]
hist, edges = histogramdd([x,y], bins, range, normed, weights)
return hist, edges[0], edges[1]
+
+
+def mask_indices(n,mask_func,k=0):
+ """Return the indices to access (n,n) arrays, given a masking function.
+
+ Assume mask_func() is a function that, for a square array a of size (n,n)
+ with a possible offset argument k, when called as mask_func(a,k) returns a
+ new array with zeros in certain locations (functions like triu() or tril()
+ do precisely this). Then this function returns the indices where the
+ non-zero values would be located.
+
+ Parameters
+ ----------
+ n : int
+ The returned indices will be valid to access arrays of shape (n,n).
+
+ mask_func : callable
+ A function whose api is similar to that of numpy.tri{u,l}. That is,
+ mask_func(x,k) returns a boolean array, shaped like x. k is an optional
+ argument to the function.
+
+ k : scalar
+ An optional argument which is passed through to mask_func(). Functions
+ like tri{u,l} take a second argument that is interpreted as an offset.
+
+ Returns
+ -------
+ indices : an n-tuple of index arrays.
+ The indices corresponding to the locations where mask_func(ones((n,n)),k)
+ is True.
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+ These are the indices that would allow you to access the upper triangular
+ part of any 3x3 array:
+ >>> iu = mask_indices(3,np.triu)
+
+ For example, if `a` is a 3x3 array:
+ >>> a = np.arange(9).reshape(3,3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+
+ Then:
+ >>> a[iu]
+ array([0, 1, 2, 4, 5, 8])
+
+ An offset can be passed also to the masking function. This gets us the
+ indices starting on the first diagonal right of the main one:
+ >>> iu1 = mask_indices(3,np.triu,1)
+
+ with which we now extract only three elements:
+ >>> a[iu1]
+ array([1, 2, 5])
+ """
+ m = ones((n,n),int)
+ a = mask_func(m,k)
+ return where(a != 0)
+
+
+def tril_indices(n,k=0):
+ """Return the indices for the lower-triangle of an (n,n) array.
+
+ Parameters
+ ----------
+ n : int
+ Sets the size of the arrays for which the returned indices will be valid.
+
+ k : int, optional
+ Diagonal offset (see tril() for details).
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+    Commpute two different sets of indices to access 4x4 arrays, one for the
+ lower triangular part starting at the main diagonal, and one starting two
+ diagonals further right:
+
+ >>> il1 = tril_indices(4)
+ >>> il2 = tril_indices(4,2)
+
+ Here is how they can be used with a sample array:
+ >>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
+ >>> a
+ array([[ 1, 2, 3, 4],
+ [ 5, 6, 7, 8],
+ [ 9, 10, 11, 12],
+ [13, 14, 15, 16]])
+
+ Both for indexing:
+ >>> a[il1]
+ array([ 1, 5, 6, 9, 10, 11, 13, 14, 15, 16])
+
+ And for assigning values:
+ >>> a[il1] = -1
+ >>> a
+ array([[-1, 2, 3, 4],
+ [-1, -1, 7, 8],
+ [-1, -1, -1, 12],
+ [-1, -1, -1, -1]])
+
+ These cover almost the whole array (two diagonals right of the main one):
+ >>> a[il2] = -10
+ >>> a
+ array([[-10, -10, -10, 4],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10],
+ [-10, -10, -10, -10]])
+
+ See also
+ --------
+ - triu_indices : similar function, for upper-triangular.
+ - mask_indices : generic function accepting an arbitrary mask function.
+ """
+ return mask_indices(n,tril,k)
+
+
+def tril_indices_from(arr,k=0):
+ """Return the indices for the lower-triangle of an (n,n) array.
+
+ See tril_indices() for full details.
+
+ Parameters
+ ----------
+    arr : ndarray
+        The indices will be valid for square arrays whose dimensions are the same as those of arr.
+
+ k : int, optional
+ Diagonal offset (see tril() for details).
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+    if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
+ raise ValueError("input array must be 2-d and square")
+ return tril_indices(arr.shape[0],k)
+
+
+def triu_indices(n,k=0):
+ """Return the indices for the upper-triangle of an (n,n) array.
+
+ Parameters
+ ----------
+ n : int
+ Sets the size of the arrays for which the returned indices will be valid.
+
+ k : int, optional
+ Diagonal offset (see triu() for details).
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ Examples
+ --------
+    Compute two different sets of indices to access 4x4 arrays, one for the
+    upper triangular part starting at the main diagonal, and one starting two
+ diagonals further right:
+
+ >>> iu1 = triu_indices(4)
+ >>> iu2 = triu_indices(4,2)
+
+ Here is how they can be used with a sample array:
+ >>> a = np.array([[1,2,3,4],[5,6,7,8],[9,10,11,12],[13,14,15,16]])
+ >>> a
+ array([[ 1, 2, 3, 4],
+ [ 5, 6, 7, 8],
+ [ 9, 10, 11, 12],
+ [13, 14, 15, 16]])
+
+ Both for indexing:
+    >>> a[iu1]
+    array([ 1,  2,  3,  4,  6,  7,  8, 11, 12, 16])
+
+ And for assigning values:
+    >>> a[iu1] = -1
+ >>> a
+ array([[-1, -1, -1, -1],
+ [ 5, -1, -1, -1],
+ [ 9, 10, -1, -1],
+ [13, 14, 15, -1]])
+
+ These cover almost the whole array (two diagonals right of the main one):
+ >>> a[iu2] = -10
+ >>> a
+ array([[ -1, -1, -10, -10],
+ [ 5, -1, -1, -10],
+ [ 9, 10, -1, -1],
+ [ 13, 14, 15, -1]])
+
+ See also
+ --------
+ - tril_indices : similar function, for lower-triangular.
+ - mask_indices : generic function accepting an arbitrary mask function.
+ """
+ return mask_indices(n,triu,k)
+
+
+def triu_indices_from(arr,k=0):
+    """Return the indices for the upper-triangle of an (n,n) array.
+
+ See triu_indices() for full details.
+
+ Parameters
+ ----------
+    arr : ndarray
+        The indices will be valid for square arrays whose dimensions are the same as those of arr.
+
+ k : int, optional
+ Diagonal offset (see triu() for details).
+
+ Notes
+ -----
+ .. versionadded:: 1.4.0
+
+ """
+    if not (arr.ndim == 2 and arr.shape[0] == arr.shape[1]):
+ raise ValueError("input array must be 2-d and square")
+ return triu_indices(arr.shape[0],k)
+
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 113cec682..69f4f2193 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -85,8 +85,8 @@ def real(val):
Returns
-------
out : ndarray
- If `val` is real, the type of `val` is used for the output. If `val`
- has complex elements, the returned type is float.
+ Output array. If `val` is real, the type of `val` is used for the
+ output. If `val` has complex elements, the returned type is float.
See Also
--------
@@ -94,13 +94,13 @@ def real(val):
Examples
--------
- >>> a = np.array([1+2j,3+4j,5+6j])
+ >>> a = np.array([1+2j, 3+4j, 5+6j])
>>> a.real
array([ 1., 3., 5.])
>>> a.real = 9
>>> a
array([ 9.+2.j, 9.+4.j, 9.+6.j])
- >>> a.real = np.array([9,8,7])
+ >>> a.real = np.array([9, 8, 7])
>>> a
array([ 9.+2.j, 8.+4.j, 7.+6.j])
@@ -109,7 +109,7 @@ def real(val):
def imag(val):
"""
- Return the imaginary part of array.
+ Return the imaginary part of the elements of the array.
Parameters
----------
@@ -118,8 +118,22 @@ def imag(val):
Returns
-------
- out : ndarray, real or int
- Real part of each element, same shape as `val`.
+ out : ndarray
+ Output array. If `val` is real, the type of `val` is used for the
+ output. If `val` has complex elements, the returned type is float.
+
+ See Also
+ --------
+ real, angle, real_if_close
+
+ Examples
+ --------
+ >>> a = np.array([1+2j, 3+4j, 5+6j])
+ >>> a.imag
+ array([ 2., 4., 6.])
+ >>> a.imag = np.array([8, 10, 12])
+ >>> a
+ array([ 1. +8.j, 3.+10.j, 5.+12.j])
"""
return asanyarray(val).imag
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index 5dbc3f225..5e89b0930 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -176,7 +176,7 @@ def isneginf(x, y=None):
_log2 = nx.log(2)
def log2(x, y=None):
"""
- Return the base 2 logarithm.
+ Return the base 2 logarithm of the input array, element-wise.
Parameters
----------
@@ -188,7 +188,7 @@ def log2(x, y=None):
Returns
-------
y : ndarray
- The logarithm to the base 2 of `x` elementwise.
+ The logarithm to the base 2 of `x` element-wise.
NaNs are returned where `x` is negative.
See Also
@@ -197,7 +197,7 @@ def log2(x, y=None):
Examples
--------
- >>> np.log2([-1,2,4])
+ >>> np.log2([-1, 2, 4])
array([ NaN, 1., 2.])
"""
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 3de0579df..908c4995d 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -81,12 +81,34 @@ else:
return func
def deprecate(func, oldname=None, newname=None):
- """Deprecate old functions.
+ """
+ Deprecate old functions.
+
Issues a DeprecationWarning, adds warning to oldname's docstring,
rebinds oldname.__name__ and returns new function object.
- Example:
- oldfunc = deprecate(newfunc, 'oldfunc', 'newfunc')
+ Parameters
+ ----------
+ func : function
+
+ oldname : string
+
+ newname : string
+
+ Returns
+ -------
+ old_func : function
+
+ Examples
+ --------
+ Note that olduint returns a value after printing Deprecation Warning.
+
+ >>> olduint = np.deprecate(np.uint)
+ >>> olduint(6)
+ /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
+ DeprecationWarning: uint32 is deprecated
+ warnings.warn(str1, DeprecationWarning)
+ 6
"""
@@ -186,13 +208,28 @@ def byte_bounds(a):
def may_share_memory(a, b):
- """Determine if two arrays can share memory
+ """
+ Determine if two arrays can share memory
The memory-bounds of a and b are computed. If they overlap then
this function returns True. Otherwise, it returns False.
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
+
+ Parameters
+ ----------
+ a, b : ndarray
+
+ Returns
+ -------
+ out : bool
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+
"""
a_low, a_high = byte_bounds(a)
b_low, b_high = byte_bounds(b)
@@ -349,24 +386,46 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
Parameters
----------
- object : optional
- Input object to get information about.
+ object : object or str, optional
+ Input object or name to get information about. If `object` is a
+ numpy object, its docstring is given. If it is a string, available
+ modules are searched for matching objects.
+ If None, information about `info` itself is returned.
maxwidth : int, optional
Printing width.
- output : file like object open for writing, optional
- Write into file like object.
- toplevel : string, optional
+ output : file like object, optional
+ File like object that the output is written to, default is ``stdout``.
+ The object has to be opened in 'w' or 'a' mode.
+ toplevel : str, optional
Start search at this level.
+ See Also
+ --------
+ source, lookfor
+
+ Notes
+ -----
+ When used interactively with an object, ``np.info(obj)`` is equivalent to
+ ``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt.
+
Examples
--------
>>> np.info(np.polyval) # doctest: +SKIP
-
polyval(p, x)
+ Evaluate the polynomial p at x.
+ ...
- Evaluate the polymnomial p at x.
+ When using a string for `object` it is possible to get multiple results.
- ...
+ >>> np.info('fft') # doctest: +SKIP
+ *** Found in numpy ***
+ Core FFT routines
+ ...
+ *** Found in numpy.fft ***
+ fft(a, n=None, axis=-1)
+ ...
+ *** Repeat reference found in numpy.fft.fftpack ***
+ *** Total of 3 references found. ***
"""
global _namedict, _dictlist
@@ -512,15 +571,39 @@ def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a Numpy object.
+ The source code is only returned for objects written in Python. Many
+ functions and classes are defined in C and will therefore not return
+ useful information.
+
Parameters
----------
object : numpy object
- Input object.
+ Input object. This can be any object (function, class, module, ...).
output : file object, optional
If `output` not supplied then source code is printed to screen
(sys.stdout). File object must be created with either write 'w' or
append 'a' modes.
+ See Also
+ --------
+ lookfor, info
+
+ Examples
+ --------
+ >>> np.source(np.interp)
+ In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
+ def interp(x, xp, fp, left=None, right=None):
+ \"\"\".... (full docstring printed)\"\"\"
+ if isinstance(x, (float, int, number)):
+ return compiled_interp([x], xp, fp, left, right).item()
+ else:
+ return compiled_interp(x, xp, fp, left, right)
+
+ The source code is only returned for objects written in Python.
+
+ >>> np.source(np.array)
+ Not available for this object.
+
"""
# Local import to speed up numpy's import time.
import inspect
@@ -544,28 +627,41 @@ def lookfor(what, module=None, import_modules=True, regenerate=False):
Do a keyword search on docstrings.
A list of of objects that matched the search is displayed,
- sorted by relevance.
+ sorted by relevance. All given keywords need to be found in the
+ docstring for it to be returned as a result, but the order does
+ not matter.
Parameters
----------
what : str
String containing words to look for.
- module : str, module
- Module whose docstrings to go through.
- import_modules : bool
+ module : str, optional
+ Name of module whose docstrings to go through.
+ import_modules : bool, optional
Whether to import sub-modules in packages.
- Will import only modules in ``__all__``.
- regenerate : bool
- Whether to re-generate the docstring cache.
+ Will import only modules in ``__all__``. Default is True.
+ regenerate : bool, optional
+ Whether to re-generate the docstring cache. Default is False.
- Examples
+ See Also
--------
+ source, info
+
+ Notes
+ -----
+ Relevance is determined only roughly, by checking if the keywords occur
+ in the function name, at the start of a docstring, etc.
+ Examples
+ --------
>>> np.lookfor('binary representation')
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
Return the binary representation of the input number as a string.
+ numpy.base_repr
+ Return a string representation of a number in the given base system.
+ ...
"""
import pydoc
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index dcf7fde26..5878b909f 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -30,7 +30,7 @@ class LinAlgError(Exception):
def _makearray(a):
new = asarray(a)
- wrap = getattr(a, "__array_wrap__", new.__array_wrap__)
+ wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
@@ -1237,7 +1237,7 @@ def lstsq(a, b, rcond=-1):
Notes
-----
- If `b` is a matrix, then all array results returned as
+ If `b` is a matrix, then all array results are returned as
matrices.
Examples
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 5cf11ffb9..3071ff5fe 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -210,7 +210,49 @@ def minimum_fill_value(obj):
def maximum_fill_value(obj):
"""
- Calculate the default fill value suitable for taking the maximum of ``obj``.
+ Return the minimum value that can be represented by the dtype of an object.
+
+ This function is useful for calculating a fill value suitable for
+ taking the maximum of an array with a given dtype.
+
+ Parameters
+ ----------
+ obj : {ndarray, dtype}
+        An object that can be queried for its numeric type.
+
+ Returns
+ -------
+ val : scalar
+ The minimum representable value.
+
+ Raises
+ ------
+ TypeError
+ If `obj` isn't a suitable numeric type.
+
+ See Also
+ --------
+ set_fill_value : Set the filling value of a masked array.
+ MaskedArray.fill_value : Return current fill value.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.int8()
+ >>> ma.maximum_fill_value(a)
+ -128
+ >>> a = np.int32()
+ >>> ma.maximum_fill_value(a)
+ -2147483648
+
+ An array of numeric data can also be passed.
+
+ >>> a = np.array([1, 2, 3], dtype=np.int8)
+ >>> ma.maximum_fill_value(a)
+ -128
+ >>> a = np.array([1, 2, 3], dtype=np.float32)
+ >>> ma.maximum_fill_value(a)
+ -inf
"""
errmsg = "Unsuitable type for calculating maximum."
@@ -452,7 +494,7 @@ def getdata(a, subok=True):
Input ``MaskedArray``, alternatively a ndarray or a subclass thereof.
subok : bool
Whether to force the output to be a `pure` ndarray (False) or to
- return a subclass of ndarray if approriate (True, default).
+ return a subclass of ndarray if appropriate (True, default).
See Also
--------
@@ -2471,7 +2513,10 @@ class MaskedArray(ndarray):
self._mask = _mask
# Finalize the mask ...........
if self._mask is not nomask:
- self._mask.shape = self.shape
+ try:
+ self._mask.shape = self.shape
+ except ValueError:
+ self._mask = nomask
return
#..................................
def __array_wrap__(self, obj, context=None):
@@ -3126,6 +3171,8 @@ class MaskedArray(ndarray):
#............................................
def __eq__(self, other):
"Check whether other equals self elementwise"
+ if self is masked:
+ return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__eq__(self.filled(0), other).view(type(self))
@@ -3152,6 +3199,8 @@ class MaskedArray(ndarray):
#
def __ne__(self, other):
"Check whether other doesn't equal self elementwise"
+ if self is masked:
+ return masked
omask = getattr(other, '_mask', nomask)
if omask is nomask:
check = ndarray.__ne__(self.filled(0), other).view(type(self))
@@ -3723,52 +3772,49 @@ class MaskedArray(ndarray):
def cumsum(self, axis=None, dtype=None, out=None):
"""
- Return the cumulative sum of the elements along the given axis.
- The cumulative sum is calculated over the flattened array by
- default, otherwise over the specified axis.
+ Return the cumulative sum of the elements along the given axis.
+ The cumulative sum is calculated over the flattened array by
+ default, otherwise over the specified axis.
- Masked values are set to 0 internally during the computation.
- However, their position is saved, and the result will be masked at
- the same locations.
+ Masked values are set to 0 internally during the computation.
+ However, their position is saved, and the result will be masked at
+ the same locations.
- Parameters
- ----------
- axis : {None, -1, int}, optional
- Axis along which the sum is computed. The default (`axis` = None) is to
- compute over the flattened array. `axis` may be negative, in which case
- it counts from the last to the first axis.
- dtype : {None, dtype}, optional
- Type of the returned array and of the accumulator in which the
- elements are summed. If `dtype` is not specified, it defaults
- to the dtype of `a`, unless `a` has an integer dtype with a
- precision less than that of the default platform integer. In
- that case, the default platform integer is used.
- out : ndarray, optional
- Alternative output array in which to place the result. It must
- have the same shape and buffer length as the expected output
- but the type will be cast if necessary.
-
- Warnings
- --------
- The mask is lost if out is not a valid :class:`MaskedArray` !
+ Parameters
+ ----------
+ axis : {None, -1, int}, optional
+ Axis along which the sum is computed. The default (`axis` = None) is to
+ compute over the flattened array. `axis` may be negative, in which case
+ it counts from the last to the first axis.
+ dtype : {None, dtype}, optional
+ Type of the returned array and of the accumulator in which the
+ elements are summed. If `dtype` is not specified, it defaults
+ to the dtype of `a`, unless `a` has an integer dtype with a
+ precision less than that of the default platform integer. In
+ that case, the default platform integer is used.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary.
- Returns
- -------
- cumsum : ndarray.
- A new array holding the result is returned unless ``out`` is
- specified, in which case a reference to ``out`` is returned.
+ Returns
+ -------
+ cumsum : ndarray.
+ A new array holding the result is returned unless ``out`` is
+ specified, in which case a reference to ``out`` is returned.
- Examples
- --------
- >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
- >>> print marr.cumsum()
- [0 1 3 -- -- -- 9 16 24 33]
+ Notes
+ -----
+ The mask is lost if `out` is not a valid :class:`MaskedArray` !
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
- Notes
- -----
- Arithmetic is modular when using integer types, and no error is
- raised on overflow.
+ Examples
+ --------
+ >>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
+ >>> print marr.cumsum()
+ [0 1 3 -- -- -- 9 16 24 33]
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
@@ -3853,46 +3899,44 @@ class MaskedArray(ndarray):
def cumprod(self, axis=None, dtype=None, out=None):
"""
- Return the cumulative product of the elements along the given axis.
- The cumulative product is taken over the flattened array by
- default, otherwise over the specified axis.
+ Return the cumulative product of the elements along the given axis.
+ The cumulative product is taken over the flattened array by
+ default, otherwise over the specified axis.
- Masked values are set to 1 internally during the computation.
- However, their position is saved, and the result will be masked at
- the same locations.
+ Masked values are set to 1 internally during the computation.
+ However, their position is saved, and the result will be masked at
+ the same locations.
- Parameters
- ----------
- axis : {None, -1, int}, optional
- Axis along which the product is computed. The default
- (`axis` = None) is to compute over the flattened array.
- dtype : {None, dtype}, optional
- Determines the type of the returned array and of the accumulator
- where the elements are multiplied. If ``dtype`` has the value ``None`` and
- the type of ``a`` is an integer type of precision less than the default
- platform integer, then the default platform integer precision is
- used. Otherwise, the dtype is the same as that of ``a``.
- out : ndarray, optional
- Alternative output array in which to place the result. It must
- have the same shape and buffer length as the expected output
- but the type will be cast if necessary.
-
- Warnings
- --------
- The mask is lost if out is not a valid MaskedArray !
+ Parameters
+ ----------
+ axis : {None, -1, int}, optional
+ Axis along which the product is computed. The default
+ (`axis` = None) is to compute over the flattened array.
+ dtype : {None, dtype}, optional
+ Determines the type of the returned array and of the accumulator
+ where the elements are multiplied. If ``dtype`` has the value ``None``
+ and the type of ``a`` is an integer type of precision less than the
+ default platform integer, then the default platform integer precision
+ is used. Otherwise, the dtype is the same as that of ``a``.
+ out : ndarray, optional
+ Alternative output array in which to place the result. It must
+ have the same shape and buffer length as the expected output
+ but the type will be cast if necessary.
- Returns
- -------
- cumprod : ndarray
- A new array holding the result is returned unless out is specified,
- in which case a reference to out is returned.
+ Returns
+ -------
+ cumprod : ndarray
+ A new array holding the result is returned unless out is specified,
+ in which case a reference to out is returned.
- Notes
- -----
- Arithmetic is modular when using integer types, and no error is
- raised on overflow.
+ Notes
+ -----
+ The mask is lost if `out` is not a valid MaskedArray !
- """
+ Arithmetic is modular when using integer types, and no error is
+ raised on overflow.
+
+ """
result = self.filled(1).cumprod(axis=axis, dtype=dtype, out=out)
if out is not None:
if isinstance(out, MaskedArray):
@@ -4545,12 +4589,13 @@ class MaskedArray(ndarray):
purposes.
"""
+ cf = 'CF'[self.flags.fnc]
state = (1,
self.shape,
self.dtype,
self.flags.fnc,
- self._data.tostring(),
- getmaskarray(self).tostring(),
+ self._data.tostring(cf),
+ getmaskarray(self).tostring(cf),
self._fill_value,
)
return state
@@ -4864,19 +4909,18 @@ class _frommethod:
return doc
#
def __call__(self, a, *args, **params):
- if isinstance(a, MaskedArray):
- return getattr(a, self.__name__).__call__(*args, **params)
- #FIXME ----
- #As x is not a MaskedArray, we transform it to a ndarray with asarray
- #... and call the corresponding method.
- #Except that sometimes it doesn't work (try reshape([1,2,3,4],(2,2)))
- #we end up with a "SystemError: NULL result without error in PyObject_Call"
- #A dirty trick is then to call the initial numpy function...
- method = getattr(narray(a, copy=False), self.__name__)
- try:
+ # Get the method from the array (if possible)
+ method_name = self.__name__
+ method = getattr(a, method_name, None)
+ if method is not None:
return method(*args, **params)
- except SystemError:
- return getattr(np,self.__name__).__call__(a, *args, **params)
+ # Still here ? Then a is not a MaskedArray
+ method = getattr(MaskedArray, method_name, None)
+ if method is not None:
+ return method(MaskedArray(a), *args, **params)
+ # Still here ? OK, let's call the corresponding np function
+ method = getattr(np, method_name)
+ return method(a, *args, **params)
all = _frommethod('all')
anomalies = anom = _frommethod('anom')
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 1aa43a222..9c6b7d66c 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -13,24 +13,25 @@ __date__ = '$Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $'
__all__ = ['apply_along_axis', 'atleast_1d', 'atleast_2d', 'atleast_3d',
'average',
- 'column_stack','compress_cols','compress_rowcols', 'compress_rows',
- 'count_masked', 'corrcoef', 'cov',
+ 'clump_masked', 'clump_unmasked', 'column_stack', 'compress_cols',
+ 'compress_rowcols', 'compress_rows', 'count_masked', 'corrcoef',
+ 'cov',
'diagflat', 'dot','dstack',
'ediff1d',
'flatnotmasked_contiguous', 'flatnotmasked_edges',
'hsplit', 'hstack',
- 'intersect1d', 'intersect1d_nu',
+ 'in1d', 'intersect1d', 'intersect1d_nu',
'mask_cols', 'mask_rowcols', 'mask_rows', 'masked_all',
'masked_all_like', 'median', 'mr_',
'notmasked_contiguous', 'notmasked_edges',
'polyfit',
'row_stack',
'setdiff1d', 'setmember1d', 'setxor1d',
- 'unique1d', 'union1d',
+ 'unique', 'unique1d', 'union1d',
'vander', 'vstack',
]
-from itertools import groupby
+import itertools
import warnings
import core as ma
@@ -45,6 +46,8 @@ import numpy.core.umath as umath
from numpy.lib.index_tricks import AxisConcatenator
from numpy.linalg import lstsq
+from numpy.lib.utils import deprecate_with_doc
+
#...............................................................................
def issequence(seq):
"""Is seq a sequence (ndarray, list or tuple)?"""
@@ -56,11 +59,48 @@ def count_masked(arr, axis=None):
"""
Count the number of masked elements along the given axis.
+
Parameters
----------
+ arr : array_like
+ An array with (possibly) masked elements.
axis : int, optional
- Axis along which to count.
- If None (default), a flattened version of the array is used.
+ Axis along which to count. If None (default), a flattened
+ version of the array is used.
+
+ Returns
+ -------
+ count : int, ndarray
+ The total number of masked elements (axis=None) or the number
+ of masked elements along each slice of the given axis.
+
+ Examples
+ --------
+ >>> import numpy.ma as ma
+ >>> a = np.arange(9).reshape((3,3))
+ >>> a = ma.array(a)
+ >>> a[1, 0] = ma.masked
+ >>> a[1, 2] = ma.masked
+ >>> a[2, 1] = ma.masked
+ >>> a
+ masked_array(data =
+ [[0 1 2]
+ [-- 4 --]
+ [6 -- 8]],
+ mask =
+ [[False False False]
+ [ True False True]
+ [False True False]],
+ fill_value=999999)
+ >>> ma.count_masked(a)
+ 3
+
+ When the `axis` keyword is used an array is returned.
+
+ >>> ma.count_masked(a, axis=0)
+ array([1, 1, 1])
+ >>> ma.count_masked(a, axis=1)
+ array([0, 2, 1])
"""
m = getmaskarray(arr)
@@ -373,7 +413,7 @@ def average(a, axis=None, weights=None, returned=False):
else:
if weights is None:
n = a.filled(0).sum(axis=None)
- d = umath.add.reduce((-mask).ravel().astype(int))
+ d = float(umath.add.reduce((~mask).ravel()))
else:
w = array(filled(weights, 0.0), float, mask=mask).ravel()
n = add.reduce(a.ravel() * w)
@@ -830,7 +870,7 @@ def ediff1d(arr, to_end=None, to_begin=None):
return ed
-def unique1d(ar1, return_index=False, return_inverse=False):
+def unique(ar1, return_index=False, return_inverse=False):
"""
Finds the unique elements of an array.
@@ -840,11 +880,11 @@ def unique1d(ar1, return_index=False, return_inverse=False):
See Also
--------
- np.unique1d : equivalent function for ndarrays.
+ np.unique : equivalent function for ndarrays.
"""
- output = np.unique1d(ar1,
- return_index=return_index,
- return_inverse=return_inverse)
+ output = np.unique(ar1,
+ return_index=return_index,
+ return_inverse=return_inverse)
if isinstance(output, tuple):
output = list(output)
output[0] = output[0].view(MaskedArray)
@@ -854,33 +894,7 @@ def unique1d(ar1, return_index=False, return_inverse=False):
return output
-def intersect1d(ar1, ar2):
- """
- Returns the repeated or unique elements belonging to the two arrays.
-
- Masked values are assumed equals one to the other.
- The output is always a masked array
-
- See Also
- --------
- numpy.intersect1d : equivalent function for ndarrays.
-
- Examples
- --------
- >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
- >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
- >>> intersect1d(x, y)
- masked_array(data = [1 1 3 3 --],
- mask = [False False False False True],
- fill_value = 999999)
- """
- aux = ma.concatenate((ar1,ar2))
- aux.sort()
- return aux[aux[1:] == aux[:-1]]
-
-
-
-def intersect1d_nu(ar1, ar2):
+def intersect1d(ar1, ar2, assume_unique=False):
"""
Returns the unique elements common to both arrays.
@@ -889,27 +903,28 @@ def intersect1d_nu(ar1, ar2):
See Also
--------
- intersect1d : Returns repeated or unique common elements.
- numpy.intersect1d_nu : equivalent function for ndarrays.
+ numpy.intersect1d : equivalent function for ndarrays.
Examples
--------
>>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
>>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
- >>> intersect1d_nu(x, y)
+ >>> intersect1d(x, y)
masked_array(data = [1 3 --],
mask = [False False True],
fill_value = 999999)
"""
- # Might be faster than unique1d( intersect1d( ar1, ar2 ) )?
- aux = ma.concatenate((unique1d(ar1), unique1d(ar2)))
+ if assume_unique:
+ aux = ma.concatenate((ar1, ar2))
+ else:
+ # Might be faster than unique1d( intersect1d( ar1, ar2 ) )?
+ aux = ma.concatenate((unique(ar1), unique(ar2)))
aux.sort()
return aux[aux[1:] == aux[:-1]]
-
-def setxor1d(ar1, ar2):
+def setxor1d(ar1, ar2, assume_unique=False):
"""
Set exclusive-or of 1D arrays with unique elements.
@@ -918,6 +933,10 @@ def setxor1d(ar1, ar2):
numpy.setxor1d : equivalent function for ndarrays
"""
+ if not assume_unique:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+
aux = ma.concatenate((ar1, ar2))
if aux.size == 0:
return aux
@@ -929,54 +948,52 @@ def setxor1d(ar1, ar2):
flag2 = (flag[1:] == flag[:-1])
return aux[flag2]
-
def in1d(ar1, ar2, assume_unique=False):
    """
    Test whether each element of an array is also present in a second
    array.

    See Also
    --------
    numpy.in1d : equivalent function for ndarrays

    Notes
    -----
    .. versionadded:: 1.4.0

    """
    if not assume_unique:
        # Collapse ar1 to its unique values, keeping the inverse mapping
        # so the answer can be expanded back to ar1's original length.
        ar1, inv_idx = unique(ar1, return_inverse=True)
        ar2 = unique(ar2)

    pooled = ma.concatenate((ar1, ar2))
    # A stable sort guarantees that, among equal values, the ones coming
    # from ar1 are placed before the ones coming from ar2.
    perm = pooled.argsort(kind='mergesort')
    sorted_vals = pooled[perm]
    # An element of ar1 is "in" ar2 iff it is immediately followed by an
    # equal value in the sorted pool.
    dup = (sorted_vals[1:] == sorted_vals[:-1])
    present = ma.concatenate((dup, [False]))
    # Undo the sort and keep only the flags belonging to ar1's elements.
    orig_pos = perm.argsort(kind='mergesort')[:len(ar1)]

    if assume_unique:
        return present[orig_pos]
    return present[orig_pos][inv_idx]
def union1d(ar1, ar2):
"""
- Union of 1D arrays with unique elements.
+ Union of two arrays.
See also
--------
numpy.union1d : equivalent function for ndarrays.
"""
- return unique1d(ma.concatenate((ar1, ar2)))
+ return unique(ma.concatenate((ar1, ar2)))
-def setdiff1d(ar1, ar2):
+def setdiff1d(ar1, ar2, assume_unique=False):
"""
Set difference of 1D arrays with unique elements.
@@ -985,12 +1002,63 @@ def setdiff1d(ar1, ar2):
numpy.setdiff1d : equivalent function for ndarrays
"""
- aux = setmember1d(ar1,ar2)
+ if not assume_unique:
+ ar1 = unique(ar1)
+ ar2 = unique(ar2)
+ aux = in1d(ar1, ar2, assume_unique=True)
if aux.size == 0:
return aux
else:
return ma.asarray(ar1)[aux == 0]
@deprecate_with_doc('')
def unique1d(ar1, return_index=False, return_inverse=False):
    """ This function is deprecated. Use ma.unique() instead. """
    # np.unique1d is itself deprecated (and would emit a second
    # DeprecationWarning); delegate to np.unique, which accepts the same
    # arguments and returns the same output for this signature.  The new
    # ma.unique() above already does the same.
    output = np.unique(ar1,
                       return_index=return_index,
                       return_inverse=return_inverse)
    if isinstance(output, tuple):
        # Only the array of unique values is re-cast as a MaskedArray;
        # the optional index/inverse arrays stay plain ndarrays.
        output = list(output)
        output[0] = output[0].view(MaskedArray)
        output = tuple(output)
    else:
        output = output.view(MaskedArray)
    return output
+
@deprecate_with_doc('')
def intersect1d_nu(ar1, ar2):
    """ This function is deprecated. Use ma.intersect1d() instead."""
    # Pool the unique values of both inputs; after sorting, any value
    # appearing twice in a row must belong to both arrays.
    pooled = ma.concatenate((unique1d(ar1), unique1d(ar2)))
    pooled.sort()
    duplicated = (pooled[1:] == pooled[:-1])
    return pooled[duplicated]
+
@deprecate_with_doc('')
def setmember1d(ar1, ar2):
    """ This function is deprecated. Use ma.in1d() instead."""
    # Concatenate both inputs and tag each element with its origin
    # (0 for ar1, 1 for ar2) so origins can be told apart after sorting.
    ar1 = ma.asanyarray(ar1)
    ar2 = ma.asanyarray( ar2 )
    ar = ma.concatenate((ar1, ar2 ))
    b1 = ma.zeros(ar1.shape, dtype = np.int8)
    b2 = ma.ones(ar2.shape, dtype = np.int8)
    tt = ma.concatenate((b1, b2))

    # We need this to be a stable sort, so always use 'mergesort' here. The
    # values from the first array should always come before the values from the
    # second array.
    perm = ar.argsort(kind='mergesort')
    aux = ar[perm]
    aux2 = tt[perm]
# flag = ediff1d( aux, 1 ) == 0
    # flag[i] is True when sorted position i is followed by an equal value.
    flag = ma.concatenate((aux[1:] == aux[:-1], [False]))
    # Where a duplicate's *second* copy comes from ar2, swap the pair in
    # the permutation so the ar1 copy ends up at the flagged position.
    ii = ma.where( flag * aux2 )[0]
    aux = perm[ii+1]
    perm[ii+1] = perm[ii]
    perm[ii] = aux
    #
    # Invert the permutation and keep only the entries for ar1's elements.
    indx = perm.argsort(kind='mergesort')[:len( ar1 )]
    #
    return flag[indx]
#####--------------------------------------------------------------------------
@@ -1302,7 +1370,7 @@ def flatnotmasked_contiguous(a):
if len(unmasked) == 0:
return None
result = []
- for k, group in groupby(enumerate(unmasked), lambda (i,x):i-x):
+ for (k, group) in itertools.groupby(enumerate(unmasked), lambda (i,x):i-x):
tmp = np.array([g[1] for g in group], int)
# result.append((tmp.size, tuple(tmp[[0,-1]])))
result.append( slice(tmp[0], tmp[-1]) )
@@ -1347,6 +1415,73 @@ def notmasked_contiguous(a, axis=None):
return result
+def _ezclump(mask):
+ """
+ Finds the clumps (groups of data with the same values) for a 1D bool array.
+
+ Returns a series of slices.
+ """
+ #def clump_masked(a):
+ if mask.ndim > 1:
+ mask = mask.ravel()
+ idx = (mask[1:] - mask[:-1]).nonzero()
+ idx = idx[0] + 1
+ slices = [slice(left, right)
+ for (left, right) in zip(itertools.chain([0], idx),
+ itertools.chain(idx, [len(mask)]),)]
+ return slices
+
+
+def clump_unmasked(a):
+ """
+ Returns a list of slices corresponding to the unmasked clumps of a 1D array.
+
+ Examples
+ --------
+ >>> a = ma.masked_array(np.arange(10))
+ >>> a[[0, 1, 2, 6, 8, 9]] = ma.masked
+ >>> clump_unmasked(a)
+ [slice(3, 6, None), slice(7, 8, None)]
+
+ .. versionadded:: 1.4.0
+ """
+ mask = getattr(a, '_mask', nomask)
+ if mask is nomask:
+ return [slice(0, a.size)]
+ slices = _ezclump(mask)
+ if a[0] is masked:
+ result = slices[1::2]
+ else:
+ result = slices[::2]
+ return result
+
+
+def clump_masked(a):
+ """
+ Returns a list of slices corresponding to the masked clumps of a 1D array.
+
+ Examples
+ --------
+ >>> a = ma.masked_array(np.arange(10))
+ >>> a[[0, 1, 2, 6, 8, 9]] = ma.masked
+ >>> clump_masked(a)
+ [slice(0, 3, None), slice(6, 7, None), slice(8, None, None)]
+
+ .. versionadded:: 1.4.0
+ """
+ mask = ma.getmask(a)
+ if mask is nomask:
+ return []
+ slices = _ezclump(mask)
+ if len(slices):
+ if a[0] is masked:
+ slices = slices[::2]
+ else:
+ slices = slices[1::2]
+ return slices
+
+
+
#####--------------------------------------------------------------------------
#---- Polynomial fit ---
#####--------------------------------------------------------------------------
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index e994a67c6..dc37ff4b6 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -388,6 +388,14 @@ class TestMaskedArray(TestCase):
assert_equal(a_pickled._mask, a._mask)
assert_equal(a_pickled, a)
    def test_pickling_keepalignment(self):
        "Tests pickling w/ F_CONTIGUOUS arrays"
        import cPickle
        # Transposing a C-ordered 2-D array yields an F-contiguous view;
        # the pickle round-trip must preserve its contents (exercises the
        # order-aware tostring() call in __getstate__).
        a = arange(10)
        a.shape = (-1, 2)
        b = a.T
        test = cPickle.loads(cPickle.dumps(b))
        assert_equal(test, b)
def test_single_element_subscript(self):
"Tests single element subscripts of Maskedarrays."
@@ -660,6 +668,15 @@ class TestMaskedArrayArithmetic(TestCase):
self.failUnless(minimum(xm, xm).mask)
    def test_masked_singleton_equality(self):
        "Tests (in)equality on masked singleton"
        a = array([1, 2, 3], mask=[1, 1, 0])
        # Comparing a masked element yields the masked singleton itself.
        assert((a[0] == 0) is masked)
        assert((a[0] != 0) is masked)
        # Comparisons on an unmasked element behave like plain scalars.
        assert_equal((a[-1] == 0), False)
        assert_equal((a[-1] != 0), True)
+
def test_arithmetic_with_masked_singleton(self):
"Checks that there's no collapsing to masked"
x = masked_array([1,2])
@@ -775,6 +792,12 @@ class TestMaskedArrayArithmetic(TestCase):
assert_equal(amaximum, np.maximum.outer(a,a))
    def test_minmax_reduce(self):
        "Test np.min/maximum.reduce on array w/ full False mask"
        # An array with an all-False mask must reduce like a plain ndarray.
        a = array([1, 2, 3], mask=[False, False, False])
        b = np.maximum.reduce(a)
        assert_equal(b, 3)
+
def test_minmax_funcs_with_output(self):
"Tests the min/max functions with explicit outputs"
mask = np.random.rand(12).round()
@@ -1053,6 +1076,7 @@ class TestMaskedArrayArithmetic(TestCase):
assert_equal(test.mask, control.mask)
assert_equal(a.mask, [0, 0, 0, 0, 1])
+
#------------------------------------------------------------------------------
class TestMaskedArrayAttributes(TestCase):
@@ -1351,7 +1375,7 @@ class TestFillingValues(TestCase):
assert_equal(test[1][0], maximum_fill_value(a['B']['BA']))
assert_equal(test[1][1], maximum_fill_value(a['B']['BB']))
assert_equal(test[1], maximum_fill_value(a['B']))
-
+
#------------------------------------------------------------------------------
@@ -2315,7 +2339,7 @@ class TestMaskedArrayMethods(TestCase):
#------------------------------------------------------------------------------
-class TestMaskArrayMathMethod(TestCase):
+class TestMaskedArrayMathMethods(TestCase):
def setUp(self):
"Base data definition."
@@ -2954,6 +2978,16 @@ class TestMaskedArrayFunctions(TestCase):
control = np.array([ 0, 0, 0, 0, 0, 1], dtype=bool)
assert_equal(test, control)
+
    def test_on_ndarray(self):
        "Test functions on ndarrays"
        # The _frommethod wrappers must accept plain ndarrays, not only
        # MaskedArrays, and produce the same result as the masked method.
        a = np.array([1, 2, 3, 4])
        m = array(a, mask=False)
        test = anom(a)
        assert_equal(test, m.anom())
        test = reshape(a, (2, 2))
        assert_equal(test, m.reshape(2, 2))
+
#------------------------------------------------------------------------------
class TestMaskedFields(TestCase):
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 3c6de62be..c0532b081 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -70,26 +70,43 @@ class TestGeneric(TestCase):
mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
test = masked_all_like(control)
assert_equal(test, control)
+
    def test_clump_masked(self):
        "Test clump_masked"
        a = masked_array(np.arange(10))
        a[[0, 1, 2, 6, 8, 9]] = masked
        #
        # Masked runs are [0:3], [6:7] and [8:10].
        test = clump_masked(a)
        control = [slice(0, 3), slice(6, 7), slice(8, 10)]
        assert_equal(test, control)
+
    def test_clump_unmasked(self):
        "Test clump_unmasked"
        a = masked_array(np.arange(10))
        a[[0, 1, 2, 6, 8, 9]] = masked
        # Unmasked runs are [3:6] and [7:8].
        test = clump_unmasked(a)
        control = [slice(3, 6), slice(7, 8),]
        assert_equal(test, control)
+
class TestAverage(TestCase):
"Several tests of average. Why so many ? Good point..."
def test_testAverage1(self):
"Test of average."
- ott = array([0.,1.,2.,3.], mask=[1,0,0,0])
- assert_equal(2.0, average(ott,axis=0))
+ ott = array([0.,1.,2.,3.], mask=[True, False, False, False])
+ assert_equal(2.0, average(ott, axis=0))
assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
- result, wts = average(ott, weights=[1.,1.,2.,1.], returned=1)
+ result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
assert_equal(2.0, result)
self.failUnless(wts == 4.0)
ott[:] = masked
- assert_equal(average(ott,axis=0).mask, [True])
- ott = array([0.,1.,2.,3.], mask=[1,0,0,0])
- ott = ott.reshape(2,2)
+ assert_equal(average(ott, axis=0).mask, [True])
+ ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+ ott = ott.reshape(2, 2)
ott[:,1] = masked
- assert_equal(average(ott,axis=0), [2.0, 0.0])
- assert_equal(average(ott,axis=1).mask[0], [True])
+ assert_equal(average(ott, axis=0), [2.0, 0.0])
+ assert_equal(average(ott, axis=1).mask[0], [True])
assert_equal([2.,0.], average(ott, axis=0))
result, wts = average(ott, axis=0, returned=1)
assert_equal(wts, [1., 0.])
@@ -105,43 +122,44 @@ class TestAverage(TestCase):
assert_equal(average(y, None), np.add.reduce(np.arange(6))*3./12.)
assert_equal(average(y, axis=0), np.arange(6) * 3./2.)
assert_equal(average(y, axis=1),
- [average(x,axis=0), average(x,axis=0) * 2.0])
+ [average(x, axis=0), average(x, axis=0) * 2.0])
assert_equal(average(y, None, weights=w2), 20./6.)
assert_equal(average(y, axis=0, weights=w2),
[0.,1.,2.,3.,4.,10.])
assert_equal(average(y, axis=1),
- [average(x,axis=0), average(x,axis=0) * 2.0])
+ [average(x, axis=0), average(x, axis=0) * 2.0])
m1 = zeros(6)
- m2 = [0,0,1,1,0,0]
- m3 = [[0,0,1,1,0,0],[0,1,1,1,1,0]]
+ m2 = [0, 0, 1, 1, 0, 0]
+ m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
- assert_equal(average(masked_array(x, m1),axis=0), 2.5)
- assert_equal(average(masked_array(x, m2),axis=0), 2.5)
- assert_equal(average(masked_array(x, m4),axis=0).mask, [True])
- assert_equal(average(masked_array(x, m5),axis=0), 0.0)
- assert_equal(count(average(masked_array(x, m4),axis=0)), 0)
+ assert_equal(average(masked_array(x, m1), axis=0), 2.5)
+ assert_equal(average(masked_array(x, m2), axis=0), 2.5)
+ assert_equal(average(masked_array(x, m4), axis=0).mask, [True])
+ assert_equal(average(masked_array(x, m5), axis=0), 0.0)
+ assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
z = masked_array(y, m3)
assert_equal(average(z, None), 20./6.)
assert_equal(average(z, axis=0), [0.,1.,99.,99.,4.0, 7.5])
assert_equal(average(z, axis=1), [2.5, 5.0])
- assert_equal(average(z,axis=0, weights=w2), [0.,1., 99., 99., 4.0, 10.0])
+ assert_equal(average(z,axis=0, weights=w2),
+ [0.,1., 99., 99., 4.0, 10.0])
def test_testAverage3(self):
"Yet more tests of average!"
a = arange(6)
b = arange(6) * 3
- r1, w1 = average([[a,b],[b,a]], axis=1, returned=1)
+ r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
assert_equal(shape(r1) , shape(w1))
assert_equal(r1.shape , w1.shape)
- r2, w2 = average(ones((2,2,3)), axis=0, weights=[3,1], returned=1)
+ r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
assert_equal(shape(w2) , shape(r2))
- r2, w2 = average(ones((2,2,3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), returned=1)
assert_equal(shape(w2) , shape(r2))
- r2, w2 = average(ones((2,2,3)), weights=ones((2,2,3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
assert_equal(shape(w2), shape(r2))
- a2d = array([[1,2],[0,4]], float)
- a2dm = masked_array(a2d, [[0,0],[1,0]])
+ a2d = array([[1, 2], [0, 4]], float)
+ a2dm = masked_array(a2d, [[False, False],[True, False]])
a2da = average(a2d, axis=0)
assert_equal(a2da, [0.5, 3.0])
a2dma = average(a2dm, axis=0)
@@ -151,8 +169,19 @@ class TestAverage(TestCase):
a2dma = average(a2dm, axis=1)
assert_equal(a2dma, [1.5, 4.0])
    def test_onintegers_with_mask(self):
        "Test average on integers with mask"
        # Averaging integer data must still yield a float result, with or
        # without masked entries (exercises the float() cast in average).
        a = average(array([1, 2]))
        assert_equal(a, 1.5)
        a = average(array([1, 2, 3, 4], mask=[False, False, True, True]))
        assert_equal(a, 1.5)
+
+
class TestConcatenator(TestCase):
- "Tests for mr_, the equivalent of r_ for masked arrays."
+ """
+ Tests for mr_, the equivalent of r_ for masked arrays.
+ """
+
def test_1d(self):
"Tests mr_ on 1D arrays."
assert_array_equal(mr_[1,2,3,4,5,6],array([1,2,3,4,5,6]))
@@ -186,7 +215,10 @@ class TestConcatenator(TestCase):
class TestNotMasked(TestCase):
- "Tests notmasked_edges and notmasked_contiguous."
+ """
+ Tests notmasked_edges and notmasked_contiguous.
+ """
+
def test_edges(self):
"Tests unmasked_edges"
data = masked_array(np.arange(25).reshape(5, 5),
@@ -222,7 +254,6 @@ class TestNotMasked(TestCase):
assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)])
-
def test_contiguous(self):
"Tests notmasked_contiguous"
a = masked_array(np.arange(24).reshape(3,8),
@@ -248,7 +279,6 @@ class TestNotMasked(TestCase):
-
class Test2DFunctions(TestCase):
"Tests 2D functions"
def test_compress2d(self):
@@ -573,19 +603,19 @@ class TestPolynomial(TestCase):
class TestArraySetOps(TestCase):
#
- def test_unique1d_onlist(self):
- "Test unique1d on list"
+ def test_unique_onlist(self):
+ "Test unique on list"
data = [1, 1, 1, 2, 2, 3]
- test = unique1d(data, return_index=True, return_inverse=True)
+ test = unique(data, return_index=True, return_inverse=True)
self.failUnless(isinstance(test[0], MaskedArray))
assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
assert_equal(test[1], [0, 3, 5])
assert_equal(test[2], [0, 0, 0, 1, 1, 2])
- def test_unique1d_onmaskedarray(self):
- "Test unique1d on masked data w/use_mask=True"
+ def test_unique_onmaskedarray(self):
+ "Test unique on masked data w/use_mask=True"
data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
- test = unique1d(data, return_index=True, return_inverse=True)
+ test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
assert_equal(test[1], [0, 3, 5, 2])
assert_equal(test[2], [0, 0, 3, 1, 3, 2])
@@ -593,26 +623,26 @@ class TestArraySetOps(TestCase):
data.fill_value = 3
data = masked_array([1, 1, 1, 2, 2, 3],
mask=[0, 0, 1, 0, 1, 0], fill_value=3)
- test = unique1d(data, return_index=True, return_inverse=True)
+ test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
assert_equal(test[1], [0, 3, 5, 2])
assert_equal(test[2], [0, 0, 3, 1, 3, 2])
- def test_unique1d_allmasked(self):
+ def test_unique_allmasked(self):
"Test all masked"
data = masked_array([1, 1, 1], mask=True)
- test = unique1d(data, return_index=True, return_inverse=True)
+ test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array([1,], mask=[True]))
assert_equal(test[1], [0])
assert_equal(test[2], [0, 0, 0])
#
"Test masked"
data = masked
- test = unique1d(data, return_index=True, return_inverse=True)
+ test = unique(data, return_index=True, return_inverse=True)
assert_equal(test[0], masked_array(masked))
assert_equal(test[1], [0])
assert_equal(test[2], [0])
-
+
def test_ediff1d(self):
"Tests mediff1d"
x = masked_array(np.arange(5), mask=[1,0,0,0,1])
@@ -689,15 +719,6 @@ class TestArraySetOps(TestCase):
x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
test = intersect1d(x, y)
- control = array([1, 1, 3, 3, -1], mask=[0, 0, 0, 0, 1])
- assert_equal(test, control)
-
-
- def test_intersect1d_nu(self):
- "Test intersect1d_nu"
- x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
- y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
- test = intersect1d_nu(x, y)
control = array([1, 3, -1], mask=[0, 0, 1])
assert_equal(test, control)
@@ -705,7 +726,7 @@ class TestArraySetOps(TestCase):
def test_setxor1d(self):
"Test setxor1d"
a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
- b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1])
+ b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = setxor1d(a, b)
assert_equal(test, array([3, 4, 7]))
#
@@ -729,30 +750,35 @@ class TestArraySetOps(TestCase):
assert_array_equal([], setxor1d([],[]))
    def test_in1d( self ):
        "Test in1d"
        # Masked entries compare equal to each other: the trailing masked
        # value of `a` matches the masked value in `b`.
        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
        test = in1d(a, b)
        assert_equal(test, [True, True, True, False, True])
        #
        # Duplicated values in `a` are handled via the inverse mapping.
        a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
        b = array([1, 5, -1], mask=[0, 0, 1])
        test = in1d(a, b)
        assert_equal(test, [True, True, False, True, True])
        #
        assert_array_equal([], in1d([],[]))
def test_union1d( self ):
"Test union1d"
- a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
- b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, -1])
+ a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+ b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
test = union1d(a, b)
control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
assert_equal(test, control)
#
- assert_array_equal([], setmember1d([],[]))
+ assert_array_equal([], union1d([],[]))
def test_setdiff1d( self ):
"Test setdiff1d"
- a = array([6, 5, 4, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 1])
+ a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
b = array([2, 4, 3, 3, 2, 1, 5])
test = setdiff1d(a, b)
assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
@@ -769,8 +795,6 @@ class TestArraySetOps(TestCase):
assert_array_equal(setdiff1d(a,b), np.array(['c']))
-
-
class TestShapeBase(TestCase):
#
def test_atleast1d(self):
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
index 15788141c..e337c35e2 100644
--- a/numpy/testing/decorators.py
+++ b/numpy/testing/decorators.py
@@ -9,6 +9,8 @@ setup and teardown functions and so on - see nose.tools for more
information.
"""
+import warnings
+import sys
def slow(t):
"""Labels a test as 'slow'.
@@ -170,3 +172,114 @@ def knownfailureif(fail_condition, msg=None):
return nose.tools.make_decorator(f)(knownfailer)
return knownfail_decorator
+
+# The following two classes are copied from python 2.6 warnings module (context
+# manager)
class WarningMessage(object):

    """Holds the result of a single showwarning() call."""

    # Names of the attributes mirrored from showwarning()'s arguments.
    _WARNING_DETAILS = ("message", "category", "filename", "lineno", "file",
                        "line")

    def __init__(self, message, category, filename, lineno, file=None,
                 line=None):
        # Store each argument under the attribute of the same name.
        self.message = message
        self.category = category
        self.filename = filename
        self.lineno = lineno
        self.file = file
        self.line = line
        # Keep the category *name* separately: it survives even if the
        # category class itself becomes unavailable later.
        self._category_name = category.__name__ if category else None

    def __str__(self):
        return ("{message : %r, category : %r, filename : %r, lineno : %s, "
                "line : %r}" % (self.message, self._category_name,
                                self.filename, self.lineno, self.line))
+
class WarningManager:
    """
    Context manager that saves and, on exit, restores the warnings
    filters and the showwarning() function.

    Parameters
    ----------
    record : bool, optional
        If True, __enter__ returns a list to which intercepted warnings
        are appended as WarningMessage instances; otherwise it returns
        None.
    module : module, optional
        Module to hook into; defaults to the global ``warnings`` module
        (overridable mainly for testing).
    """
    def __init__(self, record=False, module=None):
        self._record = record
        if module is None:
            self._module = sys.modules['warnings']
        else:
            self._module = module
        self._entered = False

    def __enter__(self):
        if self._entered:
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save filters and showwarning so __exit__ can restore them; copy
        # the filter list so callers may freely mutate the active one.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning
        if self._record:
            log = []
            def showwarning(*args, **kwargs):
                log.append(WarningMessage(*args, **kwargs))
            self._module.showwarning = showwarning
            return log
        else:
            return None

    def __exit__(self, *exc_info):
        # Accept (and ignore) the (exc_type, exc_value, traceback) triple
        # that the ``with`` statement passes: the previous zero-argument
        # signature made ``with WarningManager(): ...`` raise TypeError
        # on exit.  Manual ``ctx.__exit__()`` calls keep working.
        if not self._entered:
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
+
def deprecated(conditional=True):
    """This decorator can be used to filter Deprecation Warning, to avoid
    printing them during the test suite run, while checking that the test
    actually raises a DeprecationWarning.

    Parameters
    ----------
    conditional : bool or callable.
        Flag to determine whether to mark test as deprecated or not. If the
        condition is a callable, it is used at runtime to dynamically make the
        decision.

    Returns
    -------
    decorator : function
        Decorator, which, when applied to a function, causes SkipTest
        to be raised when the skip_condition was True, and the function
        to be called normally otherwise.

    Notes
    -----

    .. versionadded:: 1.4.0
    """
    def deprecate_decorator(f):
        # Local import to avoid a hard nose dependency and only incur the
        # import time overhead at actual test-time.
        import nose
        from noseclasses import KnownFailureTest

        def _deprecated_imp(*args, **kwargs):
            # Poor man's replacement for the with statement
            # Record every warning raised while f runs, with the filters
            # forced to 'always' so repeated warnings are not suppressed.
            ctx = WarningManager(record=True)
            l = ctx.__enter__()
            warnings.simplefilter('always')
            try:
                f(*args, **kwargs)
                # The test must have raised at least one warning, and the
                # first one must be a DeprecationWarning.
                if not len(l) > 0:
                    raise AssertionError("No warning raised when calling %s"
                            % f.__name__)
                if not l[0].category is DeprecationWarning:
                    raise AssertionError("First warning for %s is not a " \
                            "DeprecationWarning( is %s)" % (f.__name__, l[0]))
            finally:
                # Always restore the original warnings state, even when
                # the wrapped test fails.
                ctx.__exit__()

        if callable(conditional):
            # Evaluate the condition lazily, at decoration time.
            cond = conditional()
        else:
            cond = conditional
        if cond:
            return nose.tools.make_decorator(f)(_deprecated_imp)
        else:
            # Condition false: leave the test untouched.
            return f
    return deprecate_decorator
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
index 7a10a5b1f..a818e5e62 100644
--- a/numpy/testing/nosetester.py
+++ b/numpy/testing/nosetester.py
@@ -115,18 +115,23 @@ class NoseTester(object):
If None, extract calling module path
Default is None
'''
+ package_name = None
if package is None:
f = sys._getframe(1)
- package = f.f_locals.get('__file__', None)
- assert package is not None
- package = os.path.dirname(package)
+ package_path = f.f_locals.get('__file__', None)
+ assert package_path is not None
+ package_path = os.path.dirname(package_path)
+ package_name = f.f_locals.get('__name__', None)
elif isinstance(package, type(os)):
- package = os.path.dirname(package.__file__)
- self.package_path = package
+ package_path = os.path.dirname(package.__file__)
+ package_name = getattr(package, '__name__', None)
+ self.package_path = package_path
# find the package name under test; this name is used to limit coverage
# reporting (if enabled)
- self.package_name = get_package_name(package)
+ if package_name is None:
+ package_name = get_package_name(package_path)
+ self.package_name = package_name
def _test_argv(self, label, verbose, extra_argv):
''' Generate argv for nosetest command
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index aabdc88a0..0ecf0622d 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -48,7 +48,7 @@ class _GenericTest(object):
a = np.array([1, 1], dtype=np.object)
self._test_equal(a, 1)
-class TestEqual(_GenericTest, unittest.TestCase):
+class TestArrayEqual(_GenericTest, unittest.TestCase):
def setUp(self):
self._assert_func = assert_array_equal
@@ -126,11 +126,148 @@ class TestEqual(_GenericTest, unittest.TestCase):
self._test_not_equal(c, b)
class TestEqual(TestArrayEqual):
    """Run the TestArrayEqual suite against assert_equal, plus scalar,
    complex and non-numeric cases specific to assert_equal."""

    def setUp(self):
        self._assert_func = assert_equal

    def test_nan_items(self):
        # NaN compares equal to NaN under assert_equal, but a scalar NaN
        # is not equal to a sequence containing NaN.
        self._assert_func(np.nan, np.nan)
        self._assert_func([np.nan], [np.nan])
        self._test_not_equal(np.nan, [np.nan])
        self._test_not_equal(np.nan, 1)

    def test_inf_items(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func([np.inf], [np.inf])
        self._test_not_equal(np.inf, [np.inf])

    def test_non_numeric(self):
        self._assert_func('ab', 'ab')
        self._test_not_equal('ab', 'abb')

    def test_complex_item(self):
        # Real and imaginary parts are compared independently.
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_negative_zero(self):
        # assert_equal distinguishes +0.0 from -0.0.
        self._test_not_equal(np.PZERO, np.NZERO)

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
+
class TestArrayAlmostEqual(_GenericTest, unittest.TestCase):
    """Run the generic suite against assert_array_almost_equal, plus
    decimal-precision and NaN cases."""

    def setUp(self):
        self._assert_func = assert_array_almost_equal

    def test_simple(self):
        x = np.array([1234.2222])
        y = np.array([1234.2223])

        # The arrays agree up to decimal=4 but differ at decimal=5.
        self._assert_func(x, y, decimal=3)
        self._assert_func(x, y, decimal=4)
        self.failUnlessRaises(AssertionError,
                lambda: self._assert_func(x, y, decimal=5))

    def test_nan(self):
        anan = np.array([np.nan])
        aone = np.array([1])
        ainf = np.array([np.inf])
        # NaN is almost-equal only to NaN, never to finite or inf values.
        self._assert_func(anan, anan)
        self.failUnlessRaises(AssertionError,
                lambda : self._assert_func(anan, aone))
        self.failUnlessRaises(AssertionError,
                lambda : self._assert_func(anan, ainf))
        self.failUnlessRaises(AssertionError,
                lambda : self._assert_func(ainf, anan))
class TestAlmostEqual(_GenericTest, unittest.TestCase):
def setUp(self):
- self._assert_func = assert_array_almost_equal
+ self._assert_func = assert_almost_equal
+
+ def test_nan_item(self):
+ self._assert_func(np.nan, np.nan)
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(np.nan, 1))
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(np.nan, np.inf))
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(np.inf, np.nan))
+
+ def test_inf_item(self):
+ self._assert_func(np.inf, np.inf)
+ self._assert_func(-np.inf, -np.inf)
+
+ def test_simple_item(self):
+ self._test_not_equal(1, 2)
+
+ def test_complex_item(self):
+ self._assert_func(complex(1, 2), complex(1, 2))
+ self._assert_func(complex(1, np.nan), complex(1, np.nan))
+ self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
+ self._test_not_equal(complex(1, np.nan), complex(1, 2))
+ self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
+ self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
+
+ def test_complex(self):
+ x = np.array([complex(1, 2), complex(1, np.nan)])
+ z = np.array([complex(1, 2), complex(np.nan, 1)])
+ y = np.array([complex(1, 2), complex(1, 2)])
+ self._assert_func(x, x)
+ self._test_not_equal(x, y)
+ self._test_not_equal(x, z)
+
+class TestApproxEqual(unittest.TestCase):
+ def setUp(self):
+ self._assert_func = assert_approx_equal
+ def test_simple_arrays(self):
+ x = np.array([1234.22])
+ y = np.array([1234.23])
+
+ self._assert_func(x, y, significant=5)
+ self._assert_func(x, y, significant=6)
+ self.failUnlessRaises(AssertionError,
+ lambda: self._assert_func(x, y, significant=7))
+
+ def test_simple_items(self):
+ x = 1234.22
+ y = 1234.23
+
+ self._assert_func(x, y, significant=4)
+ self._assert_func(x, y, significant=5)
+ self._assert_func(x, y, significant=6)
+ self.failUnlessRaises(AssertionError,
+ lambda: self._assert_func(x, y, significant=7))
+
+ def test_nan_array(self):
+ anan = np.array(np.nan)
+ aone = np.array(1)
+ ainf = np.array(np.inf)
+ self._assert_func(anan, anan)
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(anan, aone))
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(anan, ainf))
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(ainf, anan))
+
+ def test_nan_items(self):
+ anan = np.array(np.nan)
+ aone = np.array(1)
+ ainf = np.array(np.inf)
+ self._assert_func(anan, anan)
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(anan, aone))
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(anan, ainf))
+ self.failUnlessRaises(AssertionError,
+ lambda : self._assert_func(ainf, anan))
class TestRaises(unittest.TestCase):
def setUp(self):
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index ba9b16b18..96b2d462c 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -6,6 +6,7 @@ import os
import sys
import re
import operator
+import types
from nosetester import import_nose
__all__ = ['assert_equal', 'assert_almost_equal','assert_approx_equal',
@@ -22,6 +23,56 @@ def assert_(val, msg='') :
if not val :
raise AssertionError(msg)
+def gisnan(x):
+    """like isnan, but always raises an error if the type is not supported,
+    instead of returning NotImplemented.
+
+ Notes
+ -----
+    isnan and other ufuncs sometimes return NotImplemented instead of
+    raising an exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isnan
+ st = isnan(x)
+ if isinstance(st, types.NotImplementedType):
+ raise TypeError("isnan not supported for this type")
+ return st
+
+def gisfinite(x):
+    """like isfinite, but always raises an error if the type is not supported,
+    instead of returning NotImplemented.
+
+ Notes
+ -----
+    isfinite and other ufuncs sometimes return NotImplemented instead of
+    raising an exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isfinite
+ st = isfinite(x)
+ if isinstance(st, types.NotImplementedType):
+ raise TypeError("isfinite not supported for this type")
+ return st
+
+def gisinf(x):
+    """like isinf, but always raises an error if the type is not supported,
+    instead of returning NotImplemented.
+
+ Notes
+ -----
+    isinf and other ufuncs sometimes return NotImplemented instead of
+    raising an exception. This function is a wrapper to make sure an
+    exception is always raised.
+
+ This should be removed once this problem is solved at the Ufunc level."""
+ from numpy.core import isinf
+ st = isinf(x)
+ if isinstance(st, types.NotImplementedType):
+ raise TypeError("isinf not supported for this type")
+ return st
def rand(*args):
"""Returns an array of random numbers with the given shape.
@@ -181,10 +232,69 @@ def assert_equal(actual,desired,err_msg='',verbose=True):
for k in range(len(desired)):
assert_equal(actual[k], desired[k], 'item=%r\n%s' % (k,err_msg), verbose)
return
- from numpy.core import ndarray
+ from numpy.core import ndarray, isscalar, signbit
+ from numpy.lib import iscomplexobj, real, imag
if isinstance(actual, ndarray) or isinstance(desired, ndarray):
return assert_array_equal(actual, desired, err_msg, verbose)
msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
+
+ # Handle complex numbers: separate into real/imag to handle
+ # nan/inf/negative zero correctly
+    # XXX: catch ValueError for subclasses of ndarray where iscomplex fails
+ try:
+ usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+ except ValueError:
+ usecomplex = False
+
+ if usecomplex:
+ if iscomplexobj(actual):
+ actualr = real(actual)
+ actuali = imag(actual)
+ else:
+ actualr = actual
+ actuali = 0
+ if iscomplexobj(desired):
+ desiredr = real(desired)
+ desiredi = imag(desired)
+ else:
+ desiredr = desired
+ desiredi = 0
+ try:
+ assert_equal(actualr, desiredr)
+ assert_equal(actuali, desiredi)
+ except AssertionError:
+ raise AssertionError("Items are not equal:\n" \
+ "ACTUAL: %s\n" \
+ "DESIRED: %s\n" % (str(actual), str(desired)))
+
+ # Inf/nan/negative zero handling
+ try:
+ # isscalar test to check cases such as [np.nan] != np.nan
+ if isscalar(desired) != isscalar(actual):
+ raise AssertionError(msg)
+
+ # If one of desired/actual is not finite, handle it specially here:
+ # check that both are nan if any is a nan, and test for equality
+ # otherwise
+ if not (gisfinite(desired) and gisfinite(actual)):
+ isdesnan = gisnan(desired)
+ isactnan = gisnan(actual)
+ if isdesnan or isactnan:
+ if not (isdesnan and isactnan):
+ raise AssertionError(msg)
+ else:
+ if not desired == actual:
+ raise AssertionError(msg)
+ return
+ elif desired == 0 and actual == 0:
+ if not signbit(desired) == signbit(actual):
+ raise AssertionError(msg)
+ # If TypeError or ValueError raised while using isnan and co, just handle
+ # as before
+ except TypeError:
+ pass
+ except ValueError:
+ pass
if desired != actual :
raise AssertionError(msg)
@@ -258,9 +368,55 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
"""
from numpy.core import ndarray
+ from numpy.lib import iscomplexobj, real, imag
+
+ # Handle complex numbers: separate into real/imag to handle
+ # nan/inf/negative zero correctly
+    # XXX: catch ValueError for subclasses of ndarray where iscomplex fails
+ try:
+ usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+ except ValueError:
+ usecomplex = False
+
+ if usecomplex:
+ if iscomplexobj(actual):
+ actualr = real(actual)
+ actuali = imag(actual)
+ else:
+ actualr = actual
+ actuali = 0
+ if iscomplexobj(desired):
+ desiredr = real(desired)
+ desiredi = imag(desired)
+ else:
+ desiredr = desired
+ desiredi = 0
+ try:
+ assert_almost_equal(actualr, desiredr, decimal=decimal)
+ assert_almost_equal(actuali, desiredi, decimal=decimal)
+ except AssertionError:
+ raise AssertionError("Items are not equal:\n" \
+ "ACTUAL: %s\n" \
+ "DESIRED: %s\n" % (str(actual), str(desired)))
+
if isinstance(actual, ndarray) or isinstance(desired, ndarray):
return assert_array_almost_equal(actual, desired, decimal, err_msg)
- msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
+ msg = build_err_msg([actual, desired], err_msg, verbose=verbose,
+ header='Arrays are not almost equal')
+ try:
+ # If one of desired/actual is not finite, handle it specially here:
+ # check that both are nan if any is a nan, and test for equality
+ # otherwise
+ if not (gisfinite(desired) and gisfinite(actual)):
+ if gisnan(desired) or gisnan(actual):
+ if not (gisnan(desired) and gisnan(actual)):
+ raise AssertionError(msg)
+ else:
+ if not desired == actual:
+ raise AssertionError(msg)
+ return
+ except TypeError:
+ pass
if round(abs(desired - actual),decimal) != 0 :
raise AssertionError(msg)
@@ -317,12 +473,14 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
True
"""
- import math
+ import numpy as np
actual, desired = map(float, (actual, desired))
if desired==actual:
return
# Normalized the numbers to be in range (-10.0,10.0)
- scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+ # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+ scale = 0.5*(np.abs(desired) + np.abs(actual))
+ scale = np.power(10,np.floor(np.log10(scale)))
try:
sc_desired = desired/scale
except ZeroDivisionError:
@@ -335,7 +493,21 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
header='Items are not equal to %d significant digits:' %
significant,
verbose=verbose)
- if math.fabs(sc_desired - sc_actual) >= pow(10.,-(significant-1)) :
+ try:
+ # If one of desired/actual is not finite, handle it specially here:
+ # check that both are nan if any is a nan, and test for equality
+ # otherwise
+ if not (gisfinite(desired) and gisfinite(actual)):
+ if gisnan(desired) or gisnan(actual):
+ if not (gisnan(desired) and gisnan(actual)):
+ raise AssertionError(msg)
+ else:
+ if not desired == actual:
+ raise AssertionError(msg)
+ return
+ except TypeError:
+ pass
+ if np.abs(sc_desired - sc_actual) >= np.power(10.,-(significant-1)) :
raise AssertionError(msg)
def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
@@ -374,6 +546,10 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
'%s mismatch)' % (xnanid, ynanid),
verbose=verbose, header=header,
names=('x', 'y'))
+ raise AssertionError(msg)
+ # If only one item, it was a nan, so just return
+ if x.size == y.size == 1:
+ return
val = comparison(x[~xnanid], y[~ynanid])
else:
val = comparison(x,y)
@@ -526,9 +702,22 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
y: array([ 1. , 2.33333, 5. ])
"""
- from numpy.core import around, number, float_
+ from numpy.core import around, number, float_, any
from numpy.lib import issubdtype
def compare(x, y):
+ try:
+ if any(gisinf(x)) or any( gisinf(y)):
+ xinfid = gisinf(x)
+ yinfid = gisinf(y)
+ if not xinfid == yinfid:
+ return False
+            # if there is only one item, x and y are each +-inf
+ if x.size == y.size == 1:
+ return x == y
+ x = x[~xinfid]
+ y = y[~yinfid]
+ except TypeError:
+ pass
z = abs(x-y)
if not issubdtype(z.dtype, number):
z = z.astype(float_) # handle object arrays