Diffstat (limited to 'numpy')
-rw-r--r--  numpy/core/__init__.py                          5
-rw-r--r--  numpy/core/_internal.py                        46
-rw-r--r--  numpy/core/_methods.py                          1
-rw-r--r--  numpy/core/arrayprint.py                       41
-rw-r--r--  numpy/core/defchararray.py                     38
-rw-r--r--  numpy/core/function_base.py                     6
-rw-r--r--  numpy/core/getlimits.py                        48
-rw-r--r--  numpy/core/machar.py                           52
-rw-r--r--  numpy/core/memmap.py                            8
-rw-r--r--  numpy/core/numeric.py                          85
-rw-r--r--  numpy/core/numerictypes.py                     74
-rw-r--r--  numpy/core/records.py                          32
-rw-r--r--  numpy/core/setup.py                            72
-rw-r--r--  numpy/core/setup_common.py                     41
-rw-r--r--  numpy/core/shape_base.py                       12
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src     53
-rw-r--r--  numpy/core/src/multiarray/mapping.c             4
-rw-r--r--  numpy/core/src/umath/loops.c.src               29
-rw-r--r--  numpy/core/tests/test_function_base.py          1
-rw-r--r--  numpy/core/tests/test_indexing.py              21
-rw-r--r--  numpy/core/tests/test_multiarray.py            44
-rw-r--r--  numpy/lib/index_tricks.py                       2
-rw-r--r--  numpy/lib/tests/test_index_tricks.py            9
23 files changed, 412 insertions(+), 312 deletions(-)
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index f6b9534eb..41314cee4 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -18,7 +18,7 @@ del envbak
del os
from . import umath
-from . import _internal # for freeze programs
+from . import _internal # for freeze programs
from . import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
@@ -40,8 +40,7 @@ from . import shape_base
from .shape_base import *
del nt
-from .fromnumeric import amax as max, amin as min, \
- round_ as round
+from .fromnumeric import amax as max, amin as min, round_ as round
from .numeric import absolute as abs
__all__ = ['char', 'rec', 'memmap']
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index a20bf10e4..f8271d5ab 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -8,9 +8,8 @@ from __future__ import division, absolute_import, print_function
import re
import sys
-import warnings
-from numpy.compat import asbytes, bytes, basestring
+from numpy.compat import asbytes, basestring
from .multiarray import dtype, array, ndarray
import ctypes
from .numerictypes import object_
@@ -73,10 +72,10 @@ def _usefields(adict, align):
else:
titles.append(None)
- return dtype({"names" : names,
- "formats" : formats,
- "offsets" : offsets,
- "titles" : titles}, align)
+ return dtype({"names": names,
+ "formats": formats,
+ "offsets": offsets,
+ "titles": titles}, align)
# construct an array_protocol descriptor list
@@ -101,7 +100,6 @@ def _array_descr(descriptor):
else:
return (_array_descr(subdtype[0]), subdtype[1])
-
names = descriptor.names
ordered_fields = [fields[x] + (x,) for x in names]
result = []
@@ -167,8 +165,8 @@ def _commastring(astr):
mo = sep_re.match(astr, pos=startindex)
if not mo:
raise ValueError(
- 'format number %d of "%s" is not recognized' %
- (len(result)+1, astr))
+ 'format number %d of "%s" is not recognized' %
+ (len(result)+1, astr))
startindex = mo.end()
if order2 == asbytes(''):
@@ -179,7 +177,9 @@ def _commastring(astr):
order1 = _convorder.get(order1, order1)
order2 = _convorder.get(order2, order2)
if (order1 != order2):
- raise ValueError('inconsistent byte-order specification %s and %s' % (order1, order2))
+ raise ValueError(
+ 'inconsistent byte-order specification %s and %s' %
+ (order1, order2))
order = order1
if order in [asbytes('|'), asbytes('='), _nbo]:
@@ -354,9 +354,9 @@ def _get_all_field_offsets(dtype, base_offset=0):
fields = []
if dtype.fields is not None:
for name in dtype.names:
- sub_dtype = dtype.fields[name][0]
- sub_offset = dtype.fields[name][1] + base_offset
- fields.extend(_get_all_field_offsets(sub_dtype, sub_offset))
+ sub_dtype = dtype.fields[name][0]
+ sub_offset = dtype.fields[name][1] + base_offset
+ fields.extend(_get_all_field_offsets(sub_dtype, sub_offset))
else:
if dtype.shape:
sub_offsets = _get_all_field_offsets(dtype.base, base_offset)
@@ -484,7 +484,7 @@ def _view_is_safe(oldtype, newtype):
# 'tiled positions' of the object match up. Here, we allow
# for arbirary itemsizes (even those possibly disallowed
# due to stride/data length issues).
- if old_size == new_size:
+ if old_size == new_size:
new_num = old_num = 1
else:
gcd_new_old = _gcd(new_size, old_size)
@@ -525,7 +525,7 @@ _pep3118_native_map = {
's': 'S',
'w': 'U',
'O': 'O',
- 'x': 'V', # padding
+ 'x': 'V', # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())
@@ -549,7 +549,7 @@ _pep3118_standard_map = {
's': 'S',
'w': 'U',
'O': 'O',
- 'x': 'V', # padding
+ 'x': 'V', # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
@@ -560,11 +560,12 @@ def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
this_explicit_name = False
common_alignment = 1
is_padding = False
- last_offset = 0
dummy_name_index = [0]
+
def next_dummy_name():
dummy_name_index[0] += 1
+
def get_dummy_name():
while True:
name = 'f%d' % dummy_name_index[0]
@@ -688,7 +689,6 @@ def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
raise RuntimeError("Duplicate field name '%s' in PEP3118 format"
% name)
fields[name] = (value, offset)
- last_offset = offset
if not this_explicit_name:
next_dummy_name()
@@ -698,8 +698,8 @@ def _dtype_from_pep3118(spec, byteorder='@', is_subdtype=False):
offset += extra_offset
# Check if this was a simple 1-item type
- if len(fields) == 1 and not explicit_name and fields['f0'][1] == 0 \
- and not is_subdtype:
+ if (len(fields) == 1 and not explicit_name and
+ fields['f0'][1] == 0 and not is_subdtype):
ret = fields['f0'][0]
else:
ret = dtype(fields)
@@ -724,8 +724,8 @@ def _add_trailing_padding(value, padding):
else:
vfields = dict(value.fields)
- if value.names and value.names[-1] == '' and \
- value[''].char == 'V':
+ if (value.names and value.names[-1] == '' and
+ value[''].char == 'V'):
# A trailing padding field is already present
vfields[''] = ('V%d' % (vfields[''][0].itemsize + padding),
vfields[''][1])
@@ -757,5 +757,5 @@ def _prod(a):
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
- a, b = b, a%b
+ a, b = b, a % b
return a
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index 00716e1b4..5fc2bc445 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -58,7 +58,6 @@ def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
if rcount == 0:
warnings.warn("Mean of empty slice.", RuntimeWarning)
-
# Cast bool, unsigned int, and int to float64 by default
if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool_)):
dtype = mu.dtype('f8')
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index b8acaee97..a28b5a89e 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -31,10 +31,11 @@ else:
_MAXINT = sys.maxint
_MININT = -sys.maxint - 1
-def product(x, y): return x*y
+def product(x, y):
+ return x*y
_summaryEdgeItems = 3 # repr N leading and trailing items of each dimension
-_summaryThreshold = 1000 # total items > triggers array summarization
+_summaryThreshold = 1000 # total items > triggers array summarization
_float_output_precision = 8
_float_output_suppress_small = False
@@ -149,9 +150,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
... suppress=False, threshold=1000, formatter=None)
"""
- global _summaryThreshold, _summaryEdgeItems, _float_output_precision, \
- _line_width, _float_output_suppress_small, _nan_str, _inf_str, \
- _formatter
+ global _summaryThreshold, _summaryEdgeItems, _float_output_precision
+ global _line_width, _float_output_suppress_small, _nan_str, _inf_str
+ global _formatter
+
if linewidth is not None:
_line_width = linewidth
if threshold is not None:
@@ -254,17 +256,17 @@ def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
summary_insert = ""
data = ravel(asarray(a))
- formatdict = {'bool' : _boolFormatter,
- 'int' : IntegerFormat(data),
- 'float' : FloatFormat(data, precision, suppress_small),
- 'longfloat' : LongFloatFormat(precision),
- 'complexfloat' : ComplexFormat(data, precision,
+ formatdict = {'bool': _boolFormatter,
+ 'int': IntegerFormat(data),
+ 'float': FloatFormat(data, precision, suppress_small),
+ 'longfloat': LongFloatFormat(precision),
+ 'complexfloat': ComplexFormat(data, precision,
suppress_small),
- 'longcomplexfloat' : LongComplexFormat(precision),
- 'datetime' : DatetimeFormat(data),
- 'timedelta' : TimedeltaFormat(data),
- 'numpystr' : repr_format,
- 'str' : str}
+ 'longcomplexfloat': LongComplexFormat(precision),
+ 'datetime': DatetimeFormat(data),
+ 'timedelta': TimedeltaFormat(data),
+ 'numpystr': repr_format,
+ 'str': str}
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
@@ -469,10 +471,13 @@ def _formatArray(a, format_function, rank, max_line_len,
return str(obj)
if summary_insert and 2*edge_items < len(a):
- leading_items, trailing_items, summary_insert1 = \
- edge_items, edge_items, summary_insert
+ leading_items = edge_items
+ trailing_items = edge_items
+ summary_insert1 = summary_insert
else:
- leading_items, trailing_items, summary_insert1 = 0, len(a), ""
+ leading_items = 0
+ trailing_items = len(a)
+ summary_insert1 = ""
if rank == 1:
s = ""
diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py
index ead11e8d8..e18f912d6 100644
--- a/numpy/core/defchararray.py
+++ b/numpy/core/defchararray.py
@@ -25,17 +25,18 @@ from numpy.core.multiarray import _vec_string
from numpy.compat import asbytes, long
import numpy
-__all__ = ['chararray',
- 'equal', 'not_equal', 'greater_equal', 'less_equal', 'greater', 'less',
- 'str_len', 'add', 'multiply', 'mod', 'capitalize', 'center', 'count',
- 'decode', 'encode', 'endswith', 'expandtabs', 'find', 'format',
- 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
- 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip',
- 'partition', 'replace', 'rfind', 'rindex', 'rjust', 'rpartition',
- 'rsplit', 'rstrip', 'split', 'splitlines', 'startswith', 'strip',
- 'swapcase', 'title', 'translate', 'upper', 'zfill',
- 'isnumeric', 'isdecimal',
- 'array', 'asarray']
+__all__ = [
+ 'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal',
+ 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
+ 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
+ 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
+ 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
+ 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
+ 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
+ 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
+ 'array', 'asarray'
+ ]
+
_globalvar = 0
if sys.version_info[0] >= 3:
@@ -55,8 +56,8 @@ def _use_unicode(*args):
result should be unicode.
"""
for x in args:
- if (isinstance(x, _unicode)
- or issubclass(numpy.asarray(x).dtype.type, unicode_)):
+ if (isinstance(x, _unicode) or
+ issubclass(numpy.asarray(x).dtype.type, unicode_)):
return unicode_
return string_
@@ -1068,7 +1069,7 @@ def replace(a, old, new, count=None):
"""
return _to_string_or_unicode_array(
_vec_string(
- a, object_, 'replace', [old, new] +_clean_args(count)))
+ a, object_, 'replace', [old, new] + _clean_args(count)))
def rfind(a, sub, start=0, end=None):
@@ -2039,7 +2040,6 @@ class chararray(ndarray):
"""
return count(self, sub, start, end)
-
def decode(self, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
@@ -2610,10 +2610,10 @@ def array(obj, itemsize=None, copy=True, unicode=None, order=None):
if order is not None:
obj = numpy.asarray(obj, order=order)
- if (copy
- or (itemsize != obj.itemsize)
- or (not unicode and isinstance(obj, unicode_))
- or (unicode and isinstance(obj, string_))):
+ if (copy or
+ (itemsize != obj.itemsize) or
+ (not unicode and isinstance(obj, unicode_)) or
+ (unicode and isinstance(obj, string_))):
obj = obj.astype((dtype, long(itemsize)))
return obj
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 1e759e0c2..532ef2950 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
__all__ = ['logspace', 'linspace']
from . import numeric as _nx
-from .numeric import array, result_type, NaN
+from .numeric import result_type, NaN
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
@@ -25,7 +25,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
evenly spaced samples, so that `stop` is excluded. Note that the step
size changes when `endpoint` is False.
num : int, optional
- Number of samples to generate. Default is 50.
+ Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, `stop` is the last sample. Otherwise, it is not included.
Default is True.
@@ -82,6 +82,8 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
"""
num = int(num)
+ if num < 0:
+ raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
# Convert float/complex array scalars to float, gh-3504
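A quick usage sketch, not part of the diff: the linspace hunk above adds an early guard so that a negative sample count raises immediately instead of failing later with a confusing shape error. Assuming the patched numpy is importable as np:

    import numpy as np

    # Normal use: five evenly spaced samples, endpoint included.
    print(np.linspace(0.0, 1.0, num=5))          # [0.  0.25 0.5  0.75 1. ]

    # With the added check, a negative count fails fast with a clear message.
    try:
        np.linspace(0.0, 1.0, num=-1)
    except ValueError as exc:
        print(exc)   # Number of samples, -1, must be non-negative.
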
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index bd1c4571b..2ea9c0e11 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -12,7 +12,8 @@ from .numeric import array
def _frz(a):
"""fix rank-0 --> rank-1"""
- if a.ndim == 0: a.shape = (1,)
+ if a.ndim == 0:
+ a.shape = (1,)
return a
_convert_to_float = {
@@ -170,25 +171,25 @@ class finfo(object):
return self
def __str__(self):
- return '''\
-Machine parameters for %(dtype)s
----------------------------------------------------------------------
-precision=%(precision)3s resolution= %(_str_resolution)s
-machep=%(machep)6s eps= %(_str_eps)s
-negep =%(negep)6s epsneg= %(_str_epsneg)s
-minexp=%(minexp)6s tiny= %(_str_tiny)s
-maxexp=%(maxexp)6s max= %(_str_max)s
-nexp =%(nexp)6s min= -max
----------------------------------------------------------------------
-''' % self.__dict__
+ fmt = (
+ 'Machine parameters for %(dtype)s\n'
+ '---------------------------------------------------------------\n'
+ 'precision=%(precision)3s resolution= %(_str_resolution)s\n'
+ 'machep=%(machep)6s eps= %(_str_eps)s\n'
+ 'negep =%(negep)6s epsneg= %(_str_epsneg)s\n'
+ 'minexp=%(minexp)6s tiny= %(_str_tiny)s\n'
+ 'maxexp=%(maxexp)6s max= %(_str_max)s\n'
+ 'nexp =%(nexp)6s min= -max\n'
+ '---------------------------------------------------------------\n'
+ )
+ return fmt % self.__dict__
def __repr__(self):
c = self.__class__.__name__
d = self.__dict__.copy()
d['klass'] = c
- return ("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s," \
- + " max=%(_str_max)s, dtype=%(dtype)s)") \
- % d
+ return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
+ " max=%(_str_max)s, dtype=%(dtype)s)") % d)
class iinfo(object):
@@ -249,7 +250,7 @@ class iinfo(object):
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
- if not self.kind in 'iu':
+ if self.kind not in 'iu':
raise ValueError("Invalid integer data type.")
def min(self):
@@ -282,13 +283,14 @@ class iinfo(object):
def __str__(self):
"""String representation."""
- return '''\
-Machine parameters for %(dtype)s
----------------------------------------------------------------------
-min = %(min)s
-max = %(max)s
----------------------------------------------------------------------
-''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
+ fmt = (
+ 'Machine parameters for %(dtype)s\n'
+ '---------------------------------------------------------------\n'
+ 'min = %(min)s\n'
+ 'max = %(max)s\n'
+ '---------------------------------------------------------------\n'
+ )
+ return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
def __repr__(self):
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index 9eb4430a6..6f2735d32 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -94,16 +94,19 @@ class MachAr(object):
Cambridge University Press, 2002, p. 31.
"""
+
def __init__(self, float_conv=float,int_conv=int,
float_to_float=float,
- float_to_str = lambda v:'%24.16e' % v,
- title = 'Python floating point number'):
+ float_to_str=lambda v:'%24.16e' % v,
+ title='Python floating point number'):
"""
- float_conv - convert integer to float (array)
- int_conv - convert float (array) to integer
- float_to_float - convert float array to float
- float_to_str - convert array float to str
- title - description of used floating point numbers
+
+ float_conv - convert integer to float (array)
+ int_conv - convert float (array) to integer
+ float_to_float - convert float array to float
+ float_to_str - convert array float to str
+ title - description of used floating point numbers
+
"""
# We ignore all errors here because we are purposely triggering
# underflow to detect the properties of the runninng arch.
@@ -169,7 +172,7 @@ class MachAr(object):
irnd = 1
tempa = a + beta
temp = tempa + betah
- if irnd==0 and any(temp-tempa != zero):
+ if irnd == 0 and any(temp-tempa != zero):
irnd = 2
# Determine negep and epsneg
@@ -188,7 +191,7 @@ class MachAr(object):
# Prevent infinite loop on PPC with gcc 4.0:
if negep < 0:
raise RuntimeError("could not determine machine tolerance "
- "for 'negep', locals() -> %s" % (locals()))
+ "for 'negep', locals() -> %s" % (locals()))
else:
raise RuntimeError(msg % (_, one.dtype))
negep = -negep
@@ -211,7 +214,7 @@ class MachAr(object):
# Determine ngrd
ngrd = 0
temp = one + eps
- if irnd==0 and any(temp*one - one != zero):
+ if irnd == 0 and any(temp*one - one != zero):
ngrd = 1
# Determine iexp
@@ -223,9 +226,9 @@ class MachAr(object):
for _ in range(max_iterN):
y = z
z = y*y
- a = z*one # Check here for underflow
+ a = z*one # Check here for underflow
temp = z*t
- if any(a+a == zero) or any(abs(z)>=y):
+ if any(a+a == zero) or any(abs(z) >= y):
break
temp1 = temp * betain
if any(temp1*beta == z):
@@ -251,7 +254,7 @@ class MachAr(object):
y = y * betain
a = y * one
temp = y * t
- if any(a+a != zero) and any(abs(y) < xmin):
+ if any((a + a) != zero) and any(abs(y) < xmin):
k = k + 1
temp1 = temp * betain
if any(temp1*beta == y) and any(temp != y):
@@ -285,7 +288,7 @@ class MachAr(object):
xmax = xmax / (xmin*beta*beta*beta)
i = maxexp + minexp + 3
for j in range(i):
- if ibeta==2:
+ if ibeta == 2:
xmax = xmax + xmax
else:
xmax = xmax * beta
@@ -322,16 +325,17 @@ class MachAr(object):
self._str_resolution = float_to_str(resolution)
def __str__(self):
- return '''\
-Machine parameters for %(title)s
----------------------------------------------------------------------
-ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s
-machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)
-negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)
-minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)
-maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)
----------------------------------------------------------------------
-''' % self.__dict__
+ fmt = (
+ 'Machine parameters for %(title)s\n'
+ '---------------------------------------------------------------------\n'
+ 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
+ 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
+ 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
+ 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
+ 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
+ '---------------------------------------------------------------------\n'
+ )
+ return fmt % self.__dict__
if __name__ == '__main__':
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index 6397e8939..70d7b72b4 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -1,14 +1,11 @@
from __future__ import division, absolute_import, print_function
-__all__ = ['memmap']
-
-import warnings
-import sys
-
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import long, basestring
+__all__ = ['memmap']
+
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
@@ -200,6 +197,7 @@ class memmap(ndarray):
"""
__array_priority__ = -100.0
+
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index f7b0f49fa..fd53b5c72 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import os
import sys
import warnings
import collections
@@ -21,30 +20,28 @@ else:
loads = pickle.loads
-__all__ = ['newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
- 'arange', 'array', 'zeros', 'count_nonzero',
- 'empty', 'broadcast', 'dtype', 'fromstring', 'fromfile',
- 'frombuffer', 'int_asbuffer', 'where', 'argwhere', 'copyto',
- 'concatenate', 'fastCopyAndTranspose', 'lexsort', 'set_numeric_ops',
- 'can_cast', 'promote_types', 'min_scalar_type', 'result_type',
- 'asarray', 'asanyarray', 'ascontiguousarray', 'asfortranarray',
- 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
- 'correlate', 'convolve', 'inner', 'dot', 'einsum', 'outer', 'vdot',
- 'alterdot', 'restoredot', 'roll', 'rollaxis', 'cross', 'tensordot',
- 'array2string', 'get_printoptions', 'set_printoptions',
- 'array_repr', 'array_str', 'set_string_function',
- 'little_endian', 'require',
- 'fromiter', 'array_equal', 'array_equiv',
- 'indices', 'fromfunction', 'isclose',
- 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr',
- 'ones', 'identity', 'allclose', 'compare_chararrays', 'putmask',
- 'seterr', 'geterr', 'setbufsize', 'getbufsize',
- 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
- 'Inf', 'inf', 'infty', 'Infinity',
- 'nan', 'NaN', 'False_', 'True_', 'bitwise_not',
- 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE', 'ALLOW_THREADS',
- 'ComplexWarning', 'may_share_memory', 'full', 'full_like',
- 'matmul']
+__all__ = [
+ 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
+ 'arange', 'array', 'zeros', 'count_nonzero', 'empty', 'broadcast',
+ 'dtype', 'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer',
+ 'where', 'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose',
+ 'lexsort', 'set_numeric_ops', 'can_cast', 'promote_types',
+ 'min_scalar_type', 'result_type', 'asarray', 'asanyarray',
+ 'ascontiguousarray', 'asfortranarray', 'isfortran', 'empty_like',
+ 'zeros_like', 'ones_like', 'correlate', 'convolve', 'inner', 'dot',
+ 'einsum', 'outer', 'vdot', 'alterdot', 'restoredot', 'roll',
+ 'rollaxis', 'cross', 'tensordot', 'array2string', 'get_printoptions',
+ 'set_printoptions', 'array_repr', 'array_str', 'set_string_function',
+ 'little_endian', 'require', 'fromiter', 'array_equal', 'array_equiv',
+ 'indices', 'fromfunction', 'isclose', 'load', 'loads', 'isscalar',
+ 'binary_repr', 'base_repr', 'ones', 'identity', 'allclose',
+ 'compare_chararrays', 'putmask', 'seterr', 'geterr', 'setbufsize',
+ 'getbufsize', 'seterrcall', 'geterrcall', 'errstate', 'flatnonzero',
+ 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN', 'False_', 'True_',
+ 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS', 'BUFSIZE',
+ 'ALLOW_THREADS', 'ComplexWarning', 'may_share_memory', 'full',
+ 'full_like', 'matmul',
+ ]
if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
@@ -826,8 +823,8 @@ def flatnonzero(a):
return a.ravel().nonzero()[0]
_mode_from_name_dict = {'v': 0,
- 's' : 1,
- 'f' : 2}
+ 's': 1,
+ 'f': 2}
def _mode_from_name(mode):
if isinstance(mode, basestring):
@@ -991,9 +988,9 @@ def convolve(a,v,mode='full'):
a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
if (len(v) > len(a)):
a, v = v, a
- if len(a) == 0 :
+ if len(a) == 0:
raise ValueError('a cannot be empty')
- if len(v) == 0 :
+ if len(v) == 0:
raise ValueError('v cannot be empty')
mode = _mode_from_name(mode)
return multiarray.correlate(a, v[::-1], mode)
@@ -1281,7 +1278,8 @@ def tensordot(a, b, axes=2):
bs = b.shape
ndb = len(b.shape)
equal = True
- if (na != nb): equal = False
+ if na != nb:
+ equal = False
else:
for k in range(na):
if as_[axes_a[k]] != bs[axes_b[k]]:
@@ -1701,14 +1699,14 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
'array([ 0.000001, 0. , 2. , 3. ])'
"""
- if arr.size > 0 or arr.shape==(0,):
+ if arr.size > 0 or arr.shape == (0,):
lst = array2string(arr, max_line_width, precision, suppress_small,
', ', "array(")
- else: # show zero-length shape unless it is (0,)
+ else: # show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(arr.shape),)
if arr.__class__ is not ndarray:
- cName= arr.__class__.__name__
+ cName = arr.__class__.__name__
else:
cName = "array"
@@ -1900,7 +1898,7 @@ def indices(dimensions, dtype=int):
for i, dim in enumerate(dimensions):
tmp = arange(dim, dtype=dtype)
tmp.shape = (1,)*i + (dim,)+(1,)*(N-i-1)
- newdim = dimensions[:i] + (1,)+ dimensions[i+1:]
+ newdim = dimensions[:i] + (1,) + dimensions[i+1:]
val = zeros(newdim, dtype)
add(tmp, val, res[i])
return res
@@ -2548,13 +2546,17 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
pyvals = umath.geterrobj()
old = geterr()
- if divide is None: divide = all or old['divide']
- if over is None: over = all or old['over']
- if under is None: under = all or old['under']
- if invalid is None: invalid = all or old['invalid']
+ if divide is None:
+ divide = all or old['divide']
+ if over is None:
+ over = all or old['over']
+ if under is None:
+ under = all or old['under']
+ if invalid is None:
+ invalid = all or old['invalid']
maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
- (_errdict[over] << SHIFT_OVERFLOW ) +
+ (_errdict[over] << SHIFT_OVERFLOW) +
(_errdict[under] << SHIFT_UNDERFLOW) +
(_errdict[invalid] << SHIFT_INVALID))
@@ -2626,9 +2628,9 @@ def setbufsize(size):
if size > 10e6:
raise ValueError("Buffer size, %s, is too big." % size)
if size < 5:
- raise ValueError("Buffer size, %s, is too small." %size)
+ raise ValueError("Buffer size, %s, is too small." % size)
if size % 16 != 0:
- raise ValueError("Buffer size, %s, is not a multiple of 16." %size)
+ raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
pyvals = umath.geterrobj()
old = getbufsize()
@@ -2849,6 +2851,7 @@ class errstate(object):
"""
# Note that we don't want to run the above doctests because they will fail
# without a from __future__ import with_statement
+
def __init__(self, **kwargs):
self.call = kwargs.pop('call', _Unspecified)
self.kwargs = kwargs
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 0c03cce89..7dc6e0bd8 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -82,6 +82,18 @@ Exported symbols include:
"""
from __future__ import division, absolute_import, print_function
+import types as _types
+import sys
+import numbers
+
+from numpy.compat import bytes, long
+from numpy.core.multiarray import (
+ typeinfo, ndarray, array, empty, dtype, datetime_data,
+ datetime_as_string, busday_offset, busday_count, is_busday,
+ busdaycalendar
+ )
+
+
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
@@ -90,15 +102,6 @@ __all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
'busday_offset', 'busday_count', 'is_busday', 'busdaycalendar',
]
-from numpy.core.multiarray import (
- typeinfo, ndarray, array, empty, dtype, datetime_data,
- datetime_as_string, busday_offset, busday_count, is_busday,
- busdaycalendar
- )
-import types as _types
-import sys
-from numpy.compat import bytes, long
-import numbers
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
@@ -117,16 +120,9 @@ else:
_all_chars = [chr(_m) for _m in range(256)]
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
-LOWER_TABLE="".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
-UPPER_TABLE="".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
+LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
+UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
-#import string
-# assert (string.maketrans(string.ascii_uppercase, string.ascii_lowercase) == \
-# LOWER_TABLE)
-# assert (string.maketrnas(string_ascii_lowercase, string.ascii_uppercase) == \
-# UPPER_TABLE)
-#LOWER_TABLE = string.maketrans(string.ascii_uppercase, string.ascii_lowercase)
-#UPPER_TABLE = string.maketrans(string.ascii_lowercase, string.ascii_uppercase)
def english_lower(s):
""" Apply English case rules to convert ASCII strings to all lower case.
@@ -251,30 +247,30 @@ def bitname(obj):
if name == 'bool_':
char = 'b'
base = 'bool'
- elif name=='void':
+ elif name == 'void':
char = 'V'
base = 'void'
- elif name=='object_':
+ elif name == 'object_':
char = 'O'
base = 'object'
bits = 0
- elif name=='datetime64':
+ elif name == 'datetime64':
char = 'M'
- elif name=='timedelta64':
+ elif name == 'timedelta64':
char = 'm'
if sys.version_info[0] >= 3:
- if name=='bytes_':
+ if name == 'bytes_':
char = 'S'
base = 'bytes'
- elif name=='str_':
+ elif name == 'str_':
char = 'U'
base = 'str'
else:
- if name=='string_':
+ if name == 'string_':
char = 'S'
base = 'string'
- elif name=='unicode_':
+ elif name == 'unicode_':
char = 'U'
base = 'unicode'
@@ -310,11 +306,12 @@ def _add_aliases():
typeobj = typeinfo[a][-1]
# insert bit-width version for this class (if relevant)
base, bit, char = bitname(typeobj)
- if base[-3:] == 'int' or char[0] in 'ui': continue
+ if base[-3:] == 'int' or char[0] in 'ui':
+ continue
if base != '':
myname = "%s%d" % (base, bit)
- if (name != 'longdouble' and name != 'clongdouble') or \
- myname not in allTypes.keys():
+ if ((name != 'longdouble' and name != 'clongdouble') or
+ myname not in allTypes.keys()):
allTypes[myname] = typeobj
sctypeDict[myname] = typeobj
if base == 'complex':
@@ -334,15 +331,10 @@ def _add_aliases():
sctypeNA[char] = na_name
_add_aliases()
-# Integers handled so that
-# The int32, int64 types should agree exactly with
-# PyArray_INT32, PyArray_INT64 in C
-# We need to enforce the same checking as is done
-# in arrayobject.h where the order of getting a
-# bit-width match is:
-# long, longlong, int, short, char
-# for int8, int16, int32, int64, int128
-
+# Integers are handled so that the int32 and int64 types should agree
+# exactly with NPY_INT32, NPY_INT64. We need to enforce the same checking
+# as is done in arrayobject.h where the order of getting a bit-width match
+# is long, longlong, int, short, char.
def _add_integer_aliases():
_ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
for ctype in _ctypes:
@@ -549,7 +541,7 @@ _python_types = {int: 'int_',
bytes: 'bytes_',
unicode: 'unicode_',
buffer_type: 'void',
- }
+ }
if sys.version_info[0] >= 3:
def _python_type(t):
@@ -778,6 +770,7 @@ class _typedict(dict):
first they have to be populated.
"""
+
def __getitem__(self, obj):
return dict.__getitem__(self, obj2sctype(obj))
@@ -864,7 +857,7 @@ except AttributeError:
ScalarType.extend(_sctype2char_dict.keys())
ScalarType = tuple(ScalarType)
for key in _sctype2char_dict.keys():
- cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
+ cast[key] = lambda x, k=key: array(x, copy=False).astype(k)
# Create the typestring lookup dictionary
_typestr = _typedict()
@@ -965,6 +958,7 @@ def _register_types():
numbers.Integral.register(integer)
numbers.Complex.register(inexact)
numbers.Real.register(floating)
+
_register_types()
def find_common_type(array_types, scalar_types):
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 9c6d8347a..4a995533a 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -139,6 +139,7 @@ class format_parser:
dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])
"""
+
def __init__(self, formats, names, titles, aligned=False, byteorder=None):
self._parseFormats(formats, aligned)
self._setfieldnames(names, titles)
@@ -434,7 +435,7 @@ class recarray(ndarray):
# accessed by attribute).
try:
return object.__getattribute__(self, attr)
- except AttributeError: # attr must be a fieldname
+ except AttributeError: # attr must be a fieldname
pass
# look for a field with this name
@@ -478,9 +479,10 @@ class recarray(ndarray):
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
return ret
- if newattr: # We just added this one
- try: # or this setattr worked on an internal
- # attribute.
+ if newattr:
+ # We just added this one or this setattr worked on an
+ # internal attribute.
+ try:
object.__delattr__(self, attr)
except:
return ret
@@ -506,14 +508,14 @@ class recarray(ndarray):
def __repr__(self):
# get data/shape string. logic taken from numeric.array_repr
- if self.size > 0 or self.shape==(0,):
+ if self.size > 0 or self.shape == (0,):
lst = sb.array2string(self, separator=', ')
else:
# show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(self.shape),)
if (self.dtype.type is record
- or (not issubclass(self.dtype.type, nt.void)) ):
+ or (not issubclass(self.dtype.type, nt.void))):
# If this is a full record array (has numpy.record dtype),
# or if it has a scalar (non-void) dtype with no records,
# represent it using the rec.array function. Since rec.array
@@ -683,7 +685,6 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
""" create a (read-only) record array from binary data contained in
a string"""
-
if dtype is None and formats is None:
raise ValueError("Must have dtype= or formats=")
@@ -756,7 +757,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
shapesize = shapeprod * itemsize
if shapesize < 0:
shape = list(shape)
- shape[ shape.index(-1) ] = size / -shapesize
+ shape[shape.index(-1)] = size / -shapesize
shape = tuple(shape)
shapeprod = sb.array(shape).prod()
@@ -781,10 +782,9 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
"""Construct a record array from a wide-variety of objects.
"""
- if (isinstance(obj, (type(None), str)) or isfileobj(obj)) \
- and (formats is None) \
- and (dtype is None):
- raise ValueError("Must define formats (or dtype) if object is "\
+ if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
+ (formats is None) and (dtype is None)):
+ raise ValueError("Must define formats (or dtype) if object is "
"None, string, or an open file")
kwds = {}
@@ -795,10 +795,10 @@ def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
aligned, byteorder)._descr
else:
kwds = {'formats': formats,
- 'names' : names,
- 'titles' : titles,
- 'aligned' : aligned,
- 'byteorder' : byteorder
+ 'names': names,
+ 'titles': titles,
+ 'aligned': aligned,
+ 'byteorder': byteorder
}
if obj is None:
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 7a82f1e35..9221bd2c4 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -3,11 +3,9 @@ from __future__ import division, print_function
import imp
import os
import sys
-import shutil
import pickle
import copy
import warnings
-import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
@@ -247,7 +245,7 @@ def check_ieee_macros(config):
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
- st = config.check_decl(f, headers = ["Python.h", "math.h"])
+ st = config.check_decl(f, headers=["Python.h", "math.h"])
if st:
_add_decl(f)
@@ -276,7 +274,7 @@ def check_types(config_cmd, ext, build_dir):
result = config_cmd.check_header('Python.h')
if not result:
raise SystemError(
- "Cannot compile 'Python.h'. Perhaps you need to "\
+ "Cannot compile 'Python.h'. Perhaps you need to "
"install python-dev|python-devel.")
res = config_cmd.check_header("endian.h")
if res:
@@ -285,7 +283,7 @@ def check_types(config_cmd, ext, build_dir):
# Check basic types sizes
for type in ('short', 'int', 'long'):
- res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
+ res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers=["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
@@ -297,7 +295,7 @@ def check_types(config_cmd, ext, build_dir):
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
- headers = ["Python.h"])
+ headers=["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
@@ -317,7 +315,6 @@ def check_types(config_cmd, ext, build_dir):
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
-
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
@@ -350,7 +347,7 @@ def check_types(config_cmd, ext, build_dir):
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
- "Config wo CHAR_BIT is not supported"\
+ "Config wo CHAR_BIT is not supported"
", please contact the maintainers")
return private_defines, public_defines
@@ -382,7 +379,7 @@ def visibility_define(config):
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
- from numpy.distutils.system_info import get_info, default_lib_dirs
+ from numpy.distutils.system_info import get_info
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
@@ -401,7 +398,7 @@ def configuration(parent_package='',top_path=None):
open(generate_umath_py, 'U'), generate_umath_py,
('.py', 'U', 1))
- header_dir = 'include/numpy' # this is relative to config.path_in_package
+ header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
@@ -431,7 +428,7 @@ def configuration(parent_package='',top_path=None):
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
- if sys.platform=='win32' or os.name=='nt':
+ if sys.platform == 'win32' or os.name == 'nt':
win32_checks(moredefs)
# C99 restrict keyword
@@ -558,7 +555,7 @@ def configuration(parent_package='',top_path=None):
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Check wether we can use inttypes (C99) formats
- if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
+ if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
@@ -645,7 +642,7 @@ def configuration(parent_package='',top_path=None):
# generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
- sources = [join('src', 'dummymodule.c'),
+ sources=[join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
@@ -656,6 +653,7 @@ def configuration(parent_package='',top_path=None):
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
+
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
@@ -678,7 +676,8 @@ def configuration(parent_package='',top_path=None):
npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
- join('src', 'npymath', 'halffloat.c')]
+ join('src', 'npymath', 'halffloat.c')
+ ]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib')
@@ -692,19 +691,18 @@ def configuration(parent_package='',top_path=None):
#######################################################################
# This library is created for the build but it is not installed
- npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),
- join('src', 'npysort', 'mergesort.c.src'),
- join('src', 'npysort', 'heapsort.c.src'),
- join('src', 'private', 'npy_partition.h.src'),
- join('src', 'npysort', 'selection.c.src'),
- join('src', 'private', 'npy_binsearch.h.src'),
- join('src', 'npysort', 'binsearch.c.src'),
- ]
+ npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
+ join('src', 'npysort', 'mergesort.c.src'),
+ join('src', 'npysort', 'heapsort.c.src'),
+ join('src', 'private', 'npy_partition.h.src'),
+ join('src', 'npysort', 'selection.c.src'),
+ join('src', 'private', 'npy_binsearch.h.src'),
+ join('src', 'npysort', 'binsearch.c.src'),
+ ]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
-
#######################################################################
# multiarray module #
#######################################################################
@@ -721,7 +719,8 @@ def configuration(parent_package='',top_path=None):
join(local_dir, subpath, 'nditer_templ.c.src'),
join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
join(local_dir, subpath, 'einsum.c.src'),
- join(local_dir, 'src', 'private', 'templ_common.h.src')]
+ join(local_dir, 'src', 'private', 'templ_common.h.src')
+ ]
# numpy.distutils generate .c from .c.src in weird directories, we have
# to add them there as they depend on the build_dir
@@ -834,11 +833,11 @@ def configuration(parent_package='',top_path=None):
]
blas_info = get_info('blas_opt', 0)
- if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
+ if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
extra_info = blas_info
multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
join('src', 'multiarray', 'python_xerbla.c'),
- ])
+ ])
if uses_accelerate_framework(blas_info):
multiarray_src.extend(get_sgemv_fix())
else:
@@ -849,7 +848,6 @@ def configuration(parent_package='',top_path=None):
multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray_src.append(generate_multiarray_templated_sources)
-
config.add_extension('multiarray',
sources=multiarray_src +
[generate_config_h,
@@ -885,7 +883,6 @@ def configuration(parent_package='',top_path=None):
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
-
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
@@ -926,50 +923,49 @@ def configuration(parent_package='',top_path=None):
umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
- sources = umath_src +
+ sources=umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
- depends = deps + umath_deps,
- libraries = ['npymath'],
+ depends=deps + umath_deps,
+ libraries=['npymath'],
)
-
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
- sources = [join('src', 'umath', 'umath_tests.c.src')])
+ sources=[join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
- sources = [join('src', 'umath', 'test_rational.c.src')])
+ sources=[join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
- sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
+ sources=[join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
- sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
+ sources=[join('src', 'multiarray', 'multiarray_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
- sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
+ sources=[join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
@@ -978,6 +974,6 @@ def configuration(parent_package='',top_path=None):
return config
-if __name__=='__main__':
+if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 09e6b1595..c923b6d91 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -2,12 +2,10 @@ from __future__ import division, absolute_import, print_function
# Code common to build tools
import sys
-from os.path import join
import warnings
import copy
import binascii
-from distutils.ccompiler import CompileError
#-------------------
# Versioning support
@@ -54,11 +52,13 @@ def is_released(config):
return True
def get_api_versions(apiversion, codegen_dir):
- """Return current C API checksum and the recorded checksum for the given
- version of the C API version."""
- api_files = [join(codegen_dir, 'numpy_api_order.txt'),
- join(codegen_dir, 'ufunc_api_order.txt')]
+ """
+ Return current C API checksum and the recorded checksum.
+
+ Return current C API checksum and the recorded checksum for the given
+ version of the C API version.
+ """
# Compute the hash of the current API as defined in the .txt files in
# code_generators
sys.path.insert(0, codegen_dir)
@@ -83,11 +83,12 @@ def check_api_version(apiversion, codegen_dir):
# To compute the checksum of the current API, use
# code_generators/cversions.py script
if not curapi_hash == api_hash:
- msg = "API mismatch detected, the C API version " \
- "numbers have to be updated. Current C api version is %d, " \
- "with checksum %s, but recorded checksum for C API version %d in " \
- "codegen_dir/cversions.txt is %s. If functions were added in the " \
- "C API, you have to update C_API_VERSION in %s."
+ msg = ("API mismatch detected, the C API version "
+ "numbers have to be updated. Current C api version is %d, "
+ "with checksum %s, but recorded checksum for C API version %d in "
+ "codegen_dir/cversions.txt is %s. If functions were added in the "
+ "C API, you have to update C_API_VERSION in %s."
+ )
warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
__file__),
MismatchCAPIWarning)
@@ -106,8 +107,8 @@ OPTIONAL_STDFUNCS = ["expm1", "log1p", "acosh", "asinh", "atanh",
OPTIONAL_HEADERS = [
# sse headers only enabled automatically on amd64/x32 builds
- "xmmintrin.h", # SSE
- "emmintrin.h", # SSE2
+ "xmmintrin.h", # SSE
+ "emmintrin.h", # SSE2
"features.h", # for glibc version linux
]
@@ -121,8 +122,8 @@ OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
("__builtin_bswap64", '5u'),
("__builtin_expect", '5, 0'),
("__builtin_mul_overflow", '5, 5, (int*)5'),
- ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
- ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
+ ("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
+ ("_mm_load_pd", '(double*)0', "emmintrin.h"), # SSE2
]
# function attributes
@@ -134,7 +135,7 @@ OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
'attribute_optimize_opt_3'),
('__attribute__((nonnull (1)))',
'attribute_nonnull'),
- ]
+ ]
# variable attributes tested via "int %s a" % attribute
OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
@@ -282,10 +283,10 @@ _MOTOROLA_EXTENDED_12B = ['300', '031', '000', '000', '353', '171',
_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
'000', '000', '000', '000', '000', '000', '000', '000']
_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
-_DOUBLE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] + \
- ['000'] * 8
-_DOUBLE_DOUBLE_LE = ['000', '000', '000', '124', '064', '157', '235', '301'] + \
- ['000'] * 8
+_DOUBLE_DOUBLE_BE = (['301', '235', '157', '064', '124', '000', '000', '000'] +
+ ['000'] * 8)
+_DOUBLE_DOUBLE_LE = (['000', '000', '000', '124', '064', '157', '235', '301'] +
+ ['000'] * 8)
def long_double_representation(lines):
"""Given a binary dump as given by GNU od -b, look for long double
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 3259f3b1d..0dd2e164a 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -4,7 +4,7 @@ __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'vstack', 'hstack',
'stack']
from . import numeric as _nx
-from .numeric import array, asanyarray, newaxis
+from .numeric import asanyarray, newaxis
def atleast_1d(*arys):
"""
@@ -48,9 +48,9 @@ def atleast_1d(*arys):
res = []
for ary in arys:
ary = asanyarray(ary)
- if len(ary.shape) == 0 :
+ if len(ary.shape) == 0:
result = ary.reshape(1)
- else :
+ else:
result = ary
res.append(result)
if len(res) == 1:
@@ -98,11 +98,11 @@ def atleast_2d(*arys):
res = []
for ary in arys:
ary = asanyarray(ary)
- if len(ary.shape) == 0 :
+ if len(ary.shape) == 0:
result = ary.reshape(1, 1)
- elif len(ary.shape) == 1 :
+ elif len(ary.shape) == 1:
result = ary[newaxis,:]
- else :
+ else:
result = ary
res.append(result)
if len(res) == 1:
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index bffcc26a6..ce7b61287 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -2987,18 +2987,16 @@ BOOL_argmin(npy_bool *ip, npy_intp n, npy_intp *min_ind,
* #fname = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
* LONG, ULONG, LONGLONG, ULONGLONG,
* HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * CFLOAT, CDOUBLE, CLONGDOUBLE,
- * DATETIME, TIMEDELTA#
+ * CFLOAT, CDOUBLE, CLONGDOUBLE#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_half, npy_float, npy_double, npy_longdouble,
- * npy_float, npy_double, npy_longdouble,
- * npy_datetime, npy_timedelta#
- * #isfloat = 0*10, 1*7, 0*2#
- * #isnan = nop*10, npy_half_isnan, npy_isnan*6, nop*2#
- * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*8#
- * #iscomplex = 0*14, 1*3, 0*2#
- * #incr = ip++*14, ip+=2*3, ip++*2#
+ * npy_float, npy_double, npy_longdouble#
+ * #isfloat = 0*10, 1*7#
+ * #isnan = nop*10, npy_half_isnan, npy_isnan*6#
+ * #le = _LESS_THAN_OR_EQUAL*10, npy_half_le, _LESS_THAN_OR_EQUAL*6#
+ * #iscomplex = 0*14, 1*3#
+ * #incr = ip++*14, ip+=2*3#
*/
static int
@fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind,
@@ -3062,6 +3060,43 @@ static int
#undef _LESS_THAN_OR_EQUAL
+/**begin repeat
+ *
+ * #fname = DATETIME, TIMEDELTA#
+ * #type = npy_datetime, npy_timedelta#
+ */
+static int
+@fname@_argmin(@type@ *ip, npy_intp n, npy_intp *min_ind,
+ PyArrayObject *NPY_UNUSED(aip))
+{
+ /* NPY_DATETIME_NAT is smaller than every other value, we skip
+ * it for consistency with min().
+ */
+ npy_intp i;
+ @type@ mp = NPY_DATETIME_NAT;
+
+ i = 0;
+ while (i < n && mp == NPY_DATETIME_NAT) {
+ mp = ip[i];
+ i++;
+ }
+ if (i == n) {
+ /* All NaTs: return 0 */
+ *min_ind = 0;
+ return 0;
+ }
+ *min_ind = i - 1;
+ for (; i < n; i++) {
+ if (mp > ip[i] && ip[i] != NPY_DATETIME_NAT) {
+ mp = ip[i];
+ *min_ind = i;
+ }
+ }
+ return 0;
+}
+
+/**end repeat**/
+
static int
OBJECT_argmax(PyObject **ip, npy_intp n, npy_intp *max_ind,
PyArrayObject *NPY_UNUSED(aip))
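A Python-level sketch of what the new DATETIME/TIMEDELTA argmin loop above does: leading NaT values are skipped, NaT entries never win, and an all-NaT input falls back to index 0. The values mirror the argmin test cases added further down; illustrative only, assuming the patched numpy:

    import numpy as np

    a = np.array(['NaT', 'NaT', '2010-01-03', 'NaT',
                  '2015-09-23', '1932-10-10'], dtype='M8[D]')
    print(np.argmin(a))    # 5  (NaT entries are ignored, 1932 is the smallest)

    b = np.array(['NaT', 'NaT', 'NaT'], dtype='M8[D]')
    print(np.argmin(b))    # 0  (all NaT: the loop returns index 0)
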
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 668a5b627..2216a3637 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -1226,8 +1226,6 @@ array_assign_boolean_subscript(PyArrayObject *self,
return -1;
}
- NPY_BEGIN_THREADS_NDITER(iter);
-
innerstrides = NpyIter_GetInnerStrideArray(iter);
dataptrs = NpyIter_GetDataPtrArray(iter);
@@ -1247,6 +1245,8 @@ array_assign_boolean_subscript(PyArrayObject *self,
return -1;
}
+ NPY_BEGIN_THREADS_NDITER(iter);
+
do {
innersize = *NpyIter_GetInnerLoopSizePtr(iter);
self_data = dataptrs[0];
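The mapping.c hunks above delay releasing the GIL until after the dtype transfer function has been set up, since that cast path may need the Python API, for example to issue a ComplexWarning. A rough Python-level reproduction of the affected path, the same scenario the TestFancingIndexingCast case below exercises; sketch only:

    import warnings
    import numpy as np

    mask = np.zeros((8, 63), dtype=bool)
    mask[0, 1] = True
    target = np.zeros((8, 63))

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter('always')
        # Boolean-mask assignment that casts complex -> float:
        # the imaginary part is dropped and a ComplexWarning is emitted.
        target[mask] = np.array([1j])

    print(target[0, 1])                                                   # 0.0
    print(any(issubclass(w.category, np.ComplexWarning) for w in caught)) # True
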
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index d0fd0e43b..21e36ee2f 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1141,26 +1141,17 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- if (IS_BINARY_REDUCE) {
- BINARY_REDUCE_LOOP(@type@) {
- const @type@ in2 = *(@type@ *)ip2;
- io1 = (io1 @OP@ in2 || in2 == NPY_DATETIME_NAT) ? io1 : in2;
+ BINARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ in2 = *(@type@ *)ip2;
+ if (in1 == NPY_DATETIME_NAT) {
+ *((@type@ *)op1) = in2;
}
- *((@type@ *)iop1) = io1;
- }
- else {
- BINARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- const @type@ in2 = *(@type@ *)ip2;
- if (in1 == NPY_DATETIME_NAT) {
- *((@type@ *)op1) = in2;
- }
- else if (in2 == NPY_DATETIME_NAT) {
- *((@type@ *)op1) = in1;
- }
- else {
- *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2;
- }
+ else if (in2 == NPY_DATETIME_NAT) {
+ *((@type@ *)op1) = in1;
+ }
+ else {
+ *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2;
}
}
}
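A brief sketch of the reduction behaviour the rewritten datetime/timedelta min/max loop above targets, matching the TestMinMax.test_datetime case added below: NaT values are skipped rather than propagated. Assuming the patched numpy is importable as np:

    import numpy as np

    a = np.arange(10).astype('m8[s]')
    a[0] = 'NaT'
    # The reduction no longer sticks on the leading NaT.
    print(np.amin(a))    # 1 seconds  (a[1])
    print(np.amax(a))    # 9 seconds
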
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index a64d44473..aba030f3d 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -34,6 +34,7 @@ class TestLinspace(TestCase):
assert_(y[-1] == 10)
y = linspace(2, 10, endpoint=0)
assert_(y[-1] < 10)
+ assert_raises(ValueError, linspace, 0, 10, num=-1)
def test_corner(self):
y = list(linspace(0, 1, 1))
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index d412c44fb..d27a30f3a 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -535,6 +535,27 @@ class TestSubclasses(TestCase):
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
+class TestFancingIndexingCast(TestCase):
+ def test_boolean_index_cast_assign(self):
+ # Setup the boolean index and float arrays.
+ shape = (8, 63)
+ bool_index = np.zeros(shape).astype(bool)
+ bool_index[0, 1] = True
+ zero_array = np.zeros(shape)
+
+ # Assigning float is fine.
+ zero_array[bool_index] = np.array([1])
+ assert_equal(zero_array[0, 1], 1)
+
+ # Fancy indexing works, although we get a cast warning.
+ assert_warns(np.ComplexWarning,
+ zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
+ assert_equal(zero_array[0, 1], 2) # No complex part
+
+ # Cast complex to float, throwing away the imaginary portion.
+ assert_warns(np.ComplexWarning,
+ zero_array.__setitem__, bool_index, np.array([1j]))
+ assert_equal(zero_array[0, 1], 0)
class TestFancyIndexingEquivalence(TestCase):
def test_object_assign(self):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 9822d7dfc..34045b4a4 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -2681,12 +2681,24 @@ class TestArgmax(TestCase):
np.datetime64('2015-11-20T12:20:59'),
np.datetime64('1932-09-23T10:10:13'),
np.datetime64('2014-10-10T03:50:30')], 3),
+ # Assorted tests with NaTs
+ ([np.datetime64('NaT'),
+ np.datetime64('NaT'),
+ np.datetime64('2010-01-03T05:14:12'),
+ np.datetime64('NaT'),
+ np.datetime64('2015-09-23T10:10:13'),
+ np.datetime64('1932-10-10T03:50:30')], 4),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
- np.datetime64('2001-10-15T04:10:32'),
+ np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 0),
+ ([np.timedelta64(2, 's'),
+ np.timedelta64(1, 's'),
+ np.timedelta64('NaT', 's'),
+ np.timedelta64(3, 's')], 3),
+ ([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 0),
@@ -2793,12 +2805,24 @@ class TestArgmin(TestCase):
np.datetime64('2014-11-20T12:20:59'),
np.datetime64('2015-09-23T10:10:13'),
np.datetime64('1932-10-10T03:50:30')], 5),
+ # Assorted tests with NaTs
+ ([np.datetime64('NaT'),
+ np.datetime64('NaT'),
+ np.datetime64('2010-01-03T05:14:12'),
+ np.datetime64('NaT'),
+ np.datetime64('2015-09-23T10:10:13'),
+ np.datetime64('1932-10-10T03:50:30')], 5),
([np.datetime64('2059-03-14T12:43:12'),
np.datetime64('1996-09-21T14:43:15'),
- np.datetime64('2001-10-15T04:10:32'),
+ np.datetime64('NaT'),
np.datetime64('2022-12-25T16:02:16'),
np.datetime64('1963-10-04T03:14:12'),
np.datetime64('2013-05-08T18:15:23')], 4),
+ ([np.timedelta64(2, 's'),
+ np.timedelta64(1, 's'),
+ np.timedelta64('NaT', 's'),
+ np.timedelta64(3, 's')], 1),
+ ([np.timedelta64('NaT', 's')] * 3, 0),
([timedelta(days=5, seconds=14), timedelta(days=2, seconds=35),
timedelta(days=-1, seconds=23)], 2),
@@ -2887,6 +2911,7 @@ class TestArgmin(TestCase):
class TestMinMax(TestCase):
+
def test_scalar(self):
assert_raises(ValueError, np.amax, 1, 1)
assert_raises(ValueError, np.amin, 1, 1)
@@ -2900,6 +2925,21 @@ class TestMinMax(TestCase):
assert_raises(ValueError, np.amax, [1, 2, 3], 1000)
assert_equal(np.amax([[1, 2, 3]], axis=1), 3)
+ def test_datetime(self):
+ # NaTs are ignored
+ for dtype in ('m8[s]', 'm8[Y]'):
+ a = np.arange(10).astype(dtype)
+ a[3] = 'NaT'
+ assert_equal(np.amin(a), a[0])
+ assert_equal(np.amax(a), a[9])
+ a[0] = 'NaT'
+ assert_equal(np.amin(a), a[1])
+ assert_equal(np.amax(a), a[9])
+ a.fill('NaT')
+ assert_equal(np.amin(a), a[0])
+ assert_equal(np.amax(a), a[0])
+
+
class TestNewaxis(TestCase):
def test_basic(self):
sk = array([0, -0.1, 0.1])
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index cece8ac8d..c68bf2634 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -80,7 +80,7 @@ def ix_(*args):
new = new.astype(_nx.intp)
if issubdtype(new.dtype, _nx.bool_):
new, = new.nonzero()
- new.shape = (1,)*k + (new.size,) + (1,)*(nd-k-1)
+ new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
out.append(new)
return tuple(out)
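Why the ix_ hunk above replaces the in-place shape assignment with reshape: asarray may hand back the caller's own array, and mutating new.shape would then silently change the caller's input. reshape returns a view and leaves the input alone, which is what the new test_repeated_input case below checks. A small sketch, assuming the patched numpy:

    import numpy as np

    x = np.arange(5)
    rows, cols = np.ix_(x, x)
    print(rows.shape, cols.shape)   # (5, 1) (1, 5)
    print(x.shape)                  # (5,)  -- the input keeps its original shape
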
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 0e3c98ee1..bb2ae1509 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -196,6 +196,15 @@ class TestIx_(TestCase):
idx2d = [[1, 2, 3], [4, 5, 6]]
assert_raises(ValueError, np.ix_, idx2d)
+ def test_repeated_input(self):
+ length_of_vector = 5
+ x = np.arange(length_of_vector)
+ out = ix_(x, x)
+ assert_equal(out[0].shape, (length_of_vector, 1))
+ assert_equal(out[1].shape, (1, length_of_vector))
+ # check that input shape is not modified
+ assert_equal(x.shape, (length_of_vector,))
+
def test_c_():
a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]