-rw-r--r--   scipy/base/arrayprint.py            13
-rw-r--r--   scipy/base/numerictypes.py          53
-rw-r--r--   scipy/base/records.py               84
-rw-r--r--   scipy/base/src/arrayobject.c        15
-rw-r--r--   scipy/base/src/multiarraymodule.c   33
5 files changed, 113 insertions, 85 deletions
diff --git a/scipy/base/arrayprint.py b/scipy/base/arrayprint.py
index 6124a1bc2..4f3352ddd 100644
--- a/scipy/base/arrayprint.py
+++ b/scipy/base/arrayprint.py
@@ -140,26 +140,24 @@ def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
summary_insert = ""
data = a.ravel()
-
-
items_per_line = a.shape[-1]
try:
format_function = a._format
except AttributeError:
- type = a.dtype
- if issubclass(type, _nt.bool):
+ dtype = a.dtype
+ if issubclass(dtype, _nt.bool):
format = "%s"
format_function = lambda x, f = format: format % x
- if issubclass(type, _nt.integer):
+ if issubclass(dtype, _nt.integer):
max_str_len = max(len(str(max_reduce(data))),
len(str(min_reduce(data))))
format = '%' + str(max_str_len) + 'd'
format_function = lambda x, f = format: _formatInteger(x, f)
- elif issubclass(type, _nt.floating):
+ elif issubclass(dtype, _nt.floating):
format = _floatFormat(data, precision, suppress_small)
format_function = lambda x, f = format: _formatFloat(x, f)
- elif issubclass(type, _nt.complexfloating):
+ elif issubclass(dtype, _nt.complexfloating):
real_format = _floatFormat(
data.real, precision, suppress_small, sign=0)
imag_format = _floatFormat(
@@ -174,7 +172,6 @@ def _array2string(a, max_line_width, precision, suppress_small, separator=' ',
next_line_prefix += " "*len(prefix) # skip over array(
-
lst = _formatArray(a, format_function, len(a.shape), max_line_width,
next_line_prefix, separator,
_summaryEdgeItems, summary_insert)[:-1]
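
The arrayprint.py hunk above is essentially a rename: the local variable `type` becomes `dtype`, so the builtin `type()` is no longer shadowed inside `_array2string`. A minimal, self-contained sketch of the hazard being avoided (illustrative only, not part of the commit; `_Dummy` is a made-up stand-in for an array):

    class _Dummy:
        dtype = float                      # made-up stand-in for an array's dtype attribute

    def shadowed(a):
        type = a.dtype                     # rebinds `type`; the builtin is hidden below
        return type(42)                    # calls the dtype object (here float), not builtin type()

    def renamed(a):
        dtype = a.dtype                    # same attribute under a non-clashing name
        return type(42)                    # builtin type() still reachable: <type 'int'>

    print shadowed(_Dummy()), renamed(_Dummy())    # 42.0 <type 'int'>
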
diff --git a/scipy/base/numerictypes.py b/scipy/base/numerictypes.py
index ce0594dc6..1423571a4 100644
--- a/scipy/base/numerictypes.py
+++ b/scipy/base/numerictypes.py
@@ -80,7 +80,7 @@ $Id: numerictypes.py,v 1.17 2005/09/09 22:20:06 teoliphant Exp $
# we add more at the bottom
__all__ = ['typeDict', 'arraytypes', 'ScalarType', 'obj2dtype', 'cast', 'nbytes', 'dtype2char']
-from multiarray import typeinfo, ndarray, array
+from multiarray import typeinfo, ndarray, array, empty
import types as _types
# we don't export these for import *, but we do want them accessible
@@ -161,6 +161,7 @@ def _add_types():
if base != '':
allTypes["%s%d" % (base, bit)] = typeobj
typeDict["%s%d" % (base, bit)] = typeobj
+ typeDict["%s%d" % (base.capitalize(), bit)] = typeobj
if char != '':
typeDict[char] = typeobj
@@ -207,7 +208,6 @@ def _set_up_aliases():
_set_up_aliases()
# Now, construct dictionary to lookup character codes from types
-
_dtype2char_dict = {}
def _construct_char_code_lookup():
for name in typeinfo.keys():
@@ -217,6 +217,7 @@ def _construct_char_code_lookup():
_dtype2char_dict[tup[-1]] = tup[0]
_construct_char_code_lookup()
+
arraytypes = {'int': [],
'uint':[],
'float':[],
@@ -302,30 +303,12 @@ def obj2dtype(rep, default=None):
res = typeDict.get(rep, default)
return res
-def dtype2char(dtype):
- dtype = obj2dtype(dtype)
- if dtype is None:
- raise ValueError, "unrecognized type"
- return _dtype2char_dict[dtype]
-
-
-# Create dictionary of casting functions that wrap sequences
-# indexed by type or type character
# This dictionary allows look up based on any alias for a type
class _typedict(dict):
def __getitem__(self, obj):
return dict.__getitem__(self, obj2dtype(obj))
-cast = _typedict()
-ScalarType = [_types.IntType, _types.FloatType,
- _types.ComplexType, _types.LongType, _types.BooleanType,
- _types.StringType, _types.UnicodeType, _types.BufferType]
-ScalarType.extend(_dtype2char_dict.keys())
-ScalarType = tuple(ScalarType)
-for key in _dtype2char_dict.keys():
- cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
-
nbytes = _typedict()
_alignment = _typedict()
_maxvals = _typedict()
@@ -346,6 +329,36 @@ def _construct_lookups():
_construct_lookups()
+def dtype2char(dtype):
+ dtype = obj2dtype(dtype)
+ if dtype is None:
+ raise ValueError, "unrecognized type"
+ return _dtype2char_dict[dtype]
+
+# Create dictionary of casting functions that wrap sequences
+# indexed by type or type character
+
+
+cast = _typedict()
+ScalarType = [_types.IntType, _types.FloatType,
+ _types.ComplexType, _types.LongType, _types.BooleanType,
+ _types.StringType, _types.UnicodeType, _types.BufferType]
+ScalarType.extend(_dtype2char_dict.keys())
+ScalarType = tuple(ScalarType)
+for key in _dtype2char_dict.keys():
+ cast[key] = lambda x, k=key : array(x, copy=False).astype(k)
+
+
+_unicodesize = array('u','U').itemsize
+
+# Create the typestring lookup dictionary
+_typestr = _typedict()
+for key in _dtype2char_dict.keys():
+ if issubclass(key, allTypes['flexible']):
+ _typestr[key] = _dtype2char_dict[key]
+ else:
+ _typestr[key] = empty((1,),key).dtypestr[1:]
+
# Now add the types we've determined to this module
for key in allTypes:
globals()[key] = allTypes[key]
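
The numerictypes.py changes add capitalized aliases (e.g. 'Int32') to `typeDict`, move the `dtype2char`/`cast` definitions below `_construct_lookups()`, and introduce `_unicodesize` plus a `_typestr` lookup built from `dtypestr`. A rough usage sketch, assuming `obj2dtype` resolves both type objects and name strings as the `_typedict` class implies (illustrative, not from the commit; printed values are examples, not verified):

    from scipy.base import numerictypes as nt

    print nt.typeDict['Int32'] is nt.typeDict['int32']   # capitalized alias -> True
    print nt.dtype2char(nt.float64)                      # character code, e.g. 'd'
    print nt._typestr[nt.float64]                        # typestring taken from dtypestr, e.g. 'f8'
    a = nt.cast[nt.float64]([1, 2, 3])                   # wrap the sequence and cast to float64
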
diff --git a/scipy/base/records.py b/scipy/base/records.py
index c80e76676..7de3a95b2 100644
--- a/scipy/base/records.py
+++ b/scipy/base/records.py
@@ -10,7 +10,9 @@ import re
# are equally allowed
format_re = re.compile(r'(?P<repeat> *[(]?[ ,0-9]*[)]? *)(?P<dtype>[A-Za-z0-9.]*)')
-numfmt = sb.typeDict
+numfmt = nt.typeDict
+
+
def find_duplicate(list):
"""Find duplication in a list, return a list of dupicated elements"""
@@ -63,7 +65,9 @@ class format_parser:
def _parseFormats(self, formats, aligned=0):
""" Parse the field formats """
- revdict = sb.revdict
+ _alignment = nt._alignment
+ _bytes = nt.nbytes
+ _typestr = nt._typestr
if (type(formats) in [types.ListType, types.TupleType]):
_fmt = formats[:]
elif (type(formats) == types.StringType):
@@ -83,8 +87,7 @@ class format_parser:
sum = 0
maxalign = 1
- unisize = sb.typeinfo['UNICODE'][2] or 2
- print unisize
+ unisize = nt._unicodesize
for i in range(self._nfields):
# parse the formats into repeats and formats
@@ -106,8 +109,8 @@ class format_parser:
else:
_repeat = eval(_repeat)
_fmt[i] = numfmt[_dtype]
- if not issubclass(_fmt[i], sb.Flexible):
- self._itemsizes[i] = revdict[_fmt[i]][0][2] >> 3
+ if not issubclass(_fmt[i], nt.flexible):
+ self._itemsizes[i] = _bytes[_fmt[i]]
self._repeats[i] = _repeat
if (type(_repeat) in [types.ListType, types.TupleType]):
@@ -118,24 +121,20 @@ class format_parser:
sum += self._sizes[i]
if self._rec_aligned:
# round sum up to multiple of alignment factor
- align = revdict[_fmt[i]][0][3]
+ align = _alignment[_fmt[i]]
sum = ((sum + align - 1)/align) * align
maxalign = max(maxalign, align)
self._stops[i] = sum - 1
# Unify the appearance of _format, independent of input formats
- revfmt = revdict[_fmt[i]][1][0]
+ revfmt = _typestr[_fmt[i]]
self._formats[i] = `_repeat`+revfmt
- if issubclass(_fmt[i], sb.Flexible):
- if issubclass(_fmt[i], sb.Unicode):
+ if issubclass(_fmt[i], nt.flexible):
+ if issubclass(_fmt[i], nt.unicode_):
self._formats[i] += `self._itemsizes[i] / unisize`
else:
self._formats[i] += `self._itemsizes[i]`
- elif issubclass(_fmt[i], sb.Object):
- pass
- else:
- self._formats[i] += `revdict[_fmt[i]][1][1]`
-
+
self._fmt = _fmt
# This pads record so next record is aligned if self._rec_align is true.
# Otherwise next the record starts right after the end of the last one.
@@ -146,51 +145,40 @@ class record(nt.void):
pass
class ndrecarray(sb.ndarray):
- def __new__(self, *args, **kwds):
- buf = args[0]
+ def __new__(subtype, *args, **kwds):
+ shape = args[0]
formats = args[1]
- shape = kwds.get('shape',-1)
+ buf = kwds.get('buf',None)
aligned = kwds.get('aligned',0)
parsed = format_parser(formats, aligned)
itemsize = parsed._total_itemsize
- if (shape != None):
- if type(shape) in [types.IntType, types.LongType]:
- shape = (shape,)
- elif (type(shape) == types.TupleType and type(shape[0]) in \
- [types.IntType, types.LongType]):
- pass
- else:
- raise NameError, "Illegal shape %s" % `shape`
if buf is None:
- this = sb.ndarray.__new__(self, shape, record, itemsize)
+ self = sb.ndarray.__new__(subtype, shape, record, itemsize)
else:
byteorder = kwds.get('byteorder', sys.byteorder)
swapped = 0
if (byteorder != sys.byteorder):
swapped = 1
- this = sb.ndarray.__new__(self, shape, record, itemsize, buffer=buf,
+ self = sb.ndarray.__new__(subtype, shape, record, itemsize, buffer=buf,
swapped=swapped)
- this.parsed = parsed
- return this
-
+ self.parsed = parsed
+ return self
- def __init__(self, buf, formats, shape=-1, names=None, byteoffset=0,
- bytestride=None, byteorder=sys.byteorder, aligned=0):
- print "init: ", buf, formats, shape, names, byteoffset, bytestride,\
+ def __init__(self, shape, formats, names=None, buf=None, offset=0,
+ strides=None, byteorder=sys.byteorder, aligned=0):
+ print "init: ", buf, formats, shape, names, offset, strides,\
byteorder, aligned
self._updateattr()
self._fieldNames(names)
self._fields = {}
-
# This should grab the names out of self.parsed that are important
# to have later and should set self._attributes
# to the list of meta information that needs to be carried around
def _updateattr(self):
self._nfields = self.parsed._nfields
- self._attributes = ['_rec_aligned', '_nfields']
- del self.parsed
+ self._attributes = ['parsed']
def __array_finalize__(self, obj):
self._attributes = obj._attributes
@@ -224,3 +212,25 @@ class ndrecarray(sb.ndarray):
if _dup:
raise ValueError, "Duplicate field names: %s" % _dup
+ def _get_fields(self):
+ self._fields = {}
+ parsed = self.parsed
+ basearr = self.__array__()
+ for indx in range(self._nfields):
+ # We need the offset and the data type of the field
+ _offset = parsed._stops[indx] - parsed._sizes[indx] + 1
+ _type = parsed._fmt[indx]
+ if issubclass(_type, nt.flexible):
+ _type = nt.dtype2char(_type)+`parsed._itemsizes[indx]`
+ arr = basearr.getfield(_type, _offset)
+ # Put this array as a value in dictionary
+ # Do both name and index
+ self._fields[indx] = arr
+ self._fields[self._names[indx]] = arr
+
+ def field(self, field_name):
+ if self._fields == {}:
+ self._get_fields()
+ return self._fields[field_name]
+
+
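
In the reworked records.py, `ndrecarray.__new__` now takes the shape as its first argument and the buffer as an optional keyword, the parser pulls its alignment, size, and typestring tables from numerictypes, and `field()` builds per-field views lazily through `_get_fields()`. A constructor sketch, assuming a list of format names (resolved via `nt.typeDict`) and a list of field names are accepted (illustrative only, not tested against this revision):

    from scipy.base import records

    r = records.ndrecarray((3,), ['Int32', 'Float64'], names=['id', 'val'])
    ids = r.field('id')    # first call populates self._fields via _get_fields()
    also = r.field(0)      # fields are keyed by position as well as by name
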
diff --git a/scipy/base/src/arrayobject.c b/scipy/base/src/arrayobject.c
index 80724aab7..02e930ad6 100644
--- a/scipy/base/src/arrayobject.c
+++ b/scipy/base/src/arrayobject.c
@@ -3341,7 +3341,11 @@ PyArray_New(PyTypeObject *subtype, int nd, intp *dims, int type_num,
"O", obj);
if (res == NULL) {
if (self->flags & OWNDATA) PyDataMem_FREE(self);
- goto fail;
+ PyDimMem_FREE(self->dimensions);
+ /* theoretically should free self
+ but this causes segmentation faults...
+ Not sure why */
+ return NULL;
}
else Py_DECREF(res);
}
@@ -3350,7 +3354,7 @@ PyArray_New(PyTypeObject *subtype, int nd, intp *dims, int type_num,
fail:
PyDimMem_FREE(self->dimensions);
- self->ob_type->tp_free((PyObject *)self);
+ subtype->tp_free((PyObject *)self);
return NULL;
}
@@ -3535,8 +3539,8 @@ PyArray_FillWithScalar(PyArrayObject *arr, PyObject *obj)
static PyObject *
array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"shape", "dtype", "itemlen", "buffer", "offset",
- "strides", "swap", "fortran", NULL};
+ static char *kwlist[] = {"shape", "dtype", "itemlen", "buffer",
+ "offset", "strides", "swap", "fortran", NULL};
int itemsize = -1;
PyArray_Typecode typecode = {PyArray_NOTYPE, 0, 0};
int type_num = PyArray_NOTYPE;
@@ -3654,7 +3658,7 @@ array_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
PyDimMem_FREE(dims.ptr);
if (strides.ptr) PyDimMem_FREE(strides.ptr);
- return PyArray_Return(ret);
+ return (PyObject *)ret;
fail:
if (dims.ptr) PyDimMem_FREE(dims.ptr);
@@ -4871,6 +4875,7 @@ Array_FromScalar(PyObject *op, PyArray_Typecode *typecode)
if (itemsize == 0 && PyTypeNum_ISEXTENDED(type)) {
itemsize = PyObject_Length(op);
+ if (type == PyArray_UNICODE) itemsize *= sizeof(Py_UNICODE);
}
ret = (PyArrayObject *)PyArray_New(&PyArray_Type, 0, NULL, type,
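
Seen from Python, the arrayobject.c changes mean `ndarray.__new__` hands back the array itself instead of passing it through `PyArray_Return` (so a 0-d request stays a 0-d array rather than collapsing to a scalar), the failure path frees the dimensions without touching the half-built object, and unicode scalars get their itemsize scaled by sizeof(Py_UNICODE). A sketch of the visible constructor behaviour, using the keyword names from the updated kwlist (assumed, not verified against this revision):

    from scipy.base import ndarray, int32

    a = ndarray(shape=(2, 3), dtype=int32)   # keyword names as in the updated kwlist
    z = ndarray(shape=(), dtype=int32)       # 0-d: now returned as an ndarray, not a scalar
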
diff --git a/scipy/base/src/multiarraymodule.c b/scipy/base/src/multiarraymodule.c
index c160ee425..3223ae9ab 100644
--- a/scipy/base/src/multiarraymodule.c
+++ b/scipy/base/src/multiarraymodule.c
@@ -2765,20 +2765,22 @@ PyArray_TypecodeConverter(PyObject *obj, PyArray_Typecode *at)
more than one byte and itemsize must be
the number of bytes.
*/
- if (check_num == PyArray_UNICODELTR || \
- check_num == PyArray_UNICODE)
- at->itemsize *= sizeof(Py_UNICODE);
-
+ if (check_num == PyArray_UNICODELTR)
+ at->itemsize *= sizeof(Py_UNICODE);
+
/* Support for generic processing */
else if ((check_num != PyArray_STRINGLTR) &&
- (check_num != PyArray_VOIDLTR) &&
- (check_num != PyArray_STRING) &&
- (check_num != PyArray_VOID)) {
- check_num = \
- PyArray_TypestrConvert(at->itemsize,
- check_num);
- at->itemsize = 0;
- if (check_num == PyArray_NOTYPE) goto fail;
+ (check_num != PyArray_VOIDLTR)) {
+ if (at->itemsize == 0) {
+ /* reset because string conversion failed */
+ check_num = PyArray_NOTYPE+10;
+ }
+ else {
+ check_num = \
+ PyArray_TypestrConvert(at->itemsize,
+ check_num);
+ at->itemsize = 0;
+ }
}
}
}
@@ -2804,18 +2806,19 @@ PyArray_TypecodeConverter(PyObject *obj, PyArray_Typecode *at)
return PY_SUCCEED;
}
- if ((descr = PyArray_DescrFromType(check_num))==NULL) {
+ if ((check_num == PyArray_NOTYPE+10) || \
+ (descr = PyArray_DescrFromType(check_num))==NULL) {
/* Now check to see if the object is registered
in typeDict */
if (typeDict != NULL) {
item = PyDict_GetItem(typeDict, obj);
if (item) {
- PyArray_TypecodeFromTypeObject(obj, at);
+ PyArray_TypecodeFromTypeObject(item, at);
PyErr_Clear();
return PY_SUCCEED;
}
}
- return PY_FAIL;
+ goto fail;
}
at->type_num = descr->type_num;
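
The multiarraymodule.c hunks scale the itemsize by sizeof(Py_UNICODE) only for the 'U' letter code, fall back to the `typeDict` lookup when a typestring fails to convert, and pass the looked-up `item` rather than the raw `obj` to `PyArray_TypecodeFromTypeObject`. Combined with the capitalized aliases added in numerictypes.py, names such as 'Int32' should now work as dtype strings. A sketch of the intended effect (assumed behaviour, illustrative only):

    from scipy.base import array

    u = array(u'abc', 'U3')         # 'U' letter code: itemsize scaled by sizeof(Py_UNICODE)
    i = array([1, 2, 3], 'Int32')   # unrecognized typestring falls through to the typeDict lookup
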