Diffstat (limited to 'numpy/lib')
-rw-r--r--  numpy/lib/format.py                     12
-rw-r--r--  numpy/lib/function_base.py               2
-rw-r--r--  numpy/lib/histograms.py                 14
-rw-r--r--  numpy/lib/npyio.py                      13
-rw-r--r--  numpy/lib/recfunctions.py                4
-rw-r--r--  numpy/lib/shape_base.py                  3
-rw-r--r--  numpy/lib/stride_tricks.py               2
-rw-r--r--  numpy/lib/tests/test_format.py          56
-rw-r--r--  numpy/lib/tests/test_function_base.py    5
-rw-r--r--  numpy/lib/tests/test_histograms.py      10
-rw-r--r--  numpy/lib/tests/test_index_tricks.py     3
-rw-r--r--  numpy/lib/tests/test_packbits.py       139
-rw-r--r--  numpy/lib/tests/test_recfunctions.py     9
13 files changed, 200 insertions, 72 deletions
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 7ede0031f..cd8700051 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -274,15 +274,19 @@ def dtype_to_descr(dtype):
def descr_to_dtype(descr):
'''
descr may be stored as dtype.descr, which is a list of
- (name, format, [shape]) tuples. Offsets are not explicitly saved, rather
- empty fields with name,format == '', '|Vn' are added as padding.
+ (name, format, [shape]) tuples where format may be a str or a tuple.
+ Offsets are not explicitly saved, rather empty fields with
+ name, format == '', '|Vn' are added as padding.
This function reverses the process, eliminating the empty padding fields.
'''
- if isinstance(descr, (str, dict)):
+ if isinstance(descr, str):
# No padding removal needed
return numpy.dtype(descr)
-
+ elif isinstance(descr, tuple):
+ # subtype, will always have a shape descr[1]
+ dt = descr_to_dtype(descr[0])
+ return numpy.dtype((dt, descr[1]))
fields = []
offset = 0
for field in descr:
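
For illustration only (not part of the patch): the new tuple branch lets descr_to_dtype
recover subarray (shaped) fields. A minimal round-trip sketch, assuming this patch is applied:

    import numpy as np
    from numpy.lib import format

    # Field 'c' is a shaped (subarray) field; its descr entry nests a (format, shape) tuple.
    dt = np.dtype([('c', ('<f8', (5,)), (2,))])
    descr = format.dtype_to_descr(dt)            # list of (name, format, [shape]) tuples
    assert format.descr_to_dtype(descr) == dt    # padding-free round-trip
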
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index cab680751..7fa51d683 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1235,6 +1235,8 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = asanyarray(a)
nd = a.ndim
+ if nd == 0:
+ raise ValueError("diff requires input that is at least one dimensional")
axis = normalize_axis_index(axis, nd)
combined = []
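
A short illustration of the new guard: with this change a zero-dimensional input fails
immediately with a clear message instead of an axis-normalization error.

    import numpy as np

    np.diff(np.array([1, 4, 9, 16]))   # array([3, 5, 7]); 1-d and higher are unchanged
    try:
        np.diff(np.array(5.0))         # 0-d input
    except ValueError as exc:
        print(exc)                     # diff requires input that is at least one dimensional
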
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index bd44d2732..ee9a3053c 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -555,14 +555,14 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
- 'Auto' (maximum of the 'Sturges' and 'FD' estimators)
+ 'auto' (maximum of the 'sturges' and 'fd' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
- 'FD' (Freedman Diaconis Estimator)
+ 'fd' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
@@ -570,7 +570,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
- 'Scott'
+ 'scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
@@ -580,14 +580,14 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
- 'Rice'
+ 'rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
- 'Sturges'
+ 'sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
@@ -595,7 +595,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
larger, non-normal datasets. This is the default method in R's
``hist`` method.
- 'Doane'
+ 'doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
@@ -607,7 +607,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
- 'Sqrt'
+ 'sqrt'
.. math:: n_h = \sqrt n
The simplest and fastest estimator. Only takes into account the
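
The docstring now lists the estimator names in the lowercase form that ``bins=`` actually
accepts. A quick sketch (the resulting edges depend on the data):

    import numpy as np

    rng = np.random.RandomState(0)
    data = rng.standard_normal(1000)

    np.histogram_bin_edges(data, bins='auto')      # maximum of 'sturges' and 'fd'
    np.histogram_bin_edges(data, bins='fd')
    counts, edges = np.histogram(data, bins='sturges')
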
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 0a284d78f..1845305d1 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1549,7 +1549,8 @@ def fromregex(file, regexp, dtype, encoding=None):
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
- names=None, excludelist=None, deletechars=None,
+ names=None, excludelist=None,
+ deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
@@ -1716,6 +1717,16 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
array((1, 1.3, b'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
+ An example to show comments
+
+ >>> f = StringIO('''
+ ... text,# of chars
+ ... hello world,11
+ ... numpy,5''')
+ >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
+ array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
+ dtype=[('f0', 'S12'), ('f1', 'S12')])
+
"""
if max_rows is not None:
if skip_footer:
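
The signature change only makes the long-standing ``deletechars`` default visible in the
rendered docs; the name-cleaning behaviour itself is unchanged. A hedged sketch of what
that default does when ``names=True`` (example data is made up):

    import numpy as np
    from io import StringIO

    f = StringIO("a$b,c.d\n1,2\n3,4")
    arr = np.genfromtxt(f, delimiter=',', names=True)
    arr.dtype.names    # ('ab', 'cd'): '$' and '.' are in the default deletechars
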
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index ccbcfad91..08a9cf09c 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -976,7 +976,7 @@ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
# next cast to a packed format with all fields converted to new dtype
packed_fields = np.dtype({'names': names,
- 'formats': [(out_dtype, c) for c in counts]})
+ 'formats': [(out_dtype, dt.shape) for dt in dts]})
arr = arr.astype(packed_fields, copy=copy, casting=casting)
# finally is it safe to view the packed fields as the unstructured type
@@ -1069,7 +1069,7 @@ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
# first view as a packed structured array of one dtype
packed_fields = np.dtype({'names': names,
- 'formats': [(arr.dtype, c) for c in counts]})
+ 'formats': [(arr.dtype, dt.shape) for dt in dts]})
arr = np.ascontiguousarray(arr).view(packed_fields)
# next cast to an unpacked but flattened format with varied dtypes
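
Using the field's full ``dt.shape`` instead of a flat count is what lets subarray fields keep
their shape through the packed intermediate dtype. A sketch mirroring the new test added in
test_recfunctions.py further down:

    import numpy as np
    from numpy.lib.recfunctions import (structured_to_unstructured,
                                        unstructured_to_structured)

    d = np.array([(1, [2, 3], [[4, 5], [6, 7]]),
                  (8, [9, 10], [[11, 12], [13, 14]])],
                 dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
    flat = structured_to_unstructured(d)              # shape (2, 7)
    back = unstructured_to_structured(flat, d.dtype)  # same field layout as d
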
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index ac2a25604..8ebe7a695 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -532,8 +532,7 @@ def expand_dims(a, axis):
Returns
-------
res : ndarray
- Output array. The number of dimensions is one greater than that of
- the input array.
+ View of `a` with the number of dimensions increased by one.
See Also
--------
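
A one-line check of the reworded return description; the result shares memory with the input:

    import numpy as np

    a = np.arange(6)
    b = np.expand_dims(a, axis=0)   # shape (1, 6)
    assert b.base is a              # a view of `a`, not a copy
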
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index 0dc36e41c..fd401c57c 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -186,8 +186,6 @@ def _broadcast_shape(*args):
"""Returns the shape of the arrays that would result from broadcasting the
supplied arrays against each other.
"""
- if not args:
- return ()
# use the old-iterator because np.nditer does not handle size 0 arrays
# consistently
b = np.broadcast(*args[:32])
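
Context for the chunking on the line above: ``np.broadcast`` accepts at most 32 operands at a
time, which is why ``_broadcast_shape`` folds its arguments in groups of 32. For example:

    import numpy as np

    np.broadcast(np.empty((8, 1, 6, 1)), np.empty((7, 1, 5))).shape   # (8, 7, 6, 5)
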
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 2cf799723..062c21725 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -412,6 +412,7 @@ record_arrays = [
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
+ np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
]
@@ -629,6 +630,61 @@ def test_pickle_disallow():
assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
allow_pickle=False)
+@pytest.mark.parametrize('dt', [
+ np.dtype(np.dtype([('a', np.int8),
+ ('b', np.int16),
+ ('c', np.int32),
+ ], align=True),
+ (3,)),
+ np.dtype([('x', np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8,
+ },
+ (3,)),
+ (4,),
+ )]),
+ np.dtype([('x',
+ ('<f8', (5,)),
+ (2,),
+ )]),
+ np.dtype([('x', np.dtype((
+ np.dtype((
+ np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8}),
+ (3,)
+ )),
+ (4,)
+ )))
+ ]),
+ np.dtype([
+ ('a', np.dtype((
+ np.dtype((
+ np.dtype((
+ np.dtype([
+ ('a', int),
+ ('b', np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8})),
+ ]),
+ (3,),
+ )),
+ (4,),
+ )),
+ (5,),
+ )))
+ ]),
+ ])
+
+def test_descr_to_dtype(dt):
+ dt1 = format.descr_to_dtype(dt.descr)
+ assert_equal_(dt1, dt)
+ arr1 = np.zeros(3, dt)
+ arr2 = roundtrip(arr1)
+ assert_array_equal(arr1, arr2)
def test_version_2_0():
f = BytesIO()
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 6d32c365a..a3d4c6efb 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -696,6 +696,9 @@ class TestDiff(object):
assert_raises(np.AxisError, diff, x, axis=3)
assert_raises(np.AxisError, diff, x, axis=-4)
+ x = np.array(1.11111111111, np.float64)
+ assert_raises(ValueError, diff, x)
+
def test_nd(self):
x = 20 * rand(10, 20, 30)
out1 = x[:, :, 1:] - x[:, :, :-1]
@@ -945,7 +948,7 @@ class TestGradient(object):
assert_equal(type(out), type(x))
# And make sure that the output and input don't have aliased mask
# arrays
- assert_(x.mask is not out.mask)
+ assert_(x._mask is not out._mask)
# Also check that edge_order=2 doesn't alter the original mask
x2 = np.ma.arange(5)
x2[2] = np.ma.masked
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index c96b01d42..afaa526af 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -554,15 +554,11 @@ class TestHistogramOptimBinNums(object):
return a / (a + b)
ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
- for seed in range(256)]
+ for seed in range(10)]
# the average difference between the two methods decreases as the dataset size increases.
- assert_almost_equal(abs(np.mean(ll, axis=0) - 0.5),
- [0.1065248,
- 0.0968844,
- 0.0331818,
- 0.0178057],
- decimal=3)
+ avg = abs(np.mean(ll, axis=0) - 0.5)
+ assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
def test_simple_range(self):
"""
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index e687e2f54..2f7e97831 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -106,6 +106,9 @@ class TestRavelUnravelIndex(object):
np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
[5627771580, 117259570957])
+ # test unravel_index for big indices (issue #9538)
+ assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1))
+
# test overflow checking for too big array (issue #7546)
dummy_arr = ([0],[0])
half_max = np.iinfo(np.intp).max // 2
diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py
index 00d5ca827..95a465c36 100644
--- a/numpy/lib/tests/test_packbits.py
+++ b/numpy/lib/tests/test_packbits.py
@@ -2,7 +2,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
-
+import pytest
+from itertools import chain
def test_packbits():
# Copied from the docstring.
@@ -50,8 +51,8 @@ def test_packbits_empty_with_axis():
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, out_shape)
-
-def test_packbits_large():
+@pytest.mark.parametrize('bitorder', ('little', 'big'))
+def test_packbits_large(bitorder):
# test data large enough for 16 byte vectorization
a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
@@ -71,7 +72,7 @@ def test_packbits_large():
a = a.repeat(3)
for dtype in '?bBhHiIlLqQ':
arr = np.array(a, dtype=dtype)
- b = np.packbits(arr, axis=None)
+ b = np.packbits(arr, axis=None, bitorder=bitorder)
assert_equal(b.dtype, np.uint8)
r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
@@ -81,9 +82,10 @@ def test_packbits_large():
255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227,
129, 248, 227, 129, 199, 31, 128]
- assert_array_equal(b, r)
+ if bitorder == 'big':
+ assert_array_equal(b, r)
# equal for size being multiple of 8
- assert_array_equal(np.unpackbits(b)[:-4], a)
+ assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a)
# check last byte of different remainders (16 byte vectorization)
b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
@@ -229,6 +231,20 @@ def test_unpackbits():
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]]))
+def test_pack_unpack_order():
+ a = np.array([[2], [7], [23]], dtype=np.uint8)
+ b = np.unpackbits(a, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ b_little = np.unpackbits(a, axis=1, bitorder='little')
+ b_big = np.unpackbits(a, axis=1, bitorder='big')
+ assert_array_equal(b, b_big)
+ assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
+ assert_array_equal(b[:,::-1], b_little)
+ assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
+ assert_raises(ValueError, np.unpackbits, a, bitorder='r')
+ assert_raises(TypeError, np.unpackbits, a, bitorder=10)
+
+
def test_unpackbits_empty():
a = np.empty((0,), dtype=np.uint8)
@@ -268,8 +284,7 @@ def test_unpackbits_large():
assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
-def test_unpackbits_count():
- # test complete invertibility of packbits and unpackbits with count
+class TestCount():
x = np.array([
[1, 0, 1, 0, 0, 1, 0],
[0, 1, 1, 1, 0, 0, 0],
@@ -279,53 +294,85 @@ def test_unpackbits_count():
[0, 0, 1, 1, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
], dtype=np.uint8)
-
padded1 = np.zeros(57, dtype=np.uint8)
padded1[:49] = x.ravel()
+ padded1b = np.zeros(57, dtype=np.uint8)
+ padded1b[:49] = x[::-1].copy().ravel()
+ padded2 = np.zeros((9, 9), dtype=np.uint8)
+ padded2[:7, :7] = x
- packed = np.packbits(x)
- for count in range(58):
- unpacked = np.unpackbits(packed, count=count)
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
+ def test_roundtrip(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ # test complete invertibility of packbits and unpackbits with count
+ packed = np.packbits(self.x, bitorder=bitorder)
+ unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
assert_equal(unpacked.dtype, np.uint8)
- assert_array_equal(unpacked, padded1[:count])
- for count in range(-1, -57, -1):
- unpacked = np.unpackbits(packed, count=count)
- assert_equal(unpacked.dtype, np.uint8)
- # count -1 because padded1 has 57 instead of 56 elements
- assert_array_equal(unpacked, padded1[:count-1])
- for kwargs in [{}, {'count': None}]:
+ assert_array_equal(unpacked, self.padded1[:cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ ])
+ def test_count(self, kwargs):
+ packed = np.packbits(self.x)
unpacked = np.unpackbits(packed, **kwargs)
assert_equal(unpacked.dtype, np.uint8)
- assert_array_equal(unpacked, padded1[:-1])
- assert_raises(ValueError, np.unpackbits, packed, count=-57)
-
- padded2 = np.zeros((9, 9), dtype=np.uint8)
- padded2[:7, :7] = x
-
- packed0 = np.packbits(x, axis=0)
- packed1 = np.packbits(x, axis=1)
- for count in range(10):
- unpacked0 = np.unpackbits(packed0, axis=0, count=count)
+ assert_array_equal(unpacked, self.padded1[:-1])
+
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ # delta==-1 when count<0 because one extra zero of padding
+ @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
+ def test_roundtrip_axis(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
+ unpacked0 = np.unpackbits(packed0, axis=0, count=count,
+ bitorder=bitorder)
assert_equal(unpacked0.dtype, np.uint8)
- assert_array_equal(unpacked0, padded2[:count, :x.shape[1]])
- unpacked1 = np.unpackbits(packed1, axis=1, count=count)
- assert_equal(unpacked1.dtype, np.uint8)
- assert_array_equal(unpacked1, padded2[:x.shape[1], :count])
- for count in range(-1, -9, -1):
- unpacked0 = np.unpackbits(packed0, axis=0, count=count)
- assert_equal(unpacked0.dtype, np.uint8)
- # count -1 because one extra zero of padding
- assert_array_equal(unpacked0, padded2[:count-1, :x.shape[1]])
- unpacked1 = np.unpackbits(packed1, axis=1, count=count)
+ assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
+ unpacked1 = np.unpackbits(packed1, axis=1, count=count,
+ bitorder=bitorder)
assert_equal(unpacked1.dtype, np.uint8)
- assert_array_equal(unpacked1, padded2[:x.shape[0], :count-1])
- for kwargs in [{}, {'count': None}]:
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ {'bitorder' : 'little'},
+ {'bitorder': 'little', 'count': None},
+ {'bitorder' : 'big'},
+ {'bitorder': 'big', 'count': None},
+ ])
+ def test_axis_count(self, kwargs):
+ packed0 = np.packbits(self.x, axis=0)
unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
assert_equal(unpacked0.dtype, np.uint8)
- assert_array_equal(unpacked0, padded2[:-1, :x.shape[1]])
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])
+ else:
+ assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1)
unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
assert_equal(unpacked1.dtype, np.uint8)
- assert_array_equal(unpacked1, padded2[:x.shape[0], :-1])
- assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
- assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
-
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])
+ else:
+ assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1])
+
+ def test_bad_count(self):
+ packed0 = np.packbits(self.x, axis=0)
+ assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
+ packed1 = np.packbits(self.x, axis=1)
+ assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
+ packed = np.packbits(self.x)
+ assert_raises(ValueError, np.unpackbits, packed, count=-57)
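
A hedged sketch of the ``bitorder`` keyword these tests exercise (NumPy 1.17+): 'big' keeps the
previous behaviour, 'little' reverses the bit order within each byte.

    import numpy as np

    a = np.array([[2], [7], [23]], dtype=np.uint8)
    np.unpackbits(a, axis=1, bitorder='big')
    # -> [[0 0 0 0 0 0 1 0]
    #     [0 0 0 0 0 1 1 1]
    #     [0 0 0 1 0 1 1 1]]
    bits = np.unpackbits(a, axis=1, bitorder='little')   # each row bit-reversed
    np.packbits(bits, axis=1, bitorder='little')         # round-trips back to `a`
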
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index 112678294..f713fb64d 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -243,6 +243,15 @@ class TestRecFunctions(object):
assert_(dd.base is d)
assert_(ddd.base is d)
+ # including uniform fields with subarrays unpacked
+ d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
+ (8, [9, 10], [[11, 12], [13, 14]])],
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)), ('x2', ('i4', (2, 2)))])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(dd.base is d)
+ assert_(ddd.base is d)
+
# test that nested fields with identical names don't break anything
point = np.dtype([('x', int), ('y', int)])
triangle = np.dtype([('a', point), ('b', point), ('c', point)])