-rw-r--r--benchmarks/benchmarks/bench_array_coercion.py57
-rw-r--r--benchmarks/benchmarks/bench_function_base.py2
-rw-r--r--benchmarks/benchmarks/bench_itemselection.py45
-rw-r--r--benchmarks/benchmarks/bench_overrides.py2
-rw-r--r--benchmarks/benchmarks/bench_ufunc.py2
-rw-r--r--benchmarks/benchmarks/common.py2
-rw-r--r--doc/Py3K.rst.txt2
-rw-r--r--doc/TESTS.rst.txt4
-rw-r--r--doc/changelog/1.18.1-changelog.rst33
-rw-r--r--doc/neps/nep-0037-array-module.rst550
-rw-r--r--doc/release/upcoming_changes/15233.highlight.rst4
-rw-r--r--doc/release/upcoming_changes/15251.c_api.rst10
-rw-r--r--doc/release/upcoming_changes/15255.compatibility.rst12
-rw-r--r--doc/source/reference/c-api/array.rst2
-rw-r--r--doc/source/reference/random/multithreading.rst2
-rw-r--r--doc/source/reference/ufuncs.rst4
-rw-r--r--doc/source/release.rst1
-rw-r--r--doc/source/release/1.18.0-notes.rst6
-rw-r--r--doc/source/release/1.18.1-notes.rst52
-rw-r--r--doc/source/release/1.19.0-notes.rst6
-rw-r--r--numpy/_globals.py2
-rw-r--r--numpy/_pytesttester.py5
-rw-r--r--numpy/compat/py3k.py2
-rw-r--r--numpy/core/_add_newdocs.py72
-rw-r--r--numpy/core/_internal.py12
-rw-r--r--numpy/core/_ufunc_config.py8
-rw-r--r--numpy/core/arrayprint.py73
-rw-r--r--numpy/core/code_generators/genapi.py14
-rw-r--r--numpy/core/code_generators/generate_umath.py29
-rw-r--r--numpy/core/einsumfunc.py62
-rw-r--r--numpy/core/getlimits.py18
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h19
-rw-r--r--numpy/core/include/numpy/npy_3kcompat.h45
-rw-r--r--numpy/core/include/numpy/npy_common.h10
-rw-r--r--numpy/core/machar.py2
-rw-r--r--numpy/core/multiarray.py2
-rw-r--r--numpy/core/numeric.py8
-rw-r--r--numpy/core/records.py2
-rw-r--r--numpy/core/setup.py2
-rw-r--r--numpy/core/src/common/array_assign.c10
-rw-r--r--numpy/core/src/common/array_assign.h20
-rw-r--r--numpy/core/src/common/get_attr_string.h18
-rw-r--r--numpy/core/src/common/lowlevel_strided_loops.h36
-rw-r--r--numpy/core/src/common/npy_longdouble.c4
-rw-r--r--numpy/core/src/common/ucsnarrow.c22
-rw-r--r--numpy/core/src/multiarray/_datetime.h10
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src39
-rw-r--r--numpy/core/src/multiarray/alloc.c9
-rw-r--r--numpy/core/src/multiarray/array_assign_array.c18
-rw-r--r--numpy/core/src/multiarray/array_assign_scalar.c10
-rw-r--r--numpy/core/src/multiarray/arrayobject.c70
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src41
-rw-r--r--numpy/core/src/multiarray/buffer.c70
-rw-r--r--numpy/core/src/multiarray/calculation.c4
-rw-r--r--numpy/core/src/multiarray/common.c62
-rw-r--r--numpy/core/src/multiarray/common.h4
-rw-r--r--numpy/core/src/multiarray/compiled_base.c10
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c35
-rw-r--r--numpy/core/src/multiarray/conversion_utils.h2
-rw-r--r--numpy/core/src/multiarray/convert.c6
-rw-r--r--numpy/core/src/multiarray/ctors.c63
-rw-r--r--numpy/core/src/multiarray/datetime.c34
-rw-r--r--numpy/core/src/multiarray/datetime_busdaycal.c9
-rw-r--r--numpy/core/src/multiarray/datetime_strings.c6
-rw-r--r--numpy/core/src/multiarray/datetime_strings.h2
-rw-r--r--numpy/core/src/multiarray/descriptor.c949
-rw-r--r--numpy/core/src/multiarray/descriptor.h2
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c18
-rw-r--r--numpy/core/src/multiarray/einsum.c.src34
-rw-r--r--numpy/core/src/multiarray/flagsobject.c55
-rw-r--r--numpy/core/src/multiarray/getset.c46
-rw-r--r--numpy/core/src/multiarray/item_selection.c18
-rw-r--r--numpy/core/src/multiarray/item_selection.h2
-rw-r--r--numpy/core/src/multiarray/iterators.c27
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src18
-rw-r--r--numpy/core/src/multiarray/mapping.c17
-rw-r--r--numpy/core/src/multiarray/methods.c77
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c102
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c8
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c9
-rw-r--r--numpy/core/src/multiarray/npy_buffer.h2
-rw-r--r--numpy/core/src/multiarray/number.c105
-rw-r--r--numpy/core/src/multiarray/scalarapi.c78
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src376
-rw-r--r--numpy/core/src/multiarray/shape.c2
-rw-r--r--numpy/core/src/multiarray/shape.h2
-rw-r--r--numpy/core/src/multiarray/strfuncs.c33
-rw-r--r--numpy/core/src/multiarray/strfuncs.h5
-rw-r--r--numpy/core/src/multiarray/typeinfo.c14
-rw-r--r--numpy/core/src/umath/_rational_tests.c.src9
-rw-r--r--numpy/core/src/umath/funcs.inc.src8
-rw-r--r--numpy/core/src/umath/loops.c.src2
-rw-r--r--numpy/core/src/umath/reduction.h4
-rw-r--r--numpy/core/src/umath/scalarmath.c.src138
-rw-r--r--numpy/core/src/umath/ufunc_object.c19
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c31
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.h7
-rw-r--r--numpy/core/src/umath/umathmodule.c14
-rw-r--r--numpy/core/tests/_locales.py2
-rw-r--r--numpy/core/tests/test_abc.py2
-rw-r--r--numpy/core/tests/test_arrayprint.py10
-rw-r--r--numpy/core/tests/test_datetime.py4
-rw-r--r--numpy/core/tests/test_defchararray.py16
-rw-r--r--numpy/core/tests/test_deprecations.py8
-rw-r--r--numpy/core/tests/test_dtype.py26
-rw-r--r--numpy/core/tests/test_einsum.py4
-rw-r--r--numpy/core/tests/test_errstate.py2
-rw-r--r--numpy/core/tests/test_function_base.py8
-rw-r--r--numpy/core/tests/test_getlimits.py16
-rw-r--r--numpy/core/tests/test_half.py2
-rw-r--r--numpy/core/tests/test_indexerrors.py2
-rw-r--r--numpy/core/tests/test_indexing.py82
-rw-r--r--numpy/core/tests/test_item_selection.py2
-rw-r--r--numpy/core/tests/test_longdouble.py2
-rw-r--r--numpy/core/tests/test_machar.py2
-rw-r--r--numpy/core/tests/test_mem_overlap.py6
-rw-r--r--numpy/core/tests/test_memmap.py2
-rw-r--r--numpy/core/tests/test_multiarray.py231
-rw-r--r--numpy/core/tests/test_nditer.py2
-rw-r--r--numpy/core/tests/test_numeric.py70
-rw-r--r--numpy/core/tests/test_numerictypes.py26
-rw-r--r--numpy/core/tests/test_overrides.py30
-rw-r--r--numpy/core/tests/test_records.py6
-rw-r--r--numpy/core/tests/test_regression.py12
-rw-r--r--numpy/core/tests/test_scalar_ctors.py4
-rw-r--r--numpy/core/tests/test_scalar_methods.py2
-rw-r--r--numpy/core/tests/test_scalarbuffer.py2
-rw-r--r--numpy/core/tests/test_scalarinherit.py6
-rw-r--r--numpy/core/tests/test_scalarmath.py30
-rw-r--r--numpy/core/tests/test_scalarprint.py2
-rw-r--r--numpy/core/tests/test_shape_base.py16
-rw-r--r--numpy/core/tests/test_ufunc.py30
-rw-r--r--numpy/core/tests/test_umath.py136
-rw-r--r--numpy/core/tests/test_umath_accuracy.py2
-rw-r--r--numpy/core/tests/test_umath_complex.py12
-rw-r--r--numpy/core/tests/test_unicode.py8
-rw-r--r--numpy/distutils/ccompiler.py14
-rw-r--r--numpy/distutils/command/config.py10
-rw-r--r--numpy/distutils/compat.py8
-rw-r--r--numpy/distutils/conv_template.py11
-rw-r--r--numpy/distutils/cpuinfo.py14
-rw-r--r--numpy/distutils/fcompiler/__init__.py12
-rw-r--r--numpy/distutils/fcompiler/compaq.py10
-rw-r--r--numpy/distutils/fcompiler/environment.py2
-rw-r--r--numpy/distutils/fcompiler/gnu.py5
-rw-r--r--numpy/distutils/misc_util.py10
-rw-r--r--numpy/distutils/npy_pkg_config.py4
-rw-r--r--numpy/distutils/system_info.py15
-rw-r--r--numpy/distutils/tests/test_ccompiler.py22
-rw-r--r--numpy/distutils/tests/test_exec_command.py8
-rw-r--r--numpy/distutils/tests/test_fcompiler_gnu.py4
-rw-r--r--numpy/distutils/tests/test_fcompiler_intel.py4
-rw-r--r--numpy/distutils/tests/test_fcompiler_nagfor.py2
-rw-r--r--numpy/distutils/tests/test_misc_util.py8
-rw-r--r--numpy/distutils/tests/test_npy_pkg_config.py4
-rw-r--r--numpy/distutils/tests/test_system_info.py2
-rw-r--r--numpy/distutils/unixccompiler.py9
-rw-r--r--numpy/doc/glossary.py4
-rw-r--r--numpy/doc/subclassing.py6
-rw-r--r--numpy/f2py/auxfuncs.py2
-rw-r--r--numpy/f2py/src/fortranobject.c39
-rw-r--r--numpy/f2py/tests/test_array_from_pyobj.py10
-rw-r--r--numpy/f2py/tests/test_callback.py2
-rw-r--r--numpy/f2py/tests/util.py2
-rw-r--r--numpy/fft/tests/test_helper.py8
-rw-r--r--numpy/fft/tests/test_pocketfft.py6
-rw-r--r--numpy/lib/_datasource.py4
-rw-r--r--numpy/lib/_iotools.py6
-rw-r--r--numpy/lib/arrayterator.py2
-rw-r--r--numpy/lib/function_base.py29
-rw-r--r--numpy/lib/index_tricks.py10
-rw-r--r--numpy/lib/mixins.py2
-rw-r--r--numpy/lib/npyio.py6
-rw-r--r--numpy/lib/polynomial.py2
-rw-r--r--numpy/lib/stride_tricks.py10
-rw-r--r--numpy/lib/tests/test__datasource.py12
-rw-r--r--numpy/lib/tests/test__iotools.py8
-rw-r--r--numpy/lib/tests/test_arraypad.py24
-rw-r--r--numpy/lib/tests/test_arraysetops.py6
-rw-r--r--numpy/lib/tests/test_financial.py2
-rw-r--r--numpy/lib/tests/test_function_base.py90
-rw-r--r--numpy/lib/tests/test_histograms.py6
-rw-r--r--numpy/lib/tests/test_index_tricks.py16
-rw-r--r--numpy/lib/tests/test_io.py16
-rw-r--r--numpy/lib/tests/test_mixins.py4
-rw-r--r--numpy/lib/tests/test_nanfunctions.py14
-rw-r--r--numpy/lib/tests/test_polynomial.py2
-rw-r--r--numpy/lib/tests/test_recfunctions.py16
-rw-r--r--numpy/lib/tests/test_regression.py2
-rw-r--r--numpy/lib/tests/test_shape_base.py32
-rw-r--r--numpy/lib/tests/test_stride_tricks.py9
-rw-r--r--numpy/lib/tests/test_twodim_base.py20
-rw-r--r--numpy/lib/tests/test_type_check.py36
-rw-r--r--numpy/lib/tests/test_ufunclike.py2
-rw-r--r--numpy/lib/tests/test_utils.py2
-rw-r--r--numpy/lib/user_array.py2
-rw-r--r--numpy/lib/utils.py2
-rw-r--r--numpy/linalg/lapack_lite/clapack_scrub.py2
-rw-r--r--numpy/linalg/lapack_lite/fortran.py4
-rwxr-xr-xnumpy/linalg/lapack_lite/make_lite.py4
-rw-r--r--numpy/linalg/tests/test_build.py4
-rw-r--r--numpy/linalg/tests/test_linalg.py26
-rw-r--r--numpy/linalg/tests/test_regression.py2
-rw-r--r--numpy/ma/core.py20
-rw-r--r--numpy/ma/extras.py2
-rw-r--r--numpy/ma/mrecords.py2
-rw-r--r--numpy/ma/tests/test_core.py36
-rw-r--r--numpy/ma/tests/test_deprecations.py4
-rw-r--r--numpy/ma/tests/test_extras.py28
-rw-r--r--numpy/ma/tests/test_mrecords.py6
-rw-r--r--numpy/ma/tests/test_old_ma.py6
-rw-r--r--numpy/ma/tests/test_regression.py2
-rw-r--r--numpy/ma/tests/test_subclassing.py4
-rw-r--r--numpy/ma/timer_comparison.py2
-rw-r--r--numpy/matrixlib/tests/test_defmatrix.py18
-rw-r--r--numpy/matrixlib/tests/test_interaction.py2
-rw-r--r--numpy/matrixlib/tests/test_masked_matrix.py6
-rw-r--r--numpy/matrixlib/tests/test_multiarray.py2
-rw-r--r--numpy/matrixlib/tests/test_numeric.py2
-rw-r--r--numpy/matrixlib/tests/test_regression.py2
-rw-r--r--numpy/polynomial/polyutils.py2
-rw-r--r--numpy/polynomial/tests/test_chebyshev.py24
-rw-r--r--numpy/polynomial/tests/test_classes.py4
-rw-r--r--numpy/polynomial/tests/test_hermite.py20
-rw-r--r--numpy/polynomial/tests/test_hermite_e.py20
-rw-r--r--numpy/polynomial/tests/test_laguerre.py20
-rw-r--r--numpy/polynomial/tests/test_legendre.py20
-rw-r--r--numpy/polynomial/tests/test_polynomial.py16
-rw-r--r--numpy/polynomial/tests/test_polyutils.py4
-rw-r--r--numpy/polynomial/tests/test_printing.py4
-rw-r--r--numpy/random/tests/test_direct.py4
-rw-r--r--numpy/random/tests/test_generator_mt19937.py20
-rw-r--r--numpy/random/tests/test_generator_mt19937_regressions.py4
-rw-r--r--numpy/random/tests/test_random.py18
-rw-r--r--numpy/random/tests/test_randomstate.py18
-rw-r--r--numpy/random/tests/test_randomstate_regression.py4
-rw-r--r--numpy/random/tests/test_regression.py4
-rw-r--r--numpy/random/tests/test_smoke.py2
-rw-r--r--numpy/testing/_private/noseclasses.py2
-rw-r--r--numpy/testing/_private/nosetester.py7
-rw-r--r--numpy/testing/_private/parameterized.py4
-rw-r--r--numpy/testing/_private/utils.py5
-rwxr-xr-xnumpy/testing/print_coercion_tables.py2
-rw-r--r--numpy/testing/tests/test_decorators.py2
-rw-r--r--numpy/testing/tests/test_utils.py28
-rw-r--r--numpy/tests/test_ctypeslib.py10
-rw-r--r--numpy/tests/test_public_api.py1
-rw-r--r--pytest.ini3
-rwxr-xr-xsetup.py42
-rw-r--r--tools/allocation_tracking/alloc_hook.pyx2
-rw-r--r--tools/allocation_tracking/track_allocations.py2
-rw-r--r--tools/npy_tempita/__init__.py12
-rw-r--r--tools/npy_tempita/_looper.py6
-rw-r--r--tools/refguide_check.py2
254 files changed, 2482 insertions, 3548 deletions
diff --git a/benchmarks/benchmarks/bench_array_coercion.py b/benchmarks/benchmarks/bench_array_coercion.py
new file mode 100644
index 000000000..2bae4c002
--- /dev/null
+++ b/benchmarks/benchmarks/bench_array_coercion.py
@@ -0,0 +1,57 @@
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark
+
+import numpy as np
+
+
+class ArrayCoercionSmall(Benchmark):
+ # More detailed benchmarks for array coercion,
+ # some basic benchmarks are in `bench_core.py`.
+ params = [[range(3), [1], 1, np.array([5], dtype=np.int64), np.int64(5)]]
+ param_names = ['array_like']
+ int64 = np.dtype(np.int64)
+
+ def time_array_invalid_kwarg(self, array_like):
+ try:
+ np.array(array_like, ndmin="not-integer")
+ except TypeError:
+ pass
+
+ def time_array(self, array_like):
+ np.array(array_like)
+
+ def time_array_dtype_not_kwargs(self, array_like):
+ np.array(array_like, self.int64)
+
+ def time_array_no_copy(self, array_like):
+ np.array(array_like, copy=False)
+
+ def time_array_subok(self, array_like):
+ np.array(array_like, subok=True)
+
+ def time_array_all_kwargs(self, array_like):
+ np.array(array_like, dtype=self.int64, copy=False, order="F",
+ subok=False, ndmin=2)
+
+ def time_asarray(self, array_like):
+ np.asarray(array_like)
+
+    def time_asarray_dtype(self, array_like):
+        np.asarray(array_like, dtype=self.int64)
+
+    def time_asarray_dtype_order(self, array_like):
+        np.asarray(array_like, dtype=self.int64, order="F")
+
+    def time_asanyarray(self, array_like):
+        np.asanyarray(array_like)
+
+    def time_asanyarray_dtype(self, array_like):
+        np.asanyarray(array_like, dtype=self.int64)
+
+    def time_asanyarray_dtype_order(self, array_like):
+        np.asanyarray(array_like, dtype=self.int64, order="F")
+
+ def time_ascontiguousarray(self, array_like):
+ np.ascontiguousarray(array_like)
+
diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py
index 3b4647773..b1e592749 100644
--- a/benchmarks/benchmarks/bench_function_base.py
+++ b/benchmarks/benchmarks/bench_function_base.py
@@ -104,7 +104,7 @@ def memoize(f):
return f
-class SortGenerator(object):
+class SortGenerator:
# The size of the unsorted area in the "random unsorted area"
# benchmarks
AREA_SIZE = 100
diff --git a/benchmarks/benchmarks/bench_itemselection.py b/benchmarks/benchmarks/bench_itemselection.py
new file mode 100644
index 000000000..27fc49e30
--- /dev/null
+++ b/benchmarks/benchmarks/bench_itemselection.py
@@ -0,0 +1,45 @@
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark, TYPES1
+
+import numpy as np
+
+
+class Take(Benchmark):
+ params = [
+ [(1000, 1), (1000, 2), (2, 1000, 1), (1000, 3)],
+ ["raise", "wrap", "clip"],
+ TYPES1]
+ param_names = ["shape", "mode", "dtype"]
+
+ def setup(self, shape, mode, dtype):
+ self.arr = np.ones(shape, dtype)
+ self.indices = np.arange(1000)
+
+ def time_contiguous(self, shape, mode, dtype):
+ self.arr.take(self.indices, axis=-2, mode=mode)
+
+
+class PutMask(Benchmark):
+ params = [
+ [True, False],
+ TYPES1]
+ param_names = ["values_is_scalar", "dtype"]
+
+ def setup(self, values_is_scalar, dtype):
+ if values_is_scalar:
+ self.vals = np.array(1., dtype=dtype)
+ else:
+ self.vals = np.ones(1000, dtype=dtype)
+
+ self.arr = np.ones(1000, dtype=dtype)
+
+ self.dense_mask = np.ones(1000, dtype="bool")
+ self.sparse_mask = np.zeros(1000, dtype="bool")
+
+ def time_dense(self, values_is_scalar, dtype):
+ np.putmask(self.arr, self.dense_mask, self.vals)
+
+ def time_sparse(self, values_is_scalar, dtype):
+ np.putmask(self.arr, self.sparse_mask, self.vals)
+
diff --git a/benchmarks/benchmarks/bench_overrides.py b/benchmarks/benchmarks/bench_overrides.py
index f03120efa..e44951785 100644
--- a/benchmarks/benchmarks/bench_overrides.py
+++ b/benchmarks/benchmarks/bench_overrides.py
@@ -33,7 +33,7 @@ def mock_concatenate(arrays, axis=0, out=None):
pass
-class DuckArray(object):
+class DuckArray:
def __array_function__(self, func, types, args, kwargs):
pass
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index 73159bd97..9f45a7257 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -150,7 +150,7 @@ class Scalar(Benchmark):
(self.y + self.z)
-class ArgPack(object):
+class ArgPack:
__slots__ = ['args', 'kwargs']
def __init__(self, *args, **kwargs):
self.args = args
diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py
index c6037dea9..3fd81a164 100644
--- a/benchmarks/benchmarks/common.py
+++ b/benchmarks/benchmarks/common.py
@@ -110,5 +110,5 @@ def get_indexes_rand_():
return indexes_rand_
-class Benchmark(object):
+class Benchmark:
goal_time = 0.25
diff --git a/doc/Py3K.rst.txt b/doc/Py3K.rst.txt
index fe2dd13c4..cde0394dd 100644
--- a/doc/Py3K.rst.txt
+++ b/doc/Py3K.rst.txt
@@ -225,8 +225,6 @@ A #define in config.h, defined when building for Py3.
Currently, this is generated as a part of the config.
Is this sensible (we could also use Py_VERSION_HEX)?
- This is being cleaned up in the C code.
-
private/npy_3kcompat.h
----------------------
diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt
index 14cb28df8..9023c7100 100644
--- a/doc/TESTS.rst.txt
+++ b/doc/TESTS.rst.txt
@@ -119,7 +119,7 @@ that makes it hard to identify the test from the output of running the test
suite with ``verbose=2`` (or similar verbosity setting). Use plain comments
(``#``) if necessary.
-Labeling tests
+Labeling tests
--------------
As an alternative to ``pytest.mark.<label>``, there are a number of labels you
@@ -174,7 +174,7 @@ name; thus::
print 'doing teardown'
- class TestMe(object):
+ class TestMe:
def setup():
"""Class-level setup"""
print 'doing setup'
diff --git a/doc/changelog/1.18.1-changelog.rst b/doc/changelog/1.18.1-changelog.rst
new file mode 100644
index 000000000..d3df29198
--- /dev/null
+++ b/doc/changelog/1.18.1-changelog.rst
@@ -0,0 +1,33 @@
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Matti Picus
+* Maxwell Aladago
+* Pauli Virtanen
+* Ralf Gommers
+* Tyler Reddy
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 13 pull requests were merged for this release.
+
+* `#15158 <https://github.com/numpy/numpy/pull/15158>`__: MAINT: Update pavement.py for towncrier.
+* `#15159 <https://github.com/numpy/numpy/pull/15159>`__: DOC: add moved modules to 1.18 release note
+* `#15161 <https://github.com/numpy/numpy/pull/15161>`__: MAINT, DOC: Minor backports and updates for 1.18.x
+* `#15176 <https://github.com/numpy/numpy/pull/15176>`__: TST: Add assert_array_equal test for big integer arrays
+* `#15184 <https://github.com/numpy/numpy/pull/15184>`__: BUG: use tmp dir and check version for cython test (#15170)
+* `#15220 <https://github.com/numpy/numpy/pull/15220>`__: BUG: distutils: fix msvc+gfortran openblas handling corner case
+* `#15221 <https://github.com/numpy/numpy/pull/15221>`__: BUG: remove -std=c99 for c++ compilation (#15194)
+* `#15222 <https://github.com/numpy/numpy/pull/15222>`__: MAINT: unskip test on win32
+* `#15223 <https://github.com/numpy/numpy/pull/15223>`__: TST: add BLAS ILP64 run in Travis & Azure
+* `#15245 <https://github.com/numpy/numpy/pull/15245>`__: MAINT: only add --std=c99 where needed
+* `#15246 <https://github.com/numpy/numpy/pull/15246>`__: BUG: lib: Fix handling of integer arrays by gradient.
+* `#15247 <https://github.com/numpy/numpy/pull/15247>`__: MAINT: Do not use private Python function in testing
+* `#15250 <https://github.com/numpy/numpy/pull/15250>`__: REL: Prepare for the NumPy 1.18.1 release.
diff --git a/doc/neps/nep-0037-array-module.rst b/doc/neps/nep-0037-array-module.rst
new file mode 100644
index 000000000..387356490
--- /dev/null
+++ b/doc/neps/nep-0037-array-module.rst
@@ -0,0 +1,550 @@
+===================================================
+NEP 37 — A dispatch protocol for NumPy-like modules
+===================================================
+
+:Author: Stephan Hoyer <shoyer@google.com>
+:Author: Hameer Abbasi
+:Author: Sebastian Berg
+:Status: Draft
+:Type: Standards Track
+:Created: 2019-12-29
+
+Abstract
+--------
+
+NEP-18's ``__array_function__`` has been a mixed success. Some projects (e.g.,
+dask, CuPy, xarray, sparse, Pint) have enthusiastically adopted it. Others
+(e.g., PyTorch, JAX, SciPy) have been more reluctant. Here we propose a new
+protocol, ``__array_module__``, that we expect could eventually subsume most
+use-cases for ``__array_function__``. The protocol requires explicit adoption
+by both users and library authors, which ensures backwards compatibility, and
+is also significantly simpler than ``__array_function__``, both of which we
+expect will make it easier to adopt.
+
+Why ``__array_function__`` hasn't been enough
+---------------------------------------------
+
+There are two broad ways in which NEP-18 has fallen short of its goals:
+
+1. **Maintainability concerns**. `__array_function__` has significant
+ implications for libraries that use it:
+
+ - Projects like `PyTorch
+ <https://github.com/pytorch/pytorch/issues/22402>`_, `JAX
+ <https://github.com/google/jax/issues/1565>`_ and even `scipy.sparse
+ <https://github.com/scipy/scipy/issues/10362>`_ have been reluctant to
+ implement `__array_function__` in part because they are concerned about
+ **breaking existing code**: users expect NumPy functions like
+ ``np.concatenate`` to return NumPy arrays. This is a fundamental
+     limitation of the ``__array_function__`` design, which we chose in order
+     to allow overriding the existing ``numpy`` namespace.
+ - ``__array_function__`` currently requires an "all or nothing" approach to
+ implementing NumPy's API. There is no good pathway for **incremental
+ adoption**, which is particularly problematic for established projects
+ for which adopting ``__array_function__`` would result in breaking
+ changes.
+ - It is no longer possible to use **aliases to NumPy functions** within
+ modules that support overrides. For example, both CuPy and JAX set
+ ``result_type = np.result_type``.
+ - Implementing **fall-back mechanisms** for unimplemented NumPy functions
+ by using NumPy's implementation is hard to get right (but see the
+ `version from dask <https://github.com/dask/dask/pull/5043>`_), because
+ ``__array_function__`` does not present a consistent interface.
+ Converting all arguments of array type requires recursing into generic
+ arguments of the form ``*args, **kwargs``.
+
+2. **Limitations on what can be overridden.** ``__array_function__`` has some
+ important gaps, most notably array creation and coercion functions:
+
+ - **Array creation** routines (e.g., ``np.arange`` and those in
+ ``np.random``) need some other mechanism for indicating what type of
+ arrays to create. `NEP 36 <https://github.com/numpy/numpy/pull/14715>`_
+ proposed adding optional ``like=`` arguments to functions without
+ existing array arguments. However, we still lack any mechanism to
+ override methods on objects, such as those needed by
+ ``np.random.RandomState``.
+ - **Array conversion** can't reuse the existing coercion functions like
+ ``np.asarray``, because ``np.asarray`` sometimes means "convert to an
+ exact ``np.ndarray``" and other times means "convert to something _like_
+ a NumPy array." This led to the `NEP 30
+ <https://numpy.org/neps/nep-0030-duck-array-protocol.html>`_ proposal for
+ a separate ``np.duckarray`` function, but this still does not resolve how
+ to cast one duck array into a type matching another duck array.
+
+``get_array_module`` and the ``__array_module__`` protocol
+----------------------------------------------------------
+
+We propose a new user-facing mechanism for dispatching to a duck-array
+implementation, ``numpy.get_array_module``. ``get_array_module`` performs the
+same type resolution as ``__array_function__`` and returns a module with an API
+promised to match the standard interface of ``numpy`` that can implement
+operations on all provided array types.
+
+The protocol itself is both simpler and more powerful than
+``__array_function__``, because it doesn't need to worry about actually
+implementing functions. We believe it resolves most of the maintainability and
+functionality limitations of ``__array_function__``.
+
+The new protocol is opt-in, explicit and with local control; see
+:ref:`appendix-design-choices` for discussion on the importance of these design
+features.
+
+The array module contract
+=========================
+
+Modules returned by ``get_array_module``/``__array_module__`` should make a
+best effort to implement NumPy's core functionality on new array type(s).
+Unimplemented functionality should simply be omitted (e.g., accessing an
+unimplemented function should raise ``AttributeError``). In the future, we
+anticipate codifying a protocol for requesting restricted subsets of ``numpy``;
+see :ref:`requesting-restricted-subsets` for more details.
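+
+As a (hypothetical) sketch of this contract, where ``duck_array`` is an
+instance of some duck array type whose module implements ``mean`` but omits
+``busday_count``:
+
+.. code:: python
+
+    module = np.get_array_module(duck_array)
+    # Implemented functions behave like their NumPy counterparts:
+    module.mean(duck_array)
+    # Functions the module does not implement are simply absent:
+    module.busday_count   # raises AttributeError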
+
+How to use ``get_array_module``
+===============================
+
+Code that wants to support generic duck arrays should explicitly call
+``get_array_module`` to determine an appropriate array module from which to
+call functions, rather than using the ``numpy`` namespace directly. For
+example:
+
+.. code:: python
+
+ # calls the appropriate version of np.something for x and y
+ module = np.get_array_module(x, y)
+ module.something(x, y)
+
+Both array creation and array conversion are supported, because dispatching is
+handled by ``get_array_module`` rather than via the types of function
+arguments. For example, to use random number generation functions or methods,
+we can simply pull out the appropriate submodule:
+
+.. code:: python
+
+ def duckarray_add_random(array):
+ module = np.get_array_module(array)
+ noise = module.random.randn(*array.shape)
+ return array + noise
+
+We can also write the duck-array ``stack`` function from `NEP 30
+<https://numpy.org/neps/nep-0030-duck-array-protocol.html>`_, without the need
+for a new ``np.duckarray`` function:
+
+.. code:: python
+
+ def duckarray_stack(arrays):
+ module = np.get_array_module(*arrays)
+ arrays = [module.asarray(arr) for arr in arrays]
+ shapes = {arr.shape for arr in arrays}
+ if len(shapes) != 1:
+ raise ValueError('all input arrays must have the same shape')
+ expanded_arrays = [arr[module.newaxis, ...] for arr in arrays]
+ return module.concatenate(expanded_arrays, axis=0)
+
+By default, ``get_array_module`` will return the ``numpy`` module if no
+arguments are arrays. This fall-back can be explicitly controlled by providing
+the ``module`` keyword-only argument. It is also possible to indicate that an
+exception should be raised instead of returning a default array module by
+setting ``module=None``.
+
+How to implement ``__array_module__``
+=====================================
+
+Libraries implementing a duck array type that want to support
+``get_array_module`` need to implement the corresponding protocol,
+``__array_module__``. This new protocol is based on Python's dispatch protocol
+for arithmetic, and is essentially a simpler version of ``__array_function__``.
+
+Only one argument is passed into ``__array_module__``, a Python collection of
+unique array types passed into ``get_array_module``, i.e., all arguments with
+an ``__array_module__`` attribute.
+
+The special method should either return a namespace with an API matching
+``numpy``, or ``NotImplemented``, indicating that it does not know how to
+handle the operation:
+
+.. code:: python
+
+ class MyArray:
+ def __array_module__(self, types):
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return my_array_module
+
+Returning custom objects from ``__array_module__``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``my_array_module`` will typically, but need not always, be a Python module.
+Returning a custom object (e.g., with functions implemented via
+``__getattr__``) may be useful for some advanced use cases.
+
+For example, custom objects could allow for partial implementations of duck
+array modules that fall back to NumPy (although this is not recommended in
+general because such fall-back behavior can be error prone):
+
+.. code:: python
+
+ class MyArray:
+ def __array_module__(self, types):
+ if all(issubclass(t, MyArray) for t in types):
+ return ArrayModule()
+ else:
+ return NotImplemented
+
+ class ArrayModule:
+ def __getattr__(self, name):
+ import base_module
+ return getattr(base_module, name, getattr(numpy, name))
+
+Subclassing from ``numpy.ndarray``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All of the same guidance about well-defined type casting hierarchies from
+NEP-18 still applies. ``numpy.ndarray`` itself contains a matching
+implementation of ``__array_module__``, which is convenient for subclasses:
+
+.. code:: python
+
+ class ndarray:
+ def __array_module__(self, types):
+ if all(issubclass(t, ndarray) for t in types):
+ return numpy
+ else:
+ return NotImplemented
+
+NumPy's internal machinery
+==========================
+
+The type resolution rules of ``get_array_module`` follow the same model as
+Python and NumPy's existing dispatch protocols: subclasses are called before
+super-classes, and otherwise left to right. ``__array_module__`` is guaranteed
+to be called only a single time on each unique type.
+
+The actual implementation of `get_array_module` will be in C, but should be
+equivalent to this Python code:
+
+.. code:: python
+
+ def get_array_module(*arrays, default=numpy):
+ implementing_arrays, types = _implementing_arrays_and_types(arrays)
+ if not implementing_arrays and default is not None:
+ return default
+ for array in implementing_arrays:
+ module = array.__array_module__(types)
+ if module is not NotImplemented:
+ return module
+ raise TypeError("no common array module found")
+
+ def _implementing_arrays_and_types(relevant_arrays):
+ types = []
+ implementing_arrays = []
+ for array in relevant_arrays:
+ t = type(array)
+ if t not in types and hasattr(t, '__array_module__'):
+ types.append(t)
+ # Subclasses before superclasses, otherwise left to right
+ index = len(implementing_arrays)
+ for i, old_array in enumerate(implementing_arrays):
+ if issubclass(t, type(old_array)):
+ index = i
+ break
+ implementing_arrays.insert(index, array)
+ return implementing_arrays, types
+
+Relationship with ``__array_ufunc__`` and ``__array_function__``
+----------------------------------------------------------------
+
+These older protocols have distinct use-cases and should remain
+===============================================================
+
+``__array_module__`` is intended to resolve limitations of
+``__array_function__``, so it is natural to consider whether it could entirely
+replace ``__array_function__``. This would offer dual benefits: (1) simplifying
+the user-story about how to override NumPy and (2) removing the slowdown
+associated with checking for dispatch when calling every NumPy function.
+
+However, ``__array_module__`` and ``__array_function__`` are pretty different
+from a user perspective: the new protocol requires explicit calls to
+``get_array_module``, rather than simply reusing the original ``numpy``
+functions. This is probably fine
+for *libraries* that rely on duck-arrays, but may be frustratingly verbose for
+interactive use.
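+
+To illustrate the difference, here is a sketch of the same operation written
+against each protocol (``arrays`` stands for a sequence of duck arrays that
+implement both protocols):
+
+.. code:: python
+
+    # With __array_function__: the override happens implicitly inside numpy
+    np.stack(arrays)
+
+    # With __array_module__: the dispatch is spelled out at the call site
+    module = np.get_array_module(*arrays)
+    module.stack(arrays)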
+
+Some of the dispatching use-cases for ``__array_ufunc__`` are also solved by
+``__array_module__``, but not all of them. For example, it is still useful to
+be able to define non-NumPy ufuncs (e.g., from Numba or SciPy) in a generic way
+on non-NumPy arrays (e.g., with dask.array).
+
+Given their existing adoption and distinct use cases, we don't think it makes
+sense to remove or deprecate ``__array_function__`` and ``__array_ufunc__`` at
+this time.
+
+Mixin classes to implement ``__array_function__`` and ``__array_ufunc__``
+=========================================================================
+
+Despite the user-facing differences, ``__array_module__`` and a module
+implementing NumPy's API still contain sufficient functionality to
+implement dispatching with the existing duck array protocols.
+
+For example, the following mixin classes would provide sensible defaults for
+these special methods in terms of ``get_array_module`` and
+``__array_module__``:
+
+.. code:: python
+
+ class ArrayUfuncFromModuleMixin:
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ arrays = inputs + kwargs.get('out', ())
+ try:
+ array_module = np.get_array_module(*arrays)
+ except TypeError:
+ return NotImplemented
+
+ try:
+ # Note this may have false positive matches, if ufunc.__name__
+ # matches the name of a ufunc defined by NumPy. Unfortunately
+ # there is no way to determine in which module a ufunc was
+ # defined.
+ new_ufunc = getattr(array_module, ufunc.__name__)
+ except AttributeError:
+ return NotImplemented
+
+ try:
+ callable = getattr(new_ufunc, method)
+ except AttributeError:
+ return NotImplemented
+
+ return callable(*inputs, **kwargs)
+
+ class ArrayFunctionFromModuleMixin:
+
+ def __array_function__(self, func, types, args, kwargs):
+ array_module = self.__array_module__(types)
+ if array_module is NotImplemented:
+ return NotImplemented
+
+ # Traverse submodules to find the appropriate function
+ modules = func.__module__.split('.')
+            assert modules[0] == 'numpy'
+            module = array_module
+            for submodule in modules[1:]:
+                module = getattr(module, submodule, None)
+ new_func = getattr(module, func.__name__, None)
+ if new_func is None:
+ return NotImplemented
+
+ return new_func(*args, **kwargs)
+
+To make it easier to write duck arrays, we could also add these mixin classes
+into ``numpy.lib.mixins`` (but the examples above may suffice).
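+
+For example, a duck array class could (hypothetically) opt into both existing
+protocols by providing only ``__array_module__``:
+
+.. code:: python
+
+    class MyDuckArray(ArrayUfuncFromModuleMixin, ArrayFunctionFromModuleMixin):
+        def __array_module__(self, types):
+            if all(issubclass(t, MyDuckArray) for t in types):
+                return my_array_module   # the module implementing NumPy's API
+            return NotImplemented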
+
+Alternatives considered
+-----------------------
+
+Naming
+======
+
+We like the name ``__array_module__`` because it mirrors the existing
+``__array_function__`` and ``__array_ufunc__`` protocols. Another reasonable
+choice could be ``__array_namespace__``.
+
+It is less clear what the NumPy function that calls this protocol should be
+called (``get_array_module`` in this proposal). Some possible alternatives:
+``array_module``, ``common_array_module``, ``resolve_array_module``,
+``get_namespace``, ``get_numpy``, ``get_numpylike_module``,
+``get_duck_array_module``.
+
+.. _requesting-restricted-subsets:
+
+Requesting restricted subsets of NumPy's API
+============================================
+
+Over time, NumPy has accumulated a very large API surface, with over 600
+attributes in the top level ``numpy`` module alone. It is unlikely that any
+duck array library could or would want to implement all of these functions and
+classes, because the frequently used subset of NumPy is much smaller.
+
+We think it would be a useful exercise to define "minimal" subset(s) of NumPy's
+API, omitting rarely used or non-recommended functionality. For example,
+minimal NumPy might include ``stack``, but not the other stacking functions
+``column_stack``, ``dstack``, ``hstack`` and ``vstack``. This could clearly
+indicate to duck array authors and users what functionality is core and what
+functionality they can skip.
+
+Support for requesting a restricted subset of NumPy's API would be a natural
+feature to include in ``get_array_module`` and ``__array_module__``, e.g.,
+
+.. code:: python
+
+ # array_module is only guaranteed to contain "minimal" NumPy
+ array_module = np.get_array_module(*arrays, request='minimal')
+
+To facilitate testing with NumPy and use with any valid duck array library,
+NumPy itself would return restricted versions of the ``numpy`` module when
+``get_array_module`` is called only on NumPy arrays. Omitted functions would
+simply not exist.
+
+Unfortunately, we have not yet figured out what these restricted subsets should
+be, so it doesn't make sense to do this yet. When/if we do, we could either add
+new keyword arguments to ``get_array_module`` or add new top level functions,
+e.g., ``get_minimal_array_module``. We would also need to add either a new
+protocol patterned off of ``__array_module__`` (e.g.,
+``__array_module_minimal__``), or could add an optional second argument to
+``__array_module__`` (catching errors with ``try``/``except``).
+
+A new namespace for implicit dispatch
+=====================================
+
+Instead of supporting overrides in the main `numpy` namespace with
+``__array_function__``, we could create a new opt-in namespace, e.g.,
+``numpy.api``, with versions of NumPy functions that support dispatching. These
+overrides would need new opt-in protocols, e.g., ``__array_function_api__``
+patterned off of ``__array_function__``.
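+
+A purely hypothetical sketch of what using such a namespace could look like
+(neither ``numpy.api`` nor ``__array_function_api__`` exists today):
+
+.. code:: python
+
+    import numpy.api as napi   # hypothetical opt-in namespace
+
+    # Unlike np.asarray, napi.asarray would dispatch to duck arrays that
+    # implement the (also hypothetical) __array_function_api__ protocol.
+    result = napi.asarray(duck_array)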
+
+This would resolve the biggest limitations of ``__array_function__`` by being
+opt-in and would also allow for unambiguously overriding functions like
+``asarray``, because ``np.api.asarray`` would always mean "convert an
+array-like object." But it wouldn't solve all the dispatching needs met by
+``__array_module__``, and would leave us with supporting a considerably more
+complex protocol both for array users and implementors.
+
+We could potentially implement such a new namespace *via* the
+``__array_module__`` protocol. Certainly some users would find this convenient,
+because it is slightly less boilerplate. But this would leave users with a
+confusing choice: when should they use `get_array_module` vs.
+`np.api.something`? Also, we would have to add and maintain a whole new module,
+which is considerably more expensive than merely adding a function.
+
+Dispatching on both types and arrays instead of only types
+==========================================================
+
+Instead of supporting dispatch only via unique array types, we could also
+support dispatch via array objects, e.g., by passing an ``arrays`` argument as
+part of the ``__array_module__`` protocol. This could potentially be useful for
+dispatch for arrays with metadata, such as provided by Dask and Pint, but would
+impose costs in terms of type safety and complexity.
+
+For example, a library that supports arrays on both CPUs and GPUs might decide
+on which device to create new arrays from functions like ``ones`` based on
+input arguments:
+
+.. code:: python
+
+ class Array:
+ def __array_module__(self, types, arrays):
+            useful_arrays = tuple(a for a in arrays if isinstance(a, Array))
+ if not useful_arrays:
+ return NotImplemented
+ prefer_gpu = any(a.prefer_gpu for a in useful_arrays)
+ return ArrayModule(prefer_gpu)
+
+ class ArrayModule:
+ def __init__(self, prefer_gpu):
+ self.prefer_gpu = prefer_gpu
+
+ def __getattr__(self, name):
+ import base_module
+ base_func = getattr(base_module, name)
+ return functools.partial(base_func, prefer_gpu=self.prefer_gpu)
+
+This might be useful, but it's not clear if we really need it. Pint seems to
+get along OK without any explicit array creation routines (favoring
+multiplication by units, e.g., ``np.ones(5) * ureg.m``), and for the most part
+Dask is also OK with existing ``__array_function__`` style overrides (e.g.,
+favoring ``np.ones_like`` over ``np.ones``). Choosing whether to place an array
+on the CPU or GPU could be solved by `making array creation lazy
+<https://github.com/google/jax/pull/1668>`_.
+
+.. _appendix-design-choices:
+
+Appendix: design choices for API overrides
+------------------------------------------
+
+There is a large range of possible design choices for overriding NumPy's API.
+Here we discuss three major axes of the design decision that guided our design
+for ``__array_module__``.
+
+Opt-in vs. opt-out for users
+============================
+
+The ``__array_ufunc__`` and ``__array_function__`` protocols provide a
+mechanism for overriding NumPy functions *within NumPy's existing namespace*.
+This means that users need to explicitly opt-out if they do not want any
+overridden behavior, e.g., by casting arrays with ``np.asarray()``.
+
+In theory, this approach lowers the barrier for adopting these protocols in
+user code and libraries, because code that uses the standard NumPy namespace is
+automatically compatible. But in practice, this hasn't worked out. For example,
+most well-maintained libraries that use NumPy follow the best practice of
+casting all inputs with ``np.asarray()``, which they would have to explicitly
+relax to use ``__array_function__``. Our experience has been that making a
+library compatible with a new duck array type typically requires at least a
+small amount of work to accommodate differences in the data model and operations
+that can be implemented efficiently.
+
+These opt-out approaches also considerably complicate backwards compatibility
+for libraries that adopt these protocols, because by opting in as a library
+they also opt-in their users, whether they expect it or not. For winning over
+libraries that have been unable to adopt ``__array_function__``, an opt-in
+approach seems like a must.
+
+Explicit vs. implicit choice of implementation
+==============================================
+
+Both ``__array_ufunc__`` and ``__array_function__`` have implicit control over
+dispatching: the dispatched functions are determined via the appropriate
+protocols in every function call. This generalizes well to handling many
+different types of objects, as evidenced by its use for implementing arithmetic
+operators in Python, but it has two downsides:
+
+1. *Speed*: it imposes additional overhead in every function call, because each
+ function call needs to inspect each of its arguments for overrides. This is
+ why arithmetic on builtin Python numbers is slow.
+2. *Readability*: it is no longer immediately evident to readers of code what
+ happens when a function is called, because the function's implementation
+ could be overridden by any of its arguments.
+
+In contrast, importing a new library (e.g., ``import dask.array as da``) with
+an API matching NumPy is entirely explicit. There is no overhead from dispatch
+or ambiguity about which implementation is being used.
+
+Explicit and implicit choice of implementations are not mutually exclusive
+options. Indeed, most implementations of NumPy API overrides via
+``__array_function__`` that we are familiar with (namely, dask, CuPy and
+sparse, but not Pint) also include an explicit way to use their version of
+NumPy's API by importing a module directly (``dask.array``, ``cupy`` or
+``sparse``, respectively).
+
+Local vs. non-local vs. global control
+======================================
+
+The final design axis is how users control the choice of API:
+
+- **Local control**, as exemplified by multiple dispatch and Python protocols for
+ arithmetic, determines which implementation to use either by checking types
+ or calling methods on the direct arguments of a function.
+- **Non-local control** such as `np.errstate
+ <https://docs.scipy.org/doc/numpy/reference/generated/numpy.errstate.html>`_
+ overrides behavior with global-state via function decorators or
+ context-managers. Control is determined hierarchically, via the inner-most
+ context.
+- **Global control** provides a mechanism for users to set default behavior,
+ either via function calls or configuration files. For example, matplotlib
+ allows setting a global choice of plotting backend.
+
+Local control is generally considered a best practice for API design, because
+control flow is entirely explicit, which makes it the easiest to understand.
+Non-local and global control are occasionally used, but generally either due to
+ignorance or a lack of better alternatives.
+
+In the case of duck typing for NumPy's public API, we think non-local or global
+control would be mistakes, mostly because they **don't compose well**. If one
+library sets/needs one set of overrides and then internally calls a routine
+that expects another set of overrides, the resulting behavior may be very
+surprising. Higher order functions are especially problematic, because the
+context in which functions are evaluated may not be the context in which they
+are defined.
+
+One class of override use cases where we think non-local and global control are
+appropriate is for choosing a backend system that is guaranteed to have an
+entirely consistent interface, such as a faster alternative implementation of
+``numpy.fft`` on NumPy arrays. However, these are out of scope for the current
+proposal, which is focused on duck arrays.
diff --git a/doc/release/upcoming_changes/15233.highlight.rst b/doc/release/upcoming_changes/15233.highlight.rst
new file mode 100644
index 000000000..df96ee871
--- /dev/null
+++ b/doc/release/upcoming_changes/15233.highlight.rst
@@ -0,0 +1,4 @@
+* Code compatibility with Python versions < 3.5 (including Python 2) was
+  dropped from both the Python and C code. The shims in numpy.compat will
+ remain to support third-party packages, but they may be deprecated in a
+ future release.
diff --git a/doc/release/upcoming_changes/15251.c_api.rst b/doc/release/upcoming_changes/15251.c_api.rst
new file mode 100644
index 000000000..f391c904b
--- /dev/null
+++ b/doc/release/upcoming_changes/15251.c_api.rst
@@ -0,0 +1,10 @@
+Better support for ``const`` dimensions in API functions
+--------------------------------------------------------
+The following functions now accept a constant array of ``npy_intp``:
+
+* `PyArray_BroadcastToShape`
+* `PyArray_IntTupleFromIntp`
+* `PyArray_OverflowMultiplyList`
+
+Previously the caller would have to cast away the const-ness to call these
+functions.
diff --git a/doc/release/upcoming_changes/15255.compatibility.rst b/doc/release/upcoming_changes/15255.compatibility.rst
new file mode 100644
index 000000000..e360eeeb3
--- /dev/null
+++ b/doc/release/upcoming_changes/15255.compatibility.rst
@@ -0,0 +1,12 @@
+``numpy.distutils.compat`` has been removed
+-------------------------------------------
+This module contained only the function ``get_exception()``, which was used as::
+
+ try:
+ ...
+ except Exception:
+ e = get_exception()
+
+Its purpose was to handle the change in syntax introduced in Python 2.6, from
+``except Exception, e:`` to ``except Exception as e:``, meaning it was only
+necessary for codebases supporting Python 2.5 and older.
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index c910efa60..2eaf3a27a 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -2514,7 +2514,7 @@ this useful approach to looping over an array.
stride and that axis will be used.
.. c:function:: PyObject *PyArray_BroadcastToShape( \
- PyObject* arr, npy_intp *dimensions, int nd)
+ PyObject* arr, npy_intp const *dimensions, int nd)
Return an array iterator that is broadcast to iterate as an array
of the shape provided by *dimensions* and *nd*.
diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst
index a0a31d0ea..8502429ce 100644
--- a/doc/source/reference/random/multithreading.rst
+++ b/doc/source/reference/random/multithreading.rst
@@ -24,7 +24,7 @@ seed will produce the same outputs.
import concurrent.futures
import numpy as np
- class MultithreadedRNG(object):
+ class MultithreadedRNG:
def __init__(self, n, seed=None, threads=None):
rg = PCG64(seed)
if threads is None:
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index 361cf11b9..20c89e0b3 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -569,6 +569,7 @@ Math operations
add
subtract
multiply
+ matmul
divide
logaddexp
logaddexp2
@@ -577,6 +578,7 @@ Math operations
negative
positive
power
+ float_power
remainder
mod
fmod
@@ -635,6 +637,8 @@ The ratio of degrees to radians is :math:`180^{\circ}/\pi.`
arcsinh
arccosh
arctanh
+ degrees
+ radians
deg2rad
rad2deg
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 9679ec6c8..7d12bae41 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -6,6 +6,7 @@ Release Notes
:maxdepth: 3
1.19.0 <release/1.19.0-notes>
+ 1.18.1 <release/1.18.1-notes>
1.18.0 <release/1.18.0-notes>
1.17.5 <release/1.17.5-notes>
1.17.4 <release/1.17.4-notes>
diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst
index 24aa94445..d59f985cd 100644
--- a/doc/source/release/1.18.0-notes.rst
+++ b/doc/source/release/1.18.0-notes.rst
@@ -1,8 +1,8 @@
.. currentmodule:: numpy
-================================
-NumPy NumPy 1.18.0 Release Notes
-================================
+==========================
+NumPy 1.18.0 Release Notes
+==========================
In addition to the usual bug fixes, this NumPy release cleans up and documents
the new random C-API, expires a large number of old deprecations, and improves
diff --git a/doc/source/release/1.18.1-notes.rst b/doc/source/release/1.18.1-notes.rst
new file mode 100644
index 000000000..8bc502ecb
--- /dev/null
+++ b/doc/source/release/1.18.1-notes.rst
@@ -0,0 +1,52 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.18.1 Release Notes
+==========================
+
+This release contains fixes for bugs reported against NumPy 1.18.0. Two bugs
+in particular that caused widespread problems downstream were:
+
+- The cython random extension test was not using a temporary directory for
+ building, resulting in a permission violation. Fixed.
+
+- Numpy distutils was appending `-std=c99` to all C compiler runs, leading to
+ changed behavior and compile problems downstream. That flag is now only
+ applied when building numpy C code.
+
+The Python versions supported in this release are 3.5-3.8. Downstream
+developers should use Cython >= 0.29.14 for Python 3.8 support and OpenBLAS >=
+3.7 to avoid errors on the Skylake architecture.
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Matti Picus
+* Maxwell Aladago
+* Pauli Virtanen
+* Ralf Gommers
+* Tyler Reddy
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 13 pull requests were merged for this release.
+
+* `#15158 <https://github.com/numpy/numpy/pull/15158>`__: MAINT: Update pavement.py for towncrier.
+* `#15159 <https://github.com/numpy/numpy/pull/15159>`__: DOC: add moved modules to 1.18 release note
+* `#15161 <https://github.com/numpy/numpy/pull/15161>`__: MAINT, DOC: Minor backports and updates for 1.18.x
+* `#15176 <https://github.com/numpy/numpy/pull/15176>`__: TST: Add assert_array_equal test for big integer arrays
+* `#15184 <https://github.com/numpy/numpy/pull/15184>`__: BUG: use tmp dir and check version for cython test (#15170)
+* `#15220 <https://github.com/numpy/numpy/pull/15220>`__: BUG: distutils: fix msvc+gfortran openblas handling corner case
+* `#15221 <https://github.com/numpy/numpy/pull/15221>`__: BUG: remove -std=c99 for c++ compilation (#15194)
+* `#15222 <https://github.com/numpy/numpy/pull/15222>`__: MAINT: unskip test on win32
+* `#15223 <https://github.com/numpy/numpy/pull/15223>`__: TST: add BLAS ILP64 run in Travis & Azure
+* `#15245 <https://github.com/numpy/numpy/pull/15245>`__: MAINT: only add --std=c99 where needed
+* `#15246 <https://github.com/numpy/numpy/pull/15246>`__: BUG: lib: Fix handling of integer arrays by gradient.
+* `#15247 <https://github.com/numpy/numpy/pull/15247>`__: MAINT: Do not use private Python function in testing
+* `#15250 <https://github.com/numpy/numpy/pull/15250>`__: REL: Prepare for the NumPy 1.18.1 release.
diff --git a/doc/source/release/1.19.0-notes.rst b/doc/source/release/1.19.0-notes.rst
new file mode 100644
index 000000000..6e7fd69d4
--- /dev/null
+++ b/doc/source/release/1.19.0-notes.rst
@@ -0,0 +1,6 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.19.0 Release Notes
+==========================
+
diff --git a/numpy/_globals.py b/numpy/_globals.py
index 6361d94b0..9f44c7729 100644
--- a/numpy/_globals.py
+++ b/numpy/_globals.py
@@ -54,7 +54,7 @@ class VisibleDeprecationWarning(UserWarning):
VisibleDeprecationWarning.__module__ = 'numpy'
-class _NoValueType(object):
+class _NoValueType:
"""Special keyword value.
The instance of this class may be used as the default value assigned to a
diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py
index 579d467c3..8b6e3217e 100644
--- a/numpy/_pytesttester.py
+++ b/numpy/_pytesttester.py
@@ -42,7 +42,7 @@ def _show_numpy_info():
print("NumPy relaxed strides checking option:", relaxed_strides)
-class PytestTester(object):
+class PytestTester:
"""
Pytest test runner.
@@ -164,9 +164,6 @@ class PytestTester(object):
# Ignore python2.7 -3 warnings
pytest_args += [
- r"-W ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning",
- r"-W ignore:in 3\.x, __setslice__:DeprecationWarning",
- r"-W ignore:in 3\.x, __getslice__:DeprecationWarning",
r"-W ignore:buffer\(\) not supported in 3\.x:DeprecationWarning",
r"-W ignore:CObject type is not supported in 3\.x:DeprecationWarning",
r"-W ignore:comparing unequal types not supported in 3\.x:DeprecationWarning",
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index f24a8af27..3d3002744 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -113,7 +113,7 @@ def is_pathlib_path(obj):
return Path is not None and isinstance(obj, Path)
# from Python 3.7
-class contextlib_nullcontext(object):
+class contextlib_nullcontext:
"""Context manager that does no additional processing.
Used as a stand-in for a normal context manager, when a particular
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 36fc9d7d6..f36c6941f 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1034,7 +1034,7 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
- the data must be in exactly this format. Most builtin numeric types are
+ the data must be in exactly this format. Most builtin numeric types are
supported and extension types may be supported.
.. versionadded:: 1.18.0
@@ -1484,59 +1484,6 @@ add_newdoc('numpy.core.multiarray', 'promote_types',
""")
-if sys.version_info.major < 3:
- add_newdoc('numpy.core.multiarray', 'newbuffer',
- """
- newbuffer(size)
-
- Return a new uninitialized buffer object.
-
- Parameters
- ----------
- size : int
- Size in bytes of returned buffer object.
-
- Returns
- -------
- newbuffer : buffer object
- Returned, uninitialized buffer object of `size` bytes.
-
- """)
-
- add_newdoc('numpy.core.multiarray', 'getbuffer',
- """
- getbuffer(obj [,offset[, size]])
-
- Create a buffer object from the given object referencing a slice of
- length size starting at offset.
-
- Default is the entire buffer. A read-write buffer is attempted followed
- by a read-only buffer.
-
- Parameters
- ----------
- obj : object
-
- offset : int, optional
-
- size : int, optional
-
- Returns
- -------
- buffer_obj : buffer
-
- Examples
- --------
- >>> buf = np.getbuffer(np.ones(5), 1, 3)
- >>> len(buf)
- 3
- >>> buf[0]
- '\\x00'
- >>> buf
- <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
-
- """)
-
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
@@ -3951,7 +3898,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
Examples
--------
- For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
except that ``tolist`` changes numpy scalars to Python scalars:
>>> a = np.uint32([1, 2])
@@ -4122,21 +4069,26 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('var',
add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
"""
- a.view(dtype=None, type=None)
+ a.view([dtype][, type])
New view of array with the same data.
+ .. note::
+ Passing None for ``dtype`` is different from omitting the parameter,
+ since the former invokes ``dtype(None)`` which is an alias for
+ ``dtype('float_')``.
+
Parameters
----------
dtype : data-type or ndarray sub-class, optional
- Data-type descriptor of the returned view, e.g., float32 or int16. The
- default, None, results in the view having the same data-type as `a`.
+ Data-type descriptor of the returned view, e.g., float32 or int16.
+ Omitting it results in the view having the same data-type as `a`.
This argument can also be specified as an ndarray sub-class, which
then specifies the type of the returned object (this is equivalent to
setting the ``type`` parameter).
type : Python type, optional
- Type of the returned view, e.g., ndarray or matrix. Again, the
- default None results in type preservation.
+ Type of the returned view, e.g., ndarray or matrix. Again, omission
+ of the parameter results in type preservation.
Notes
-----
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index d7a46c2d0..88cf10a38 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -201,7 +201,7 @@ def _commastring(astr):
return result
-class dummy_ctype(object):
+class dummy_ctype:
def __init__(self, cls):
self._cls = cls
def __mul__(self, other):
@@ -236,16 +236,16 @@ _getintp_ctype.cache = None
# Used for .ctypes attribute of ndarray
-class _missing_ctypes(object):
+class _missing_ctypes:
def cast(self, num, obj):
return num.value
- class c_void_p(object):
+ class c_void_p:
def __init__(self, ptr):
self.value = ptr
-class _ctypes(object):
+class _ctypes:
def __init__(self, array, ptr=None):
self._arr = array
@@ -521,7 +521,7 @@ _pep3118_unsupported_map = {
'X': 'function pointers',
}
-class _Stream(object):
+class _Stream:
def __init__(self, s):
self.s = s
self.byteorder = '@'
@@ -843,7 +843,7 @@ def npy_ctypes_check(cls):
return False
-class recursive(object):
+class recursive:
'''
A decorator class for recursive nested functions.
Naive recursive nested functions hold a reference to themselves:
diff --git a/numpy/core/_ufunc_config.py b/numpy/core/_ufunc_config.py
index 39ccd3aca..4872a5385 100644
--- a/numpy/core/_ufunc_config.py
+++ b/numpy/core/_ufunc_config.py
@@ -288,7 +288,7 @@ def seterrcall(func):
Log error message:
- >>> class Log(object):
+ >>> class Log:
... def write(self, msg):
... print("LOG: %s" % msg)
...
@@ -363,7 +363,7 @@ def geterrcall():
return umath.geterrobj()[2]
-class _unspecified(object):
+class _unspecified:
pass
@@ -430,8 +430,8 @@ class errstate(contextlib.ContextDecorator):
"""
- def __init__(self, **kwargs):
- self.call = kwargs.pop('call', _Unspecified)
+ def __init__(self, *, call=_Unspecified, **kwargs):
+ self.call = call
self.kwargs = kwargs
def __enter__(self):
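
For reference, a small usage sketch (not part of the patch) of ``errstate`` after ``call`` becomes a keyword-only parameter; the handler name ``log_err`` is hypothetical, and any callable or object with a ``write()`` method is accepted.

    import numpy as np

    def log_err(err, flag):
        # Hypothetical handler invoked for error kinds set to 'call'.
        print("FP error:", err, "flag:", flag)

    # `call` must be given by keyword; the remaining keywords set the policies.
    with np.errstate(call=log_err, over='call', divide='ignore'):
        np.float64(1e300) * np.float64(1e300)   # overflow -> log_err is called
        np.float64(1.0) / np.float64(0.0)       # divide-by-zero silently ignored
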
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 696f64c6a..918da4a72 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -98,7 +98,7 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None,
@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
- formatter=None, sign=None, floatmode=None, **kwarg):
+ formatter=None, sign=None, floatmode=None, *, legacy=None):
"""
Set printing options.
@@ -247,11 +247,6 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
"""
- legacy = kwarg.pop('legacy', None)
- if kwarg:
- msg = "set_printoptions() got unexpected keyword argument '{}'"
- raise TypeError(msg.format(kwarg.popitem()[0]))
-
opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
suppress, nanstr, infstr, sign, formatter,
floatmode, legacy)
@@ -367,23 +362,22 @@ def repr_format(x):
def str_format(x):
return str(x)
-def _get_formatdict(data, **opt):
- prec, fmode = opt['precision'], opt['floatmode']
- supp, sign = opt['suppress'], opt['sign']
- legacy = opt['legacy']
+def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
+ formatter, **kwargs):
+ # note: extra arguments in kwargs are ignored
# wrapped in lambdas to avoid taking a code path with the wrong type of data
formatdict = {
'bool': lambda: BoolFormat(data),
'int': lambda: IntegerFormat(data),
- 'float': lambda:
- FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
- 'longfloat': lambda:
- FloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
- 'complexfloat': lambda:
- ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
- 'longcomplexfloat': lambda:
- ComplexFloatingFormat(data, prec, fmode, supp, sign, legacy=legacy),
+ 'float': lambda: FloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'longfloat': lambda: FloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'complexfloat': lambda: ComplexFloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
+ 'longcomplexfloat': lambda: ComplexFloatingFormat(
+ data, precision, floatmode, suppress, sign, legacy=legacy),
'datetime': lambda: DatetimeFormat(data, legacy=legacy),
'timedelta': lambda: TimedeltaFormat(data),
'object': lambda: _object_format,
@@ -396,7 +390,6 @@ def _get_formatdict(data, **opt):
def indirect(x):
return lambda: x
- formatter = opt['formatter']
if formatter is not None:
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
if 'all' in fkeys:
@@ -523,7 +516,7 @@ def _array2string_dispatcher(
suppress_small=None, separator=None, prefix=None,
style=None, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix=None,
- **kwarg):
+ *, legacy=None):
return (a,)
@@ -532,7 +525,7 @@ def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
edgeitems=None, sign=None, floatmode=None, suffix="",
- **kwarg):
+ *, legacy=None):
"""
Return a string representation of an array.
@@ -677,10 +670,6 @@ def array2string(a, max_line_width=None, precision=None,
'[0x0 0x1 0x2]'
"""
- legacy = kwarg.pop('legacy', None)
- if kwarg:
- msg = "array2string() got unexpected keyword argument '{}'"
- raise TypeError(msg.format(kwarg.popitem()[0]))
overrides = _make_options_dict(precision, threshold, edgeitems,
max_line_width, suppress_small, None, None,
@@ -849,15 +838,15 @@ def _none_or_positive_arg(x, name):
raise ValueError("{} must be >= 0".format(name))
return x
-class FloatingFormat(object):
+class FloatingFormat:
""" Formatter for subtypes of np.floating """
def __init__(self, data, precision, floatmode, suppress_small, sign=False,
- **kwarg):
+ *, legacy=None):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
- self._legacy = kwarg.get('legacy', False)
+ self._legacy = legacy
if self._legacy == '1.13':
# when not 0d, legacy does not support '-'
if data.shape != () and sign == '-':
@@ -1138,7 +1127,7 @@ def format_float_positional(x, precision=None, unique=True,
pad_right=pad_right)
-class IntegerFormat(object):
+class IntegerFormat:
def __init__(self, data):
if data.size > 0:
max_str_len = max(len(str(np.max(data))),
@@ -1151,7 +1140,7 @@ class IntegerFormat(object):
return self.format % x
-class BoolFormat(object):
+class BoolFormat:
def __init__(self, data, **kwargs):
# add an extra space so " True" and "False" have the same length and
# array elements align nicely when printed, except in 0d arrays
@@ -1161,23 +1150,27 @@ class BoolFormat(object):
return self.truestr if x else "False"
-class ComplexFloatingFormat(object):
+class ComplexFloatingFormat:
""" Formatter for subtypes of np.complexfloating """
def __init__(self, x, precision, floatmode, suppress_small,
- sign=False, **kwarg):
+ sign=False, *, legacy=None):
# for backcompatibility, accept bools
if isinstance(sign, bool):
sign = '+' if sign else '-'
floatmode_real = floatmode_imag = floatmode
- if kwarg.get('legacy', False) == '1.13':
+ if legacy == '1.13':
floatmode_real = 'maxprec_equal'
floatmode_imag = 'maxprec'
- self.real_format = FloatingFormat(x.real, precision, floatmode_real,
- suppress_small, sign=sign, **kwarg)
- self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag,
- suppress_small, sign='+', **kwarg)
+ self.real_format = FloatingFormat(
+ x.real, precision, floatmode_real, suppress_small,
+ sign=sign, legacy=legacy
+ )
+ self.imag_format = FloatingFormat(
+ x.imag, precision, floatmode_imag, suppress_small,
+ sign='+', legacy=legacy
+ )
def __call__(self, x):
r = self.real_format(x.real)
@@ -1190,7 +1183,7 @@ class ComplexFloatingFormat(object):
return r + i
-class _TimelikeFormat(object):
+class _TimelikeFormat:
def __init__(self, data):
non_nat = data[~isnat(data)]
if len(non_nat) > 0:
@@ -1253,7 +1246,7 @@ class TimedeltaFormat(_TimelikeFormat):
return str(x.astype('i8'))
-class SubArrayFormat(object):
+class SubArrayFormat:
def __init__(self, format_function):
self.format_function = format_function
@@ -1263,7 +1256,7 @@ class SubArrayFormat(object):
return "[" + ", ".join(self.__call__(a) for a in arr) + "]"
-class StructuredVoidFormat(object):
+class StructuredVoidFormat:
"""
Formatter for structured np.void objects.
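
A short sketch (illustrative, under the assumption of an ordinary float array ``a``) of how the ``legacy`` option is passed after this change: it is keyword-only in both ``set_printoptions`` and ``array2string``, and misspelled keywords now raise Python's own TypeError rather than the hand-rolled one removed above.

    import numpy as np

    a = np.array([0.1, 1.5, 2.25])

    # `legacy` must now be passed by keyword in both functions.
    print(np.array2string(a, precision=2, legacy='1.13'))

    np.set_printoptions(legacy='1.13')
    print(a)
    np.set_printoptions(legacy=False)    # back to the current formatting
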
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index 95939e7ec..4fa68a1f0 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -74,7 +74,7 @@ def _repl(str):
return str.replace('Bool', 'npy_bool')
-class StealRef(object):
+class StealRef:
def __init__(self, arg):
self.arg = arg # counting from 1
@@ -85,7 +85,7 @@ class StealRef(object):
return 'NPY_STEALS_REF_TO_ARG(%d)' % self.arg
-class NonNull(object):
+class NonNull:
def __init__(self, arg):
self.arg = arg # counting from 1
@@ -96,7 +96,7 @@ class NonNull(object):
return 'NPY_GCC_NONNULL(%d)' % self.arg
-class Function(object):
+class Function:
def __init__(self, name, return_type, args, doc=''):
self.name = name
self.return_type = _repl(return_type)
@@ -307,7 +307,7 @@ def write_file(filename, data):
# Those *Api classes instances know how to output strings for the generated code
-class TypeApi(object):
+class TypeApi:
def __init__(self, name, index, ptr_cast, api_name):
self.index = index
self.name = name
@@ -329,7 +329,7 @@ extern NPY_NO_EXPORT PyTypeObject %(type)s;
""" % {'type': self.name}
return astr
-class GlobalVarApi(object):
+class GlobalVarApi:
def __init__(self, name, index, type, api_name):
self.name = name
self.index = index
@@ -353,7 +353,7 @@ extern NPY_NO_EXPORT %(type)s %(name)s;
# Dummy to be able to consistently use *Api instances for all items in the
# array api
-class BoolValuesApi(object):
+class BoolValuesApi:
def __init__(self, name, index, api_name):
self.name = name
self.index = index
@@ -375,7 +375,7 @@ extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
"""
return astr
-class FunctionApi(object):
+class FunctionApi:
def __init__(self, name, index, annotations, return_type, args, api_name):
self.name = name
self.index = index
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 99e180477..f5691d950 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -19,16 +19,16 @@ ReorderableNone = "(Py_INCREF(Py_None), Py_None)"
# Sentinel value to specify using the full type description in the
# function name
-class FullTypeDescr(object):
+class FullTypeDescr:
pass
-class FuncNameSuffix(object):
+class FuncNameSuffix:
"""Stores the suffix to append when generating functions names.
"""
def __init__(self, suffix):
self.suffix = suffix
-class TypeDescription(object):
+class TypeDescription:
"""Type signature for a ufunc.
Attributes
@@ -118,7 +118,7 @@ def TD(types, f=None, astype=None, in_=None, out=None, simd=None):
tds.append(TypeDescription(t, f=fd, in_=i, out=o, astype=astype, simd=simdt))
return tds
-class Ufunc(object):
+class Ufunc:
"""Description of a ufunc.
Attributes
@@ -130,7 +130,7 @@ class Ufunc(object):
type_descriptions : list of TypeDescription objects
"""
def __init__(self, nin, nout, identity, docstring, typereso,
- *type_descriptions, **kwargs):
+ *type_descriptions, signature=None):
self.nin = nin
self.nout = nout
if identity is None:
@@ -139,13 +139,11 @@ class Ufunc(object):
self.docstring = docstring
self.typereso = typereso
self.type_descriptions = []
- self.signature = kwargs.pop('signature', None)
+ self.signature = signature
for td in type_descriptions:
self.type_descriptions.extend(td)
for td in self.type_descriptions:
td.finish_signature(self.nin, self.nout)
- if kwargs:
- raise ValueError('unknown kwargs %r' % str(kwargs))
# String-handling utilities to avoid locale-dependence.
@@ -304,17 +302,6 @@ defdict = {
],
TD(O, f='PyNumber_Multiply'),
),
-'divide':
- Ufunc(2, 1, None, # One is only a unit to the right, not the left
- docstrings.get('numpy.core.umath.divide'),
- 'PyUFunc_MixedDivisionTypeResolver',
- TD(intfltcmplx),
- [TypeDescription('m', FullTypeDescr, 'mq', 'm'),
- TypeDescription('m', FullTypeDescr, 'md', 'm'),
- TypeDescription('m', FullTypeDescr, 'mm', 'd'),
- ],
- TD(O, f='PyNumber_Divide'),
- ),
'floor_divide':
Ufunc(2, 1, None, # One is only a unit to the right, not the left
docstrings.get('numpy.core.umath.floor_divide'),
@@ -955,10 +942,6 @@ defdict = {
),
}
-if sys.version_info[0] >= 3:
- # Will be aliased to true_divide in umathmodule.c.src:InitOtherOperators
- del defdict['divide']
-
def indent(st, spaces):
indentation = ' '*spaces
indented = indentation + st.replace('\n', '\n'+indentation)
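
With the Python-2-only ``'divide'`` entry gone from the generator, ``np.divide`` is simply the alias of ``true_divide`` mentioned in the removed comment. A minimal check (not part of the patch):

    import numpy as np

    # On Python 3, divide and true_divide are the same ufunc object.
    assert np.divide is np.true_divide
    print(np.divide(np.int64(7), np.int64(2)))    # 3.5
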
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 8ae14ce30..ec3eb19d2 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -689,7 +689,7 @@ def _parse_einsum_input(operands):
return (input_subscripts, output_subscript, operands)
-def _einsum_path_dispatcher(*operands, **kwargs):
+def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
# NOTE: technically, we should only dispatch on array-like arguments, not
# subscripts (given as strings). But separating operands into
# arrays/subscripts is a little tricky/slow (given einsum's two supported
@@ -700,7 +700,7 @@ def _einsum_path_dispatcher(*operands, **kwargs):
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
-def einsum_path(*operands, **kwargs):
+def einsum_path(*operands, optimize='greedy', einsum_call=False):
"""
einsum_path(subscripts, *operands, optimize='greedy')
@@ -810,16 +810,8 @@ def einsum_path(*operands, **kwargs):
5 defg,hd->efgh efgh->efgh
"""
- # Make sure all keywords are valid
- valid_contract_kwargs = ['optimize', 'einsum_call']
- unknown_kwargs = [k for (k, v) in kwargs.items() if k
- not in valid_contract_kwargs]
- if len(unknown_kwargs):
- raise TypeError("Did not understand the following kwargs:"
- " %s" % unknown_kwargs)
-
# Figure out what the path really is
- path_type = kwargs.pop('optimize', True)
+ path_type = optimize
if path_type is True:
path_type = 'greedy'
if path_type is None:
@@ -845,7 +837,7 @@ def einsum_path(*operands, **kwargs):
raise TypeError("Did not understand the path: %s" % str(path_type))
# Hidden option, only einsum should call this
- einsum_call_arg = kwargs.pop("einsum_call", False)
+ einsum_call_arg = einsum_call
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
@@ -990,17 +982,17 @@ def einsum_path(*operands, **kwargs):
return (path, path_print)
-def _einsum_dispatcher(*operands, **kwargs):
+def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
# Arguably we dispatch on more arguments that we really should; see note in
# _einsum_path_dispatcher for why.
for op in operands:
yield op
- yield kwargs.get('out')
+ yield out
# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher, module='numpy')
-def einsum(*operands, **kwargs):
+def einsum(*operands, out=None, optimize=False, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
@@ -1345,39 +1337,29 @@ def einsum(*operands, **kwargs):
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
"""
-
- # Grab non-einsum kwargs; do not optimize by default.
- optimize_arg = kwargs.pop('optimize', False)
+ # Special handling if out is specified
+ specified_out = out is not None
# If no optimization, run pure einsum
- if optimize_arg is False:
+ if optimize is False:
+ if specified_out:
+ kwargs['out'] = out
return c_einsum(*operands, **kwargs)
- valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
- einsum_kwargs = {k: v for (k, v) in kwargs.items() if
- k in valid_einsum_kwargs}
-
- # Make sure all keywords are valid
- valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
+ # Check the kwargs to avoid a more cryptic error later, without having to
+ # repeat default values here
+ valid_einsum_kwargs = ['dtype', 'order', 'casting']
unknown_kwargs = [k for (k, v) in kwargs.items() if
- k not in valid_contract_kwargs]
-
+ k not in valid_einsum_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
- # Special handeling if out is specified
- specified_out = False
- out_array = einsum_kwargs.pop('out', None)
- if out_array is not None:
- specified_out = True
# Build the contraction list and operand
- operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
+ operands, contraction_list = einsum_path(*operands, optimize=optimize,
einsum_call=True)
- handle_out = False
-
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
@@ -1408,23 +1390,23 @@ def einsum(*operands, **kwargs):
# Build a new view if needed
if (tensor_result != results_index) or handle_out:
if handle_out:
- einsum_kwargs["out"] = out_array
- new_view = c_einsum(tensor_result + '->' + results_index, new_view, **einsum_kwargs)
+ kwargs["out"] = out
+ new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
# Call einsum
else:
# If out was specified
if handle_out:
- einsum_kwargs["out"] = out_array
+ kwargs["out"] = out
# Do the contraction
- new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
+ new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
# Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
- return out_array
+ return out
else:
return operands[0]
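
An illustrative call pattern (a sketch, with made-up operands) for the rewritten ``einsum``/``einsum_path`` signatures, in which ``out`` and ``optimize`` are real named parameters instead of entries popped from ``**kwargs``:

    import numpy as np

    rng = np.random.default_rng(0)
    a = rng.random((4, 5))
    b = rng.random((5, 6))
    out = np.empty((4, 6))

    # optimize=False forwards straight to c_einsum; `out` is passed when given.
    np.einsum('ij,jk->ik', a, b, out=out, optimize=False)

    # With optimization enabled, einsum_path is consulted first and its
    # result can be fed back in to skip the path search.
    path, report = np.einsum_path('ij,jk->ik', a, b, optimize='greedy')
    res = np.einsum('ij,jk->ik', a, b, optimize=path)
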
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index fda0933a3..b00ef64bd 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -29,12 +29,12 @@ def _fr1(a):
a.shape = ()
return a
-class MachArLike(object):
+class MachArLike:
""" Object to simulate MachAr instance """
def __init__(self,
ftype,
- **kwargs):
+ *, eps, epsneg, huge, tiny, ibeta, **kwargs):
params = _MACHAR_PARAMS[ftype]
float_conv = lambda v: array([v], ftype)
float_to_float = lambda v : _fr1(float_conv(v))
@@ -42,11 +42,11 @@ class MachArLike(object):
self.title = params['title']
# Parameter types same as for discovered MachAr object.
- self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
- self.epsneg = float_to_float(kwargs.pop('epsneg'))
- self.xmax = self.huge = float_to_float(kwargs.pop('huge'))
- self.xmin = self.tiny = float_to_float(kwargs.pop('tiny'))
- self.ibeta = params['itype'](kwargs.pop('ibeta'))
+ self.epsilon = self.eps = float_to_float(eps)
+ self.epsneg = float_to_float(epsneg)
+ self.xmax = self.huge = float_to_float(huge)
+ self.xmin = self.tiny = float_to_float(tiny)
+ self.ibeta = params['itype'](ibeta)
self.__dict__.update(kwargs)
self.precision = int(-log10(self.eps))
self.resolution = float_to_float(float_conv(10) ** (-self.precision))
@@ -289,7 +289,7 @@ def _discovered_machar(ftype):
@set_module('numpy')
-class finfo(object):
+class finfo:
"""
finfo(dtype)
@@ -440,7 +440,7 @@ class finfo(object):
@set_module('numpy')
-class iinfo(object):
+class iinfo:
"""
iinfo(type)
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index ad98d562b..bec6fcf30 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -353,21 +353,12 @@ struct NpyAuxData_tag {
#define NPY_USE_PYMEM 1
+
#if NPY_USE_PYMEM == 1
- /* numpy sometimes calls PyArray_malloc() with the GIL released. On Python
- 3.3 and older, it was safe to call PyMem_Malloc() with the GIL released.
- On Python 3.4 and newer, it's better to use PyMem_RawMalloc() to be able
- to use tracemalloc. On Python 3.6, calling PyMem_Malloc() with the GIL
- released is now a fatal error in debug mode. */
-# if PY_VERSION_HEX >= 0x03040000
-# define PyArray_malloc PyMem_RawMalloc
-# define PyArray_free PyMem_RawFree
-# define PyArray_realloc PyMem_RawRealloc
-# else
-# define PyArray_malloc PyMem_Malloc
-# define PyArray_free PyMem_Free
-# define PyArray_realloc PyMem_Realloc
-# endif
+/* use the Raw versions which are safe to call with the GIL released */
+#define PyArray_malloc PyMem_RawMalloc
+#define PyArray_free PyMem_RawFree
+#define PyArray_realloc PyMem_RawRealloc
#else
#define PyArray_malloc malloc
#define PyArray_free free
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 6fe53caa1..dbb5bd506 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -60,13 +60,7 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
PySlice_GetIndicesEx((PySliceObject *)op, nop, start, end, step, slicelength)
#endif
-/* <2.7.11 and <3.4.4 have the wrong argument type for Py_EnterRecursiveCall */
-#if (PY_VERSION_HEX < 0x02070B00) || \
- ((0x03000000 <= PY_VERSION_HEX) && (PY_VERSION_HEX < 0x03040400))
- #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall((char *)(x))
-#else
- #define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
-#endif
+#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */
#if PY_VERSION_HEX < 0x03050200
@@ -488,8 +482,6 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
* The main job here is to get rid of the improved error handling
* of PyCapsules. It's a shame...
*/
-#if PY_VERSION_HEX >= 0x03000000
-
static NPY_INLINE PyObject *
NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
{
@@ -534,41 +526,6 @@ NpyCapsule_Check(PyObject *ptr)
return PyCapsule_CheckExact(ptr);
}
-#else
-
-static NPY_INLINE PyObject *
-NpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
-{
- return PyCObject_FromVoidPtr(ptr, dtor);
-}
-
-static NPY_INLINE PyObject *
-NpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context,
- void (*dtor)(void *, void *))
-{
- return PyCObject_FromVoidPtrAndDesc(ptr, context, dtor);
-}
-
-static NPY_INLINE void *
-NpyCapsule_AsVoidPtr(PyObject *ptr)
-{
- return PyCObject_AsVoidPtr(ptr);
-}
-
-static NPY_INLINE void *
-NpyCapsule_GetDesc(PyObject *obj)
-{
- return PyCObject_GetDesc(obj);
-}
-
-static NPY_INLINE int
-NpyCapsule_Check(PyObject *ptr)
-{
- return PyCObject_Check(ptr);
-}
-
-#endif
-
#ifdef __cplusplus
}
#endif
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 27b83f7b5..c2e755958 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -369,18 +369,8 @@ typedef long npy_long;
typedef float npy_float;
typedef double npy_double;
-/*
- * Hash value compatibility.
- * As of Python 3.2 hash values are of type Py_hash_t.
- * Previous versions use C long.
- */
-#if PY_VERSION_HEX < 0x03020000
-typedef long npy_hash_t;
-#define NPY_SIZEOF_HASH_T NPY_SIZEOF_LONG
-#else
typedef Py_hash_t npy_hash_t;
#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP
-#endif
/*
* Disabling C99 complex usage: a lot of C code in numpy/scipy rely on being
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index 202cbf623..a48dc3d50 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -14,7 +14,7 @@ from numpy.core.overrides import set_module
# Need to speed this up...especially for longfloat
@set_module('numpy')
-class MachAr(object):
+class MachAr:
"""
Diagnosing machine parameters.
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 01fca1df5..5749afdcc 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -41,8 +41,6 @@ __all__ = [
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
-if sys.version_info.major < 3:
- __all__ += ['newbuffer', 'getbuffer']
# For backward compatibility, make sure pickle imports these functions from here
_reconstruct.__module__ = 'numpy.core.multiarray'
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index ae3dcd07a..f18ab6336 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -19,8 +19,6 @@ from .multiarray import (
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
zeros, normalize_axis_index)
-if sys.version_info[0] < 3:
- from .multiarray import newbuffer, getbuffer
from . import overrides
from . import umath
@@ -65,9 +63,6 @@ __all__ = [
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
'MAY_SHARE_EXACT', 'TooHardError', 'AxisError']
-if sys.version_info[0] < 3:
- __all__.extend(['getbuffer', 'newbuffer'])
-
@set_module('numpy')
class ComplexWarning(RuntimeWarning):
@@ -1721,7 +1716,7 @@ def indices(dimensions, dtype=int, sparse=False):
@set_module('numpy')
-def fromfunction(function, shape, **kwargs):
+def fromfunction(function, shape, *, dtype=float, **kwargs):
"""
Construct an array by executing a function over each coordinate.
@@ -1772,7 +1767,6 @@ def fromfunction(function, shape, **kwargs):
[2, 3, 4]])
"""
- dtype = kwargs.pop('dtype', float)
args = indices(shape, dtype=dtype)
return function(*args, **kwargs)
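
A short sketch (illustrative only; ``grid`` and ``scale`` are hypothetical names) of ``fromfunction`` after ``dtype`` becomes an explicit keyword-only parameter; any other keyword is still forwarded to the callable.

    import numpy as np

    # `dtype` is keyword-only; `scale` is passed through to the lambda.
    grid = np.fromfunction(lambda i, j, scale=1: scale * (i + j),
                           (3, 3), dtype=int, scale=10)
    print(grid)
    # [[ 0 10 20]
    #  [10 20 30]
    #  [20 30 40]]
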
diff --git a/numpy/core/records.py b/numpy/core/records.py
index b867d84d9..6717dc69b 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -96,7 +96,7 @@ def find_duplicate(list):
@set_module('numpy')
-class format_parser(object):
+class format_parser:
"""
Class to convert formats, names, titles description to a dtype.
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 6e5d63aae..0d383b251 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -36,7 +36,7 @@ NPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CH
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
-class CallOnceOnly(object):
+class CallOnceOnly:
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c
index 0ac1b01c6..d626d1260 100644
--- a/numpy/core/src/common/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -27,9 +27,9 @@
/* See array_assign.h for parameter documentation */
NPY_NO_EXPORT int
-broadcast_strides(int ndim, npy_intp *shape,
- int strides_ndim, npy_intp *strides_shape, npy_intp *strides,
- char *strides_name,
+broadcast_strides(int ndim, npy_intp const *shape,
+ int strides_ndim, npy_intp const *strides_shape, npy_intp const *strides,
+ char const *strides_name,
npy_intp *out_strides)
{
int idim, idim_start = ndim - strides_ndim;
@@ -84,8 +84,8 @@ broadcast_error: {
/* See array_assign.h for parameter documentation */
NPY_NO_EXPORT int
-raw_array_is_aligned(int ndim, npy_intp *shape,
- char *data, npy_intp *strides, int alignment)
+raw_array_is_aligned(int ndim, npy_intp const *shape,
+ char *data, npy_intp const *strides, int alignment)
{
/*
diff --git a/numpy/core/src/common/array_assign.h b/numpy/core/src/common/array_assign.h
index 69ef56bb4..f5d884dd9 100644
--- a/numpy/core/src/common/array_assign.h
+++ b/numpy/core/src/common/array_assign.h
@@ -44,8 +44,8 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_assign_scalar(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+raw_array_assign_scalar(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
PyArray_Descr *src_dtype, char *src_data);
/*
@@ -55,11 +55,11 @@ raw_array_assign_scalar(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
PyArray_Descr *src_dtype, char *src_data,
PyArray_Descr *wheremask_dtype, char *wheremask_data,
- npy_intp *wheremask_strides);
+ npy_intp const *wheremask_strides);
/******** LOW-LEVEL ARRAY MANIPULATION HELPERS ********/
@@ -80,9 +80,9 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-broadcast_strides(int ndim, npy_intp *shape,
- int strides_ndim, npy_intp *strides_shape, npy_intp *strides,
- char *strides_name,
+broadcast_strides(int ndim, npy_intp const *shape,
+ int strides_ndim, npy_intp const *strides_shape, npy_intp const *strides,
+ char const *strides_name,
npy_intp *out_strides);
/*
@@ -93,8 +93,8 @@ broadcast_strides(int ndim, npy_intp *shape,
* cannot-be-aligned, in which case 0 (false) is always returned.
*/
NPY_NO_EXPORT int
-raw_array_is_aligned(int ndim, npy_intp *shape,
- char *data, npy_intp *strides, int alignment);
+raw_array_is_aligned(int ndim, npy_intp const *shape,
+ char *data, npy_intp const *strides, int alignment);
/*
* Checks if an array is aligned to its "true alignment"
diff --git a/numpy/core/src/common/get_attr_string.h b/numpy/core/src/common/get_attr_string.h
index d3401aea6..8b7cf1c5b 100644
--- a/numpy/core/src/common/get_attr_string.h
+++ b/numpy/core/src/common/get_attr_string.h
@@ -7,9 +7,6 @@ _is_basic_python_type(PyTypeObject *tp)
return (
/* Basic number types */
tp == &PyBool_Type ||
-#if !defined(NPY_PY3K)
- tp == &PyInt_Type ||
-#endif
tp == &PyLong_Type ||
tp == &PyFloat_Type ||
tp == &PyComplex_Type ||
@@ -22,9 +19,6 @@ _is_basic_python_type(PyTypeObject *tp)
tp == &PyFrozenSet_Type ||
tp == &PyUnicode_Type ||
tp == &PyBytes_Type ||
-#if !defined(NPY_PY3K)
- tp == &PyString_Type ||
-#endif
/* other builtins */
tp == &PySlice_Type ||
@@ -50,25 +44,21 @@ _is_basic_python_type(PyTypeObject *tp)
* there is no such attribute, and NULL with an exception on failure.
*/
static NPY_INLINE PyObject *
-maybe_get_attr(PyObject *obj, char *name)
+maybe_get_attr(PyObject *obj, char const *name)
{
PyTypeObject *tp = Py_TYPE(obj);
PyObject *res = (PyObject *)NULL;
/* Attribute referenced by (char *)name */
if (tp->tp_getattr != NULL) {
- res = (*tp->tp_getattr)(obj, name);
+ res = (*tp->tp_getattr)(obj, (char *)name);
if (res == NULL && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
}
}
/* Attribute referenced by (PyObject *)name */
else if (tp->tp_getattro != NULL) {
-#if defined(NPY_PY3K)
PyObject *w = PyUnicode_InternFromString(name);
-#else
- PyObject *w = PyString_InternFromString(name);
-#endif
if (w == NULL) {
return (PyObject *)NULL;
}
@@ -91,7 +81,7 @@ maybe_get_attr(PyObject *obj, char *name)
* In future, could be made more like _Py_LookupSpecial
*/
static NPY_INLINE PyObject *
-PyArray_LookupSpecial(PyObject *obj, char *name)
+PyArray_LookupSpecial(PyObject *obj, char const *name)
{
PyTypeObject *tp = Py_TYPE(obj);
@@ -111,7 +101,7 @@ PyArray_LookupSpecial(PyObject *obj, char *name)
* Kept for backwards compatibility. In future, we should deprecate this.
*/
static NPY_INLINE PyObject *
-PyArray_LookupSpecial_OnInstance(PyObject *obj, char *name)
+PyArray_LookupSpecial_OnInstance(PyObject *obj, char const *name)
{
PyTypeObject *tp = Py_TYPE(obj);
diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index bacd27473..9208d5499 100644
--- a/numpy/core/src/common/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -306,30 +306,30 @@ PyArray_CastRawArrays(npy_intp count,
NPY_NO_EXPORT npy_intp
PyArray_TransferNDimToStrided(npy_intp ndim,
char *dst, npy_intp dst_stride,
- char *src, npy_intp *src_strides, npy_intp src_strides_inc,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ char *src, npy_intp const *src_strides, npy_intp src_strides_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_StridedUnaryOp *stransfer,
NpyAuxData *transferdata);
NPY_NO_EXPORT npy_intp
PyArray_TransferStridedToNDim(npy_intp ndim,
- char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
+ char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_StridedUnaryOp *stransfer,
NpyAuxData *transferdata);
NPY_NO_EXPORT npy_intp
PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
- char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
+ char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
npy_bool *mask, npy_intp mask_stride,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_MaskedStridedUnaryOp *stransfer,
NpyAuxData *data);
@@ -365,8 +365,8 @@ mapiter_set(PyArrayMapIterObject *mit);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
- char *data, npy_intp *strides,
+PyArray_PrepareOneRawArrayIter(int ndim, npy_intp const *shape,
+ char *data, npy_intp const *strides,
int *out_ndim, npy_intp *out_shape,
char **out_data, npy_intp *out_strides);
@@ -387,9 +387,9 @@ PyArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
- char *dataA, npy_intp *stridesA,
- char *dataB, npy_intp *stridesB,
+PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp const *shape,
+ char *dataA, npy_intp const *stridesA,
+ char *dataB, npy_intp const *stridesB,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB);
@@ -411,10 +411,10 @@ PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
- char *dataA, npy_intp *stridesA,
- char *dataB, npy_intp *stridesB,
- char *dataC, npy_intp *stridesC,
+PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape,
+ char *dataA, npy_intp const *stridesA,
+ char *dataB, npy_intp const *stridesB,
+ char *dataC, npy_intp const *stridesC,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB,
diff --git a/numpy/core/src/common/npy_longdouble.c b/numpy/core/src/common/npy_longdouble.c
index c580e0cce..260e02a64 100644
--- a/numpy/core/src/common/npy_longdouble.c
+++ b/numpy/core/src/common/npy_longdouble.c
@@ -100,16 +100,12 @@ done:
static PyObject *
_PyLong_Bytes(PyObject *long_obj) {
PyObject *bytes;
-#if defined(NPY_PY3K)
PyObject *unicode = PyObject_Str(long_obj);
if (unicode == NULL) {
return NULL;
}
bytes = PyUnicode_AsUTF8String(unicode);
Py_DECREF(unicode);
-#else
- bytes = PyObject_Str(long_obj);
-#endif
return bytes;
}
diff --git a/numpy/core/src/common/ucsnarrow.c b/numpy/core/src/common/ucsnarrow.c
index 8e293e9f2..946a72257 100644
--- a/numpy/core/src/common/ucsnarrow.c
+++ b/numpy/core/src/common/ucsnarrow.c
@@ -31,7 +31,7 @@
* Values above 0xffff are converted to surrogate pairs.
*/
NPY_NO_EXPORT int
-PyUCS2Buffer_FromUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs4length)
+PyUCS2Buffer_FromUCS4(Py_UNICODE *ucs2, npy_ucs4 const *ucs4, int ucs4length)
{
int i;
int numucs2 = 0;
@@ -63,7 +63,7 @@ PyUCS2Buffer_FromUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs4length)
* The return value is the actual size of the used part of the ucs4 buffer.
*/
NPY_NO_EXPORT int
-PyUCS2Buffer_AsUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs2len, int ucs4len)
+PyUCS2Buffer_AsUCS4(Py_UNICODE const *ucs2, npy_ucs4 *ucs4, int ucs2len, int ucs4len)
{
int i;
npy_ucs4 chr;
@@ -107,11 +107,11 @@ PyUCS2Buffer_AsUCS4(Py_UNICODE *ucs2, npy_ucs4 *ucs4, int ucs2len, int ucs4len)
* new_reference: PyUnicodeObject
*/
NPY_NO_EXPORT PyUnicodeObject *
-PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align)
+PyUnicode_FromUCS4(char const *src_char, Py_ssize_t size, int swap, int align)
{
Py_ssize_t ucs4len = size / sizeof(npy_ucs4);
- npy_ucs4 *buf = (npy_ucs4 *)src;
- int alloc = 0;
+ npy_ucs4 const *src = (npy_ucs4 const *)src_char;
+ npy_ucs4 *buf = NULL;
PyUnicodeObject *ret;
/* swap and align if needed */
@@ -121,22 +121,22 @@ PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align)
PyErr_NoMemory();
goto fail;
}
- alloc = 1;
memcpy(buf, src, size);
if (swap) {
byte_swap_vector(buf, ucs4len, sizeof(npy_ucs4));
}
+ src = buf;
}
/* trim trailing zeros */
- while (ucs4len > 0 && buf[ucs4len - 1] == 0) {
+ while (ucs4len > 0 && src[ucs4len - 1] == 0) {
ucs4len--;
}
/* produce PyUnicode object */
#ifdef Py_UNICODE_WIDE
{
- ret = (PyUnicodeObject *)PyUnicode_FromUnicode((Py_UNICODE*)buf,
+ ret = (PyUnicodeObject *)PyUnicode_FromUnicode((Py_UNICODE const*)src,
(Py_ssize_t) ucs4len);
if (ret == NULL) {
goto fail;
@@ -152,7 +152,7 @@ PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align)
PyErr_NoMemory();
goto fail;
}
- ucs2len = PyUCS2Buffer_FromUCS4(tmp, buf, ucs4len);
+ ucs2len = PyUCS2Buffer_FromUCS4(tmp, src, ucs4len);
ret = (PyUnicodeObject *)PyUnicode_FromUnicode(tmp, (Py_ssize_t) ucs2len);
free(tmp);
if (ret == NULL) {
@@ -161,13 +161,13 @@ PyUnicode_FromUCS4(char *src, Py_ssize_t size, int swap, int align)
}
#endif
- if (alloc) {
+ if (buf) {
free(buf);
}
return ret;
fail:
- if (alloc) {
+ if (buf) {
free(buf);
}
return NULL;
diff --git a/numpy/core/src/multiarray/_datetime.h b/numpy/core/src/multiarray/_datetime.h
index 3db1254d4..20f7a132c 100644
--- a/numpy/core/src/multiarray/_datetime.h
+++ b/numpy/core/src/multiarray/_datetime.h
@@ -1,7 +1,7 @@
#ifndef _NPY_PRIVATE__DATETIME_H_
#define _NPY_PRIVATE__DATETIME_H_
-extern NPY_NO_EXPORT char *_datetime_strings[NPY_DATETIME_NUMUNITS];
+extern NPY_NO_EXPORT char const *_datetime_strings[NPY_DATETIME_NUMUNITS];
extern NPY_NO_EXPORT int _days_per_month_table[2][12];
NPY_NO_EXPORT void
@@ -68,7 +68,7 @@ days_to_month_number(npy_datetime days);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_datetime_metadata_from_metastr(char *metastr, Py_ssize_t len,
+parse_datetime_metadata_from_metastr(char const *metastr, Py_ssize_t len,
PyArray_DatetimeMetaData *out_meta);
@@ -78,7 +78,7 @@ parse_datetime_metadata_from_metastr(char *metastr, Py_ssize_t len,
* contain its string length.
*/
NPY_NO_EXPORT PyArray_Descr *
-parse_dtype_from_datetime_typestr(char *typestr, Py_ssize_t len);
+parse_dtype_from_datetime_typestr(char const *typestr, Py_ssize_t len);
/*
* Converts a substring given by 'str' and 'len' into
@@ -88,7 +88,7 @@ parse_dtype_from_datetime_typestr(char *typestr, Py_ssize_t len);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT NPY_DATETIMEUNIT
-parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr);
+parse_datetime_unit_from_string(char const *str, Py_ssize_t len, char const *metastr);
/*
* Translate divisors into multiples of smaller units.
@@ -99,7 +99,7 @@ parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr);
*/
NPY_NO_EXPORT int
convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta,
- int den, char *metastr);
+ int den, char const *metastr);
/*
* Determines whether the 'divisor' metadata divides evenly into
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 5e2cf0edd..7007dd204 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -55,7 +55,7 @@ EXPORT(void*) forward_pointer(void *x)
* #typenum = NPY_DOUBLE, NPY_INT#
*/
static int copy_@name@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx,
- npy_intp *bounds,
+ npy_intp const *bounds,
PyObject **out)
{
npy_intp i, j;
@@ -97,7 +97,7 @@ static int copy_@name@(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *ni
/**end repeat**/
static int copy_object(PyArrayIterObject *itx, PyArrayNeighborhoodIterObject *niterx,
- npy_intp *bounds,
+ npy_intp const *bounds,
PyObject **out)
{
npy_intp i, j;
@@ -251,7 +251,7 @@ clean_ax:
static int
copy_double_double(PyArrayNeighborhoodIterObject *itx,
PyArrayNeighborhoodIterObject *niterx,
- npy_intp *bounds,
+ npy_intp const *bounds,
PyObject **out)
{
npy_intp i, j;
@@ -771,30 +771,6 @@ npy_discard(PyObject* NPY_UNUSED(self), PyObject* args)
Py_RETURN_NONE;
}
-#if !defined(NPY_PY3K)
-static PyObject *
-int_subclass(PyObject *dummy, PyObject *args)
-{
-
- PyObject *result = NULL;
- PyObject *scalar_object = NULL;
-
- if (!PyArg_UnpackTuple(args, "test_int_subclass", 1, 1, &scalar_object))
- return NULL;
-
- if (PyInt_Check(scalar_object))
- result = Py_True;
- else
- result = Py_False;
-
- Py_INCREF(result);
-
- return result;
-
-}
-#endif
-
-
/*
* Create python string from a FLAG and or the corresponding PyBuf flag
* for the use in get_buffer_info.
@@ -1210,11 +1186,7 @@ array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject
for (j = 0; j < nterms; ++j) {
PyObject *obj;
-#if defined(NPY_PY3K)
obj = PyLong_FromSsize_t(x[j]);
-#else
- obj = PyInt_FromSsize_t(x[j]);
-#endif
if (obj == NULL) {
goto fail;
}
@@ -2018,11 +1990,6 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"npy_discard",
npy_discard,
METH_O, NULL},
-#if !defined(NPY_PY3K)
- {"test_int_subclass",
- int_subclass,
- METH_VARARGS, NULL},
-#endif
{"get_buffer_info",
get_buffer_info,
METH_VARARGS, NULL},
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index a7f34cbe5..c2b7e9ca7 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -48,11 +48,6 @@ static cache_bucket datacache[NBUCKETS];
static cache_bucket dimcache[NBUCKETS_DIM];
/* as the cache is managed in global variables verify the GIL is held */
-#if defined(NPY_PY3K)
-#define NPY_CHECK_GIL_HELD() PyGILState_Check()
-#else
-#define NPY_CHECK_GIL_HELD() 1
-#endif
/*
* very simplistic small memory block cache to avoid more expensive libc
@@ -67,7 +62,7 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
void * p;
assert((esz == 1 && cache == datacache) ||
(esz == sizeof(npy_intp) && cache == dimcache));
- assert(NPY_CHECK_GIL_HELD());
+ assert(PyGILState_Check());
if (nelem < msz) {
if (cache[nelem].available > 0) {
return cache[nelem].ptrs[--(cache[nelem].available)];
@@ -102,7 +97,7 @@ static NPY_INLINE void
_npy_free_cache(void * p, npy_uintp nelem, npy_uint msz,
cache_bucket * cache, void (*dealloc)(void *))
{
- assert(NPY_CHECK_GIL_HELD());
+ assert(PyGILState_Check());
if (p != NULL && nelem < msz) {
if (cache[nelem].available < NCACHE) {
cache[nelem].ptrs[cache[nelem].available++] = p;
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index 7ff33ebd7..e40b6c719 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -29,8 +29,8 @@
* elements, as required by the copy/casting code in lowlevel_strided_loops.c
*/
NPY_NO_EXPORT int
-copycast_isaligned(int ndim, npy_intp *shape,
- PyArray_Descr *dtype, char *data, npy_intp *strides)
+copycast_isaligned(int ndim, npy_intp const *shape,
+ PyArray_Descr *dtype, char *data, npy_intp const *strides)
{
int aligned;
int big_aln, small_aln;
@@ -72,9 +72,9 @@ copycast_isaligned(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_assign_array(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
- PyArray_Descr *src_dtype, char *src_data, npy_intp *src_strides)
+raw_array_assign_array(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
+ PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides)
{
int idim;
npy_intp shape_it[NPY_MAXDIMS];
@@ -152,11 +152,11 @@ raw_array_assign_array(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_wheremasked_assign_array(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
- PyArray_Descr *src_dtype, char *src_data, npy_intp *src_strides,
+raw_array_wheremasked_assign_array(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
+ PyArray_Descr *src_dtype, char *src_data, npy_intp const *src_strides,
PyArray_Descr *wheremask_dtype, char *wheremask_data,
- npy_intp *wheremask_strides)
+ npy_intp const *wheremask_strides)
{
int idim;
npy_intp shape_it[NPY_MAXDIMS];
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index ecb5be47b..6bc9bcfee 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -30,8 +30,8 @@
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_assign_scalar(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+raw_array_assign_scalar(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
PyArray_Descr *src_dtype, char *src_data)
{
int idim;
@@ -101,11 +101,11 @@ raw_array_assign_scalar(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
- PyArray_Descr *dst_dtype, char *dst_data, npy_intp *dst_strides,
+raw_array_wheremasked_assign_scalar(int ndim, npy_intp const *shape,
+ PyArray_Descr *dst_dtype, char *dst_data, npy_intp const *dst_strides,
PyArray_Descr *src_dtype, char *src_data,
PyArray_Descr *wheremask_dtype, char *wheremask_data,
- npy_intp *wheremask_strides)
+ npy_intp const *wheremask_strides)
{
int idim;
npy_intp shape_it[NPY_MAXDIMS], dst_strides_it[NPY_MAXDIMS];
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index a5cebfbd8..0c554d31b 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -388,7 +388,7 @@ PyArray_CopyObject(PyArrayObject *dest, PyObject *src_object)
/*NUMPY_API
*/
NPY_NO_EXPORT int
-PyArray_TypeNumFromName(char *str)
+PyArray_TypeNumFromName(char const *str)
{
int i;
PyArray_Descr *descr;
@@ -614,7 +614,7 @@ PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op))
/*NUMPY_API
*/
NPY_NO_EXPORT int
-PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
+PyArray_CompareUCS4(npy_ucs4 const *s1, npy_ucs4 const *s2, size_t len)
{
npy_ucs4 c1, c2;
while(len-- > 0) {
@@ -703,35 +703,40 @@ PyArray_FailUnlessWriteable(PyArrayObject *obj, const char *name)
If they are NULL terminated, then stop comparison.
*/
static int
-_myunincmp(npy_ucs4 *s1, npy_ucs4 *s2, int len1, int len2)
+_myunincmp(npy_ucs4 const *s1, npy_ucs4 const *s2, int len1, int len2)
{
- npy_ucs4 *sptr;
- npy_ucs4 *s1t=s1, *s2t=s2;
+ npy_ucs4 const *sptr;
+ npy_ucs4 *s1t = NULL;
+ npy_ucs4 *s2t = NULL;
int val;
npy_intp size;
int diff;
+ /* Replace `s1` and `s2` with aligned copies if needed */
if ((npy_intp)s1 % sizeof(npy_ucs4) != 0) {
size = len1*sizeof(npy_ucs4);
s1t = malloc(size);
memcpy(s1t, s1, size);
+ s1 = s1t;
}
if ((npy_intp)s2 % sizeof(npy_ucs4) != 0) {
size = len2*sizeof(npy_ucs4);
s2t = malloc(size);
memcpy(s2t, s2, size);
+        s2 = s2t;
}
- val = PyArray_CompareUCS4(s1t, s2t, PyArray_MIN(len1,len2));
+
+ val = PyArray_CompareUCS4(s1, s2, PyArray_MIN(len1,len2));
if ((val != 0) || (len1 == len2)) {
goto finish;
}
if (len2 > len1) {
- sptr = s2t+len1;
+ sptr = s2+len1;
val = -1;
diff = len2-len1;
}
else {
- sptr = s1t+len2;
+ sptr = s1+len2;
val = 1;
diff=len1-len2;
}
@@ -744,10 +749,11 @@ _myunincmp(npy_ucs4 *s1, npy_ucs4 *s2, int len1, int len2)
val = 0;
finish:
- if (s1t != s1) {
+ /* Cleanup the aligned copies */
+ if (s1t) {
free(s1t);
}
- if (s2t != s2) {
+ if (s2t) {
free(s2t);
}
return val;
@@ -763,9 +769,9 @@ _myunincmp(npy_ucs4 *s1, npy_ucs4 *s2, int len1, int len2)
* If they are NULL terminated, then stop comparison.
*/
static int
-_mystrncmp(char *s1, char *s2, int len1, int len2)
+_mystrncmp(char const *s1, char const *s2, int len1, int len2)
{
- char *sptr;
+ char const *sptr;
int val;
int diff;
@@ -827,7 +833,7 @@ static void _unistripw(npy_ucs4 *s, int n)
static char *
-_char_copy_n_strip(char *original, char *temp, int nc)
+_char_copy_n_strip(char const *original, char *temp, int nc)
{
if (nc > SMALL_STRING) {
temp = malloc(nc);
@@ -850,7 +856,7 @@ _char_release(char *ptr, int nc)
}
static char *
-_uni_copy_n_strip(char *original, char *temp, int nc)
+_uni_copy_n_strip(char const *original, char *temp, int nc)
{
if (nc*sizeof(npy_ucs4) > SMALL_STRING) {
temp = malloc(nc*sizeof(npy_ucs4));
@@ -919,7 +925,7 @@ _compare_strings(PyArrayObject *result, PyArrayMultiIterObject *multi,
int N1, N2;
int (*compfunc)(void *, void *, int, int);
void (*relfunc)(char *, int);
- char* (*stripfunc)(char *, char *, int);
+ char* (*stripfunc)(char const *, char *, int);
compfunc = func;
dptr = (npy_bool *)PyArray_DATA(result);
@@ -998,22 +1004,18 @@ _strings_richcompare(PyArrayObject *self, PyArrayObject *other, int cmp_op,
{
PyArrayObject *result;
PyArrayMultiIterObject *mit;
- int val, cast = 0;
+ int val;
/* Cast arrays to a common type */
if (PyArray_TYPE(self) != PyArray_DESCR(other)->type_num) {
-#if defined(NPY_PY3K)
/*
* Comparison between Bytes and Unicode is not defined in Py3K;
* we follow.
*/
Py_INCREF(Py_NotImplemented);
return Py_NotImplemented;
-#else
- cast = 1;
-#endif /* define(NPY_PY3K) */
}
- if (cast || (PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(other))) {
+ if (PyArray_ISNOTSWAPPED(self) != PyArray_ISNOTSWAPPED(other)) {
PyObject *new;
if (PyArray_TYPE(self) == NPY_STRING &&
PyArray_DESCR(other)->type_num == NPY_UNICODE) {
@@ -1331,14 +1333,6 @@ _failed_comparison_workaround(PyArrayObject *self, PyObject *other, int cmp_op)
* get us the desired TypeError, but on python 2, one gets strange
* ordering, so we emit a warning.
*/
-#if !defined(NPY_PY3K)
- /* 2015-05-14, 1.10 */
- if (DEPRECATE(
- "unorderable dtypes; returning scalar but in "
- "the future this will be an error") < 0) {
- goto fail;
- }
-#endif
Py_XDECREF(exc);
Py_XDECREF(val);
Py_XDECREF(tb);
@@ -1601,7 +1595,7 @@ PyArray_ElementStrides(PyObject *obj)
/*NUMPY_API*/
NPY_NO_EXPORT npy_bool
PyArray_CheckStrides(int elsize, int nd, npy_intp numbytes, npy_intp offset,
- npy_intp *dims, npy_intp *newstrides)
+ npy_intp const *dims, npy_intp const *newstrides)
{
npy_intp begin, end;
npy_intp lower_offset;
@@ -1788,12 +1782,7 @@ array_free(PyObject * v)
NPY_NO_EXPORT PyTypeObject PyArray_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.ndarray", /* tp_name */
NPY_SIZEOF_PYARRAYOBJECT, /* tp_basicsize */
0, /* tp_itemsize */
@@ -1802,11 +1791,7 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = {
(printfunc)NULL, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
(reprfunc)array_repr, /* tp_repr */
&array_as_number, /* tp_as_number */
&array_as_sequence, /* tp_as_sequence */
@@ -1821,12 +1806,7 @@ NPY_NO_EXPORT PyTypeObject PyArray_Type = {
(getattrofunc)0, /* tp_getattro */
(setattrofunc)0, /* tp_setattro */
&array_as_buffer, /* tp_as_buffer */
- (Py_TPFLAGS_DEFAULT
-#if !defined(NPY_PY3K)
- | Py_TPFLAGS_CHECKTYPES
- | Py_TPFLAGS_HAVE_NEWBUFFER
-#endif
- | Py_TPFLAGS_BASETYPE), /* tp_flags */
+ (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE), /* tp_flags */
0, /* tp_doc */
(traverseproc)0, /* tp_traverse */
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 077fb0ec8..3d6a5eda8 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -330,11 +330,7 @@ string_to_long_double(PyObject*op)
/* Convert python long objects to a longdouble, without precision or range
* loss via a double.
*/
- if ((PyLong_Check(op) && !PyBool_Check(op))
-#if !defined(NPY_PY3K)
- || (PyInt_Check(op) && !PyBool_Check(op))
-#endif
- ) {
+ if ((PyLong_Check(op) && !PyBool_Check(op))) {
return npy_longdouble_from_PyLong(op);
}
@@ -470,7 +466,6 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap)
"setting an array element with a sequence");
return -1;
}
-#if defined(NPY_PY3K)
if (PyBytes_Check(op)) {
/* Try to decode from ASCII */
temp = PyUnicode_FromEncodedObject(op, "ASCII", "strict");
@@ -479,9 +474,6 @@ UNICODE_setitem(PyObject *op, void *ov, void *vap)
}
}
else if ((temp=PyObject_Str(op)) == NULL) {
-#else
- if ((temp=PyObject_Unicode(op)) == NULL) {
-#endif
return -1;
}
ptr = PyUnicode_AS_UNICODE(temp);
@@ -561,7 +553,6 @@ STRING_setitem(PyObject *op, void *ov, void *vap)
"setting an array element with a sequence");
return -1;
}
-#if defined(NPY_PY3K)
if (PyUnicode_Check(op)) {
/* Assume ASCII codec -- function similarly as Python 2 */
temp = PyUnicode_AsASCIIString(op);
@@ -588,11 +579,6 @@ STRING_setitem(PyObject *op, void *ov, void *vap)
return -1;
}
}
-#else
- if ((temp = PyObject_Str(op)) == NULL) {
- return -1;
- }
-#endif
if (PyBytes_AsStringAndSize(temp, &ptr, &len) < 0) {
Py_DECREF(temp);
return -1;
@@ -919,7 +905,6 @@ VOID_setitem(PyObject *op, void *input, void *vap)
* undiscerning case: It interprets any object as a buffer
* and reads as many bytes as possible, padding with 0.
*/
-#if defined(NPY_PY3K)
{
Py_buffer view;
@@ -933,20 +918,6 @@ VOID_setitem(PyObject *op, void *input, void *vap)
PyBuffer_Release(&view);
_dealloc_cached_buffer_info(op);
}
-#else
- {
- const void *buffer;
- Py_ssize_t buflen;
-
- if (PyObject_AsReadBuffer(op, &buffer, &buflen) < 0) {
- return -1;
- }
- memcpy(ip, buffer, PyArray_MIN(buflen, itemsize));
- if (itemsize > buflen) {
- memset(ip + buflen, 0, itemsize - buflen);
- }
- }
-#endif
return 0;
}
@@ -1530,7 +1501,7 @@ OBJECT_to_@TOTYPE@(void *input, void *output, npy_intp n,
* #convert = 1*18, 0*3, 1*2,
* 1*18, 0*3, 1*2,
* 0*23#
- * #convstr = (Int*9, Long*2, Float*4, Complex*3, Tuple*3, Long*2)*3#
+ * #convstr = (Long*9, Long*2, Float*4, Complex*3, Tuple*3, Long*2)*3#
*/
#if @convert@
@@ -1556,7 +1527,7 @@ static void
return;
}
-#if defined(NPY_PY3K) && defined(IS_STRING)
+#if defined(IS_STRING)
/* Work around some Python 3K */
new = PyUnicode_FromEncodedObject(temp, "ascii", "strict");
Py_DECREF(temp);
@@ -1571,13 +1542,7 @@ static void
/* call out to the Python builtin given by convstr */
args = Py_BuildValue("(N)", temp);
-#if defined(NPY_PY3K)
-#define PyInt_Type PyLong_Type
-#endif
new = Py@convstr@_Type.tp_new(&Py@convstr@_Type, args, NULL);
-#if defined(NPY_PY3K)
-#undef PyInt_Type
-#endif
Py_DECREF(args);
temp = new;
if (temp == NULL) {
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 2939a2e39..576186362 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -21,59 +21,6 @@
**************** Implement Buffer Protocol ****************************
*************************************************************************/
-/* removed multiple segment interface */
-
-#if !defined(NPY_PY3K)
-static Py_ssize_t
-array_getsegcount(PyArrayObject *self, Py_ssize_t *lenp)
-{
- if (lenp) {
- *lenp = PyArray_NBYTES(self);
- }
- if (PyArray_ISONESEGMENT(self)) {
- return 1;
- }
- if (lenp) {
- *lenp = 0;
- }
- return 0;
-}
-
-static Py_ssize_t
-array_getreadbuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr)
-{
- if (segment != 0) {
- PyErr_SetString(PyExc_ValueError,
- "accessing non-existing array segment");
- return -1;
- }
- if (PyArray_ISONESEGMENT(self)) {
- *ptrptr = PyArray_DATA(self);
- return PyArray_NBYTES(self);
- }
- PyErr_SetString(PyExc_ValueError, "array is not a single segment");
- *ptrptr = NULL;
- return -1;
-}
-
-
-static Py_ssize_t
-array_getwritebuf(PyArrayObject *self, Py_ssize_t segment, void **ptrptr)
-{
- if (PyArray_FailUnlessWriteable(self, "buffer source array") < 0) {
- return -1;
- }
- return array_getreadbuf(self, segment, (void **) ptrptr);
-}
-
-static Py_ssize_t
-array_getcharbuf(PyArrayObject *self, Py_ssize_t segment, constchar **ptrptr)
-{
- return array_getreadbuf(self, segment, (void **) ptrptr);
-}
-#endif /* !defined(NPY_PY3K) */
-
-
/*************************************************************************
* PEP 3118 buffer protocol
*
@@ -151,13 +98,8 @@ _append_field_name(_tmp_string_t *str, PyObject *name)
char *p;
Py_ssize_t len;
PyObject *tmp;
-#if defined(NPY_PY3K)
/* FIXME: XXX -- should it use UTF-8 here? */
tmp = PyUnicode_AsUTF8String(name);
-#else
- tmp = name;
- Py_INCREF(tmp);
-#endif
if (tmp == NULL || PyBytes_AsStringAndSize(tmp, &p, &len) < 0) {
PyErr_Clear();
PyErr_SetString(PyExc_ValueError, "invalid field name");
@@ -952,12 +894,6 @@ _dealloc_cached_buffer_info(PyObject *self)
/*************************************************************************/
NPY_NO_EXPORT PyBufferProcs array_as_buffer = {
-#if !defined(NPY_PY3K)
- (readbufferproc)array_getreadbuf, /*bf_getreadbuffer*/
- (writebufferproc)array_getwritebuf, /*bf_getwritebuffer*/
- (segcountproc)array_getsegcount, /*bf_getsegcount*/
- (charbufferproc)array_getcharbuf, /*bf_getcharbuffer*/
-#endif
(getbufferproc)array_getbuffer,
(releasebufferproc)0,
};
@@ -968,13 +904,13 @@ NPY_NO_EXPORT PyBufferProcs array_as_buffer = {
*/
static int
-_descriptor_from_pep3118_format_fast(char *s, PyObject **result);
+_descriptor_from_pep3118_format_fast(char const *s, PyObject **result);
static int
_pep3118_letter_to_type(char letter, int native, int complex);
NPY_NO_EXPORT PyArray_Descr*
-_descriptor_from_pep3118_format(char *s)
+_descriptor_from_pep3118_format(char const *s)
{
char *buf, *p;
int in_name = 0;
@@ -1059,7 +995,7 @@ _descriptor_from_pep3118_format(char *s)
*/
static int
-_descriptor_from_pep3118_format_fast(char *s, PyObject **result)
+_descriptor_from_pep3118_format_fast(char const *s, PyObject **result)
{
PyArray_Descr *descr;
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index 1d72a5227..b9ed5a9e8 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -772,11 +772,7 @@ PyArray_Mean(PyArrayObject *self, int axis, int rtype, PyArrayObject *out)
return NULL;
}
if (!out) {
-#if defined(NPY_PY3K)
ret = PyNumber_TrueDivide(obj1, obj2);
-#else
- ret = PyNumber_Divide(obj1, obj2);
-#endif
}
else {
ret = PyObject_CallFunction(n_ops.divide, "OOO", out, obj2, out);
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index c991f7428..3ec151368 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -165,22 +165,10 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if ((temp = PyObject_Str(obj)) == NULL) {
goto fail;
}
-#if defined(NPY_PY3K)
- #if PY_VERSION_HEX >= 0x03030000
itemsize = PyUnicode_GetLength(temp);
- #else
- itemsize = PyUnicode_GET_SIZE(temp);
- #endif
-#else
- itemsize = PyString_GET_SIZE(temp);
-#endif
}
else if (string_type == NPY_UNICODE) {
-#if defined(NPY_PY3K)
if ((temp = PyObject_Str(obj)) == NULL) {
-#else
- if ((temp = PyObject_Unicode(obj)) == NULL) {
-#endif
goto fail;
}
itemsize = PyUnicode_GET_DATA_SIZE(temp);
@@ -221,22 +209,10 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if ((temp = PyObject_Str(obj)) == NULL) {
goto fail;
}
-#if defined(NPY_PY3K)
- #if PY_VERSION_HEX >= 0x03030000
itemsize = PyUnicode_GetLength(temp);
- #else
- itemsize = PyUnicode_GET_SIZE(temp);
- #endif
-#else
- itemsize = PyString_GET_SIZE(temp);
-#endif
}
else if (string_type == NPY_UNICODE) {
-#if defined(NPY_PY3K)
if ((temp = PyObject_Str(obj)) == NULL) {
-#else
- if ((temp = PyObject_Unicode(obj)) == NULL) {
-#endif
goto fail;
}
itemsize = PyUnicode_GET_DATA_SIZE(temp);
@@ -340,24 +316,18 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if (ip != NULL) {
if (PyDict_Check(ip)) {
PyObject *typestr;
-#if defined(NPY_PY3K)
PyObject *tmp = NULL;
-#endif
typestr = PyDict_GetItemString(ip, "typestr");
-#if defined(NPY_PY3K)
/* Allow unicode type strings */
if (typestr && PyUnicode_Check(typestr)) {
tmp = PyUnicode_AsASCIIString(typestr);
typestr = tmp;
}
-#endif
if (typestr && PyBytes_Check(typestr)) {
dtype =_array_typedescr_fromstr(PyBytes_AS_STRING(typestr));
-#if defined(NPY_PY3K)
if (tmp == typestr) {
Py_DECREF(tmp);
}
-#endif
Py_DECREF(ip);
if (dtype == NULL) {
goto fail;
@@ -397,19 +367,6 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
PyErr_Clear(); /* TODO[gh-14801]: propagate crashes during attribute access? */
}
- /* The old buffer interface */
-#if !defined(NPY_PY3K)
- if (PyBuffer_Check(obj)) {
- dtype = PyArray_DescrNewFromType(NPY_VOID);
- if (dtype == NULL) {
- goto fail;
- }
- dtype->elsize = Py_TYPE(obj)->tp_as_sequence->sq_length(obj);
- PyErr_Clear();
- goto promote_types;
- }
-#endif
-
/* The __array__ attribute */
ip = PyArray_LookupSpecial_OnInstance(obj, "__array__");
if (ip != NULL) {
@@ -478,9 +435,6 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if (common_type != NULL && !string_type &&
(common_type == &PyFloat_Type ||
/* TODO: we could add longs if we add a range check */
-#if !defined(NPY_PY3K)
- common_type == &PyInt_Type ||
-#endif
common_type == &PyBool_Type ||
common_type == &PyComplex_Type)) {
size = 1;
@@ -554,7 +508,7 @@ fail:
/* new reference */
NPY_NO_EXPORT PyArray_Descr *
-_array_typedescr_fromstr(char *c_str)
+_array_typedescr_fromstr(char const *c_str)
{
PyArray_Descr *descr = NULL;
PyObject *stringobj = PyString_FromString(c_str);
@@ -612,12 +566,7 @@ NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap)
{
PyObject *base = PyArray_BASE(ap);
-#if defined(NPY_PY3K)
Py_buffer view;
-#else
- void *dummy;
- Py_ssize_t n;
-#endif
/*
* C-data wrapping arrays may not own their data while not having a base;
@@ -661,7 +610,6 @@ _IsWriteable(PyArrayObject *ap)
assert(!PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA));
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(base, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
PyErr_Clear();
return NPY_FALSE;
@@ -675,12 +623,6 @@ _IsWriteable(PyArrayObject *ap)
* _dealloc_cached_buffer_info, but in this case leave it in the cache to
* speed up future calls to _IsWriteable.
*/
-#else
- if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) {
- PyErr_Clear();
- return NPY_FALSE;
- }
-#endif
return NPY_TRUE;
}
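
After this hunk, _IsWriteable relies solely on the PEP 3118 probe shown above: request a writable buffer from the base object and translate failure into "read-only", swallowing the exception. A self-contained sketch of that probe; object_is_writeable is a made-up name, and the exporter-info caching NumPy performs for speed is omitted.

#include <Python.h>

/*
 * Sketch of the writability probe retained in _IsWriteable: ask for a
 * writable, contiguous buffer and interpret failure as "not writeable".
 */
static int
object_is_writeable(PyObject *base)
{
    Py_buffer view;

    if (PyObject_GetBuffer(base, &view, PyBUF_WRITABLE | PyBUF_SIMPLE) < 0) {
        PyErr_Clear();          /* failure just means read-only */
        return 0;
    }
    PyBuffer_Release(&view);
    return 1;
}
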
@@ -695,7 +637,7 @@ _IsWriteable(PyArrayObject *ap)
* @return Python unicode string
*/
NPY_NO_EXPORT PyObject *
-convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending)
+convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending)
{
npy_intp i;
PyObject *ret, *tmp;
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 7eee9ddc5..e77e51f42 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -49,7 +49,7 @@ NPY_NO_EXPORT PyArray_Descr *
_array_find_python_scalar_type(PyObject *op);
NPY_NO_EXPORT PyArray_Descr *
-_array_typedescr_fromstr(char *str);
+_array_typedescr_fromstr(char const *str);
NPY_NO_EXPORT char *
index2ptr(PyArrayObject *mp, npy_intp i);
@@ -61,7 +61,7 @@ NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap);
NPY_NO_EXPORT PyObject *
-convert_shape_to_string(npy_intp n, npy_intp *vals, char *ending);
+convert_shape_to_string(npy_intp n, npy_intp const *vals, char *ending);
/*
* Sets ValueError with "matrices not aligned" message for np.dot and friends
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index cc37026b0..d4b9edd57 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -1164,7 +1164,7 @@ fail:
* order must be NPY_CORDER or NPY_FORTRANORDER
*/
static int
-unravel_index_loop(int unravel_ndim, npy_intp *unravel_dims,
+unravel_index_loop(int unravel_ndim, npy_intp const *unravel_dims,
npy_intp unravel_size, npy_intp count,
char *indices, npy_intp indices_stride,
npy_intp *coords, NPY_ORDER order)
@@ -1447,7 +1447,6 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args)
}
}
-#if defined(NPY_PY3K)
if (!PyArg_ParseTuple(args, "OO!:add_docstring", &obj, &PyUnicode_Type, &str)) {
return NULL;
}
@@ -1456,13 +1455,6 @@ arr_add_docstring(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (docstr == NULL) {
return NULL;
}
-#else
- if (!PyArg_ParseTuple(args, "OO!:add_docstring", &obj, &PyString_Type, &str)) {
- return NULL;
- }
-
- docstr = PyString_AS_STRING(str);
-#endif
#define _TESTDOC1(typebase) (Py_TYPE(obj) == &Py##typebase##_Type)
#define _TESTDOC2(typebase) (Py_TYPE(obj) == Py##typebase##_TypePtr)
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index ca126b4b1..484a13134 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -152,11 +152,7 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
NPY_NO_EXPORT int
PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
{
-#if defined(NPY_PY3K)
Py_buffer view;
-#else
- Py_ssize_t buflen;
-#endif
buf->ptr = NULL;
buf->flags = NPY_ARRAY_BEHAVED;
@@ -165,7 +161,6 @@ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
return NPY_SUCCEED;
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(obj, &view,
PyBUF_ANY_CONTIGUOUS|PyBUF_WRITABLE|PyBUF_SIMPLE) != 0) {
PyErr_Clear();
@@ -192,22 +187,6 @@ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
if (PyMemoryView_Check(obj)) {
buf->base = PyMemoryView_GET_BASE(obj);
}
-#else
- if (PyObject_AsWriteBuffer(obj, &(buf->ptr), &buflen) < 0) {
- PyErr_Clear();
- buf->flags &= ~NPY_ARRAY_WRITEABLE;
- if (PyObject_AsReadBuffer(obj, (const void **)&(buf->ptr),
- &buflen) < 0) {
- return NPY_FAIL;
- }
- }
- buf->len = (npy_intp) buflen;
-
- /* Point to the base of the buffer object if present */
- if (PyBuffer_Check(obj)) {
- buf->base = ((PyArray_Chunk *)obj)->base;
- }
-#endif
if (buf->base == NULL) {
buf->base = obj;
}
@@ -812,18 +791,6 @@ PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg)
* Since it is the usual case, first check if o is an integer. This is
* an exact check, since otherwise __index__ is used.
*/
-#if !defined(NPY_PY3K)
- if (PyInt_CheckExact(o)) {
- #if (NPY_SIZEOF_LONG <= NPY_SIZEOF_INTP)
- /* No overflow is possible, so we can just return */
- return PyInt_AS_LONG(o);
- #else
- long_value = PyInt_AS_LONG(o);
- goto overflow_check;
- #endif
- }
- else
-#endif
if (PyLong_CheckExact(o)) {
#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
long_value = PyLong_AsLongLong(o);
@@ -1145,7 +1112,7 @@ PyArray_TypestrConvert(int itemsize, int gentype)
PyArray_IntTupleFromIntp
*/
NPY_NO_EXPORT PyObject *
-PyArray_IntTupleFromIntp(int len, npy_intp *vals)
+PyArray_IntTupleFromIntp(int len, npy_intp const *vals)
{
int i;
PyObject *intTuple = PyTuple_New(len);
diff --git a/numpy/core/src/multiarray/conversion_utils.h b/numpy/core/src/multiarray/conversion_utils.h
index cd43f25c3..9bf712c3b 100644
--- a/numpy/core/src/multiarray/conversion_utils.h
+++ b/numpy/core/src/multiarray/conversion_utils.h
@@ -37,7 +37,7 @@ NPY_NO_EXPORT int
PyArray_TypestrConvert(int itemsize, int gentype);
NPY_NO_EXPORT PyObject *
-PyArray_IntTupleFromIntp(int len, npy_intp *vals);
+PyArray_IntTupleFromIntp(int len, npy_intp const *vals);
NPY_NO_EXPORT int
PyArray_SelectkindConverter(PyObject *obj, NPY_SELECTKIND *selectkind);
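
The const qualifiers added to these signatures (npy_intp const *vals and friends) change nothing at runtime; they allow callers to pass const-qualified arrays and turn accidental writes inside the function into compile errors. A small plain-C illustration of the same idiom, with invented names and ptrdiff_t standing in for npy_intp:

#include <stddef.h>
#include <stdio.h>

/* A read-only dimensions array can be declared const ... */
static long
shape_product(ptrdiff_t const *vals, int len)
{
    long prod = 1;
    for (int i = 0; i < len; i++) {
        prod *= (long)vals[i];
        /* vals[i] = 0;  -- would now fail to compile */
    }
    return prod;
}

int main(void)
{
    ptrdiff_t writable_dims[3] = {2, 3, 4};
    ptrdiff_t const frozen_dims[3] = {5, 6, 7};

    /* ... and callers may pass const or non-const storage alike. */
    printf("%ld %ld\n", shape_product(writable_dims, 3),
                        shape_product(frozen_dims, 3));
    return 0;
}
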
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index aa4e40e66..e7cbeaa77 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -262,18 +262,12 @@ PyArray_ToFile(PyArrayObject *self, FILE *fp, char *sep, char *format)
return -1;
}
}
-#if defined(NPY_PY3K)
byteobj = PyUnicode_AsASCIIString(strobj);
-#else
- byteobj = strobj;
-#endif
NPY_BEGIN_ALLOW_THREADS;
n2 = PyBytes_GET_SIZE(byteobj);
n = fwrite(PyBytes_AS_STRING(byteobj), 1, n2, fp);
NPY_END_ALLOW_THREADS;
-#if defined(NPY_PY3K)
Py_DECREF(byteobj);
-#endif
if (n < n2) {
PyErr_Format(PyExc_IOError,
"problem writing element %" NPY_INTP_FMT
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 6921427ce..07e269b57 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -629,13 +629,7 @@ discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type)
}
if ((nd == 0) || PyString_Check(s) ||
-#if defined(NPY_PY3K)
- PyMemoryView_Check(s) ||
-#else
- PyBuffer_Check(s) ||
-#endif
- PyUnicode_Check(s)) {
-
+ PyMemoryView_Check(s) || PyUnicode_Check(s)) {
/* If an object has no length, leave it be */
if (string_type && s != NULL &&
!PyString_Check(s) && !PyUnicode_Check(s)) {
@@ -644,11 +638,7 @@ discover_itemsize(PyObject *s, int nd, int *itemsize, int string_type)
s_string = PyObject_Str(s);
}
else {
-#if defined(NPY_PY3K)
s_string = PyObject_Str(s);
-#else
- s_string = PyObject_Unicode(s);
-#endif
}
if (s_string) {
n = PyObject_Length(s_string);
@@ -736,10 +726,6 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
/* obj is a String */
if (PyString_Check(obj) ||
-#if defined(NPY_PY3K)
-#else
- PyBuffer_Check(obj) ||
-#endif
PyUnicode_Check(obj)) {
if (stop_at_string) {
*maxndim = 0;
@@ -950,7 +936,7 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
}
static PyObject *
-raise_memory_error(int nd, npy_intp *dims, PyArray_Descr *descr)
+raise_memory_error(int nd, npy_intp const *dims, PyArray_Descr *descr)
{
static PyObject *exc_type = NULL;
@@ -2352,9 +2338,7 @@ PyArray_FromStructInterface(PyObject *input)
NPY_NO_EXPORT int
_is_default_descr(PyObject *descr, PyObject *typestr) {
PyObject *tuple, *name, *typestr2;
-#if defined(NPY_PY3K)
PyObject *tmp = NULL;
-#endif
int ret = 0;
if (!PyList_Check(descr) || PyList_GET_SIZE(descr) != 1) {
@@ -2369,7 +2353,6 @@ _is_default_descr(PyObject *descr, PyObject *typestr) {
return 0;
}
typestr2 = PyTuple_GET_ITEM(tuple, 1);
-#if defined(NPY_PY3K)
/* Allow unicode type strings */
if (PyUnicode_Check(typestr2)) {
tmp = PyUnicode_AsASCIIString(typestr2);
@@ -2378,14 +2361,11 @@ _is_default_descr(PyObject *descr, PyObject *typestr) {
}
typestr2 = tmp;
}
-#endif
if (PyBytes_Check(typestr2) &&
PyObject_RichCompareBool(typestr, typestr2, Py_EQ)) {
ret = 1;
}
-#if defined(NPY_PY3K)
Py_XDECREF(tmp);
-#endif
return ret;
}
@@ -2402,11 +2382,7 @@ PyArray_FromInterface(PyObject *origin)
PyArrayObject *ret;
PyArray_Descr *dtype = NULL;
char *data = NULL;
-#if defined(NPY_PY3K)
Py_buffer view;
-#else
- Py_ssize_t buffer_len;
-#endif
int res, i, n;
npy_intp dims[NPY_MAXDIMS], strides[NPY_MAXDIMS];
int dataflags = NPY_ARRAY_BEHAVED;
@@ -2434,7 +2410,7 @@ PyArray_FromInterface(PyObject *origin)
"Missing __array_interface__ typestr");
return NULL;
}
-#if defined(NPY_PY3K)
+
/* Allow unicode type strings */
if (PyUnicode_Check(attr)) {
PyObject *tmp = PyUnicode_AsASCIIString(attr);
@@ -2446,7 +2422,7 @@ PyArray_FromInterface(PyObject *origin)
else {
Py_INCREF(attr);
}
-#endif
+
if (!PyBytes_Check(attr)) {
PyErr_SetString(PyExc_TypeError,
"__array_interface__ typestr must be a string");
@@ -2474,9 +2450,7 @@ PyArray_FromInterface(PyObject *origin)
}
}
-#if defined(NPY_PY3K)
Py_DECREF(attr); /* Pairs with the unicode handling above */
-#endif
/* Get shape tuple from interface specification */
attr = PyDict_GetItemString(iface, "shape");
@@ -2557,7 +2531,6 @@ PyArray_FromInterface(PyObject *origin)
else {
base = origin;
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(base, &view,
PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
PyErr_Clear();
@@ -2576,18 +2549,7 @@ PyArray_FromInterface(PyObject *origin)
*/
PyBuffer_Release(&view);
_dealloc_cached_buffer_info(base);
-#else
- res = PyObject_AsWriteBuffer(base, (void **)&data, &buffer_len);
- if (res < 0) {
- PyErr_Clear();
- res = PyObject_AsReadBuffer(
- base, (const void **)&data, &buffer_len);
- if (res < 0) {
- goto fail;
- }
- dataflags &= ~NPY_ARRAY_WRITEABLE;
- }
-#endif
+
/* Get offset number from interface specification */
attr = PyDict_GetItemString(iface, "offset");
if (attr) {
@@ -3590,7 +3552,7 @@ array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nrea
*/
#define FROM_BUFFER_SIZE 4096
static PyArrayObject *
-array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
+array_from_text(PyArray_Descr *dtype, npy_intp num, char const *sep, size_t *nread,
void *stream, next_element next, skip_separator skip_sep,
void *stream_data)
{
@@ -3780,9 +3742,7 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
{
PyArrayObject *ret;
char *data;
-#if defined(NPY_PY3K)
Py_buffer view;
-#endif
Py_ssize_t ts;
npy_intp s, n;
int itemsize;
@@ -3803,7 +3763,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
return NULL;
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(buf, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
writeable = 0;
PyErr_Clear();
@@ -3822,16 +3781,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
*/
PyBuffer_Release(&view);
_dealloc_cached_buffer_info(buf);
-#else
- if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts) == -1) {
- writeable = 0;
- PyErr_Clear();
- if (PyObject_AsReadBuffer(buf, (void *)&data, &ts) == -1) {
- Py_DECREF(type);
- return NULL;
- }
- }
-#endif
if ((offset < 0) || (offset > ts)) {
PyErr_Format(PyExc_ValueError,
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 72a3df89c..67ed3ca85 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -71,7 +71,7 @@ numpy_pydatetime_import(void)
}
/* Exported as DATETIMEUNITS in multiarraymodule.c */
-NPY_NO_EXPORT char *_datetime_strings[NPY_DATETIME_NUMUNITS] = {
+NPY_NO_EXPORT char const *_datetime_strings[NPY_DATETIME_NUMUNITS] = {
"Y",
"M",
"W",
@@ -692,6 +692,14 @@ get_datetime_metadata_from_dtype(PyArray_Descr *dtype)
return &(((PyArray_DatetimeDTypeMetaData *)dtype->c_metadata)->meta);
}
+/* strtol does not know whether to put a const qualifier on endptr, wrap
+ * it so we can put this cast in one place.
+ */
+NPY_NO_EXPORT long int
+strtol_const(char const *str, char const **endptr, int base) {
+ return strtol(str, (char**)endptr, base);
+}
+
/*
* Converts a substring given by 'str' and 'len' into
* a date time unit multiplier + enum value, which are populated
@@ -702,15 +710,15 @@ get_datetime_metadata_from_dtype(PyArray_Descr *dtype)
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_datetime_extended_unit_from_string(char *str, Py_ssize_t len,
- char *metastr,
+parse_datetime_extended_unit_from_string(char const *str, Py_ssize_t len,
+ char const *metastr,
PyArray_DatetimeMetaData *out_meta)
{
- char *substr = str, *substrend = NULL;
+ char const *substr = str, *substrend = NULL;
int den = 1;
/* First comes an optional integer multiplier */
- out_meta->num = (int)strtol(substr, &substrend, 10);
+ out_meta->num = (int)strtol_const(substr, &substrend, 10);
if (substr == substrend) {
out_meta->num = 1;
}
@@ -735,7 +743,7 @@ parse_datetime_extended_unit_from_string(char *str, Py_ssize_t len,
/* Next comes an optional integer denominator */
if (substr-str < len && *substr == '/') {
substr++;
- den = (int)strtol(substr, &substrend, 10);
+ den = (int)strtol_const(substr, &substrend, 10);
/* If the '/' exists, there must be a number followed by ']' */
if (substr == substrend || *substrend != ']') {
goto bad_input;
@@ -776,10 +784,10 @@ bad_input:
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_datetime_metadata_from_metastr(char *metastr, Py_ssize_t len,
+parse_datetime_metadata_from_metastr(char const *metastr, Py_ssize_t len,
PyArray_DatetimeMetaData *out_meta)
{
- char *substr = metastr, *substrend = NULL;
+ char const *substr = metastr, *substrend = NULL;
/* Treat the empty string as generic units */
if (len == 0) {
@@ -837,10 +845,10 @@ bad_input:
* The "type" string should be NULL-terminated.
*/
NPY_NO_EXPORT PyArray_Descr *
-parse_dtype_from_datetime_typestr(char *typestr, Py_ssize_t len)
+parse_dtype_from_datetime_typestr(char const *typestr, Py_ssize_t len)
{
PyArray_DatetimeMetaData meta;
- char *metastr = NULL;
+ char const *metastr = NULL;
int is_timedelta = 0;
Py_ssize_t metalen = 0;
@@ -923,7 +931,7 @@ static NPY_DATETIMEUNIT _multiples_table[16][4] = {
*/
NPY_NO_EXPORT int
convert_datetime_divisor_to_multiple(PyArray_DatetimeMetaData *meta,
- int den, char *metastr)
+ int den, char const *metastr)
{
int i, num, ind;
NPY_DATETIMEUNIT *totry;
@@ -1671,7 +1679,7 @@ datetime_type_promotion(PyArray_Descr *type1, PyArray_Descr *type2)
* Returns NPY_DATETIMEUNIT on success, NPY_FR_ERROR on failure.
*/
NPY_NO_EXPORT NPY_DATETIMEUNIT
-parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr)
+parse_datetime_unit_from_string(char const *str, Py_ssize_t len, char const *metastr)
{
/* Use switch statements so the compiler can make it fast */
if (len == 1) {
@@ -1956,7 +1964,7 @@ append_metastr_to_string(PyArray_DatetimeMetaData *meta,
{
PyObject *res;
int num;
- char *basestr;
+ char const *basestr;
if (ret == NULL) {
return NULL;
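
The strtol_const helper introduced in this file exists only to confine the cast that strtol's non-const endptr parameter forces once the parsed string is const. The same wrapper, compiled outside NumPy as a quick demonstration (the example input string is made up):

#include <stdio.h>
#include <stdlib.h>

/*
 * Same idea as the strtol_const wrapper in the diff: strtol takes a
 * non-const char **endptr even for a const input string, so the cast
 * is kept in one place.
 */
static long
strtol_const(char const *str, char const **endptr, int base)
{
    return strtol(str, (char **)endptr, base);
}

int main(void)
{
    char const *meta = "25[s]";
    char const *end = NULL;

    long num = strtol_const(meta, &end, 10);   /* num == 25, end -> "[s]" */
    printf("%ld %s\n", num, end);
    return 0;
}
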
diff --git a/numpy/core/src/multiarray/datetime_busdaycal.c b/numpy/core/src/multiarray/datetime_busdaycal.c
index 7a26868e8..eb6ef04be 100644
--- a/numpy/core/src/multiarray/datetime_busdaycal.c
+++ b/numpy/core/src/multiarray/datetime_busdaycal.c
@@ -493,12 +493,7 @@ static PyGetSetDef busdaycalendar_getsets[] = {
};
NPY_NO_EXPORT PyTypeObject NpyBusDayCalendar_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.busdaycalendar", /* tp_name */
sizeof(NpyBusDayCalendar), /* tp_basicsize */
0, /* tp_itemsize */
@@ -507,11 +502,7 @@ NPY_NO_EXPORT PyTypeObject NpyBusDayCalendar_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c
index dfc01494f..4574c05d8 100644
--- a/numpy/core/src/multiarray/datetime_strings.c
+++ b/numpy/core/src/multiarray/datetime_strings.c
@@ -218,7 +218,7 @@ convert_datetimestruct_utc_to_local(npy_datetimestruct *out_dts_local,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_iso_8601_datetime(char *str, Py_ssize_t len,
+parse_iso_8601_datetime(char const *str, Py_ssize_t len,
NPY_DATETIMEUNIT unit,
NPY_CASTING casting,
npy_datetimestruct *out,
@@ -227,7 +227,7 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len,
{
int year_leap = 0;
int i, numdigits;
- char *substr;
+ char const *substr;
Py_ssize_t sublen;
NPY_DATETIMEUNIT bestunit;
@@ -1487,7 +1487,6 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args,
/* Get a string size long enough for any datetimes we're given */
strsize = get_datetime_iso_8601_strlen(local, unit);
-#if defined(NPY_PY3K)
/*
* For Python3, allocate the output array as a UNICODE array, so
* that it will behave as strings properly
@@ -1504,7 +1503,6 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args,
op_dtypes[1] = NULL;
goto fail;
}
-#endif
/* Create the iteration string data type (always ASCII string) */
op_dtypes[1] = PyArray_DescrNewFromType(NPY_STRING);
if (op_dtypes[1] == NULL) {
diff --git a/numpy/core/src/multiarray/datetime_strings.h b/numpy/core/src/multiarray/datetime_strings.h
index 4e60ce929..148369595 100644
--- a/numpy/core/src/multiarray/datetime_strings.h
+++ b/numpy/core/src/multiarray/datetime_strings.h
@@ -33,7 +33,7 @@
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-parse_iso_8601_datetime(char *str, Py_ssize_t len,
+parse_iso_8601_datetime(char const *str, Py_ssize_t len,
NPY_DATETIMEUNIT unit,
NPY_CASTING casting,
npy_datetimestruct *out,
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 89934bbd4..a76b6c1e7 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -39,10 +39,33 @@
static PyObject *typeDict = NULL; /* Must be explicitly loaded */
+/*
+ * Generate a vague error message when a function returned NULL but forgot
+ * to set an exception. We should aim to remove this eventually.
+ */
+static void
+_report_generic_error(void) {
+ PyErr_SetString(PyExc_TypeError, "data type not understood");
+}
+
static PyArray_Descr *
_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag);
static PyArray_Descr *
+_convert_from_any(PyObject *obj, int align);
+
+static PyArray_Descr *
+_arraydescr_run_converter(PyObject *arg, int align)
+{
+ PyArray_Descr *type = _convert_from_any(arg, align);
+ /* TODO: fix the `_convert` functions so that this can never happen */
+ if (type == NULL && !PyErr_Occurred()) {
+ _report_generic_error();
+ }
+ return type;
+}
+
+static PyArray_Descr *
_arraydescr_from_ctypes_type(PyTypeObject *type)
{
PyObject *_numpy_dtype_ctypes;
@@ -199,7 +222,7 @@ _check_for_commastring(const char *type, Py_ssize_t len)
#undef _chk_byteorder
static int
-is_datetime_typestr(char *type, Py_ssize_t len)
+is_datetime_typestr(char const *type, Py_ssize_t len)
{
if (len < 2) {
return 0;
@@ -225,26 +248,17 @@ is_datetime_typestr(char *type, Py_ssize_t len)
static PyArray_Descr *
_convert_from_tuple(PyObject *obj, int align)
{
- PyArray_Descr *type, *res;
- PyObject *val;
- int errflag;
-
if (PyTuple_GET_SIZE(obj) != 2) {
return NULL;
}
- if (align) {
- if (!PyArray_DescrAlignConverter(PyTuple_GET_ITEM(obj, 0), &type)) {
- return NULL;
- }
- }
- else {
- if (!PyArray_DescrConverter(PyTuple_GET_ITEM(obj, 0), &type)) {
- return NULL;
- }
+ PyArray_Descr *type = _arraydescr_run_converter(PyTuple_GET_ITEM(obj, 0), align);
+ if (type == NULL) {
+ return NULL;
}
- val = PyTuple_GET_ITEM(obj,1);
+ PyObject *val = PyTuple_GET_ITEM(obj,1);
/* try to interpret next item as a type */
- res = _use_inherit(type, val, &errflag);
+ int errflag;
+ PyArray_Descr *res = _use_inherit(type, val, &errflag);
if (res || errflag) {
Py_DECREF(type);
return res;
@@ -291,11 +305,6 @@ _convert_from_tuple(PyObject *obj, int align)
* a new fields attribute.
*/
PyArray_Dims shape = {NULL, -1};
- PyArray_Descr *newdescr = NULL;
- npy_intp items;
- int i, overflowed;
- int nbytes;
-
if (!(PyArray_IntpConverter(val, &shape)) || (shape.len > NPY_MAXDIMS)) {
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple.");
@@ -322,7 +331,7 @@ _convert_from_tuple(PyObject *obj, int align)
}
/* validate and set shape */
- for (i=0; i < shape.len; i++) {
+ for (int i=0; i < shape.len; i++) {
if (shape.ptr[i] < 0) {
PyErr_SetString(PyExc_ValueError,
"invalid shape in fixed-type tuple: "
@@ -336,7 +345,9 @@ _convert_from_tuple(PyObject *obj, int align)
goto fail;
}
}
- items = PyArray_OverflowMultiplyList(shape.ptr, shape.len);
+ npy_intp items = PyArray_OverflowMultiplyList(shape.ptr, shape.len);
+ int overflowed;
+ int nbytes;
if (items < 0 || items > NPY_MAX_INT) {
overflowed = 1;
}
@@ -350,13 +361,14 @@ _convert_from_tuple(PyObject *obj, int align)
"bytes must fit into a C int.");
goto fail;
}
- newdescr = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *newdescr = PyArray_DescrNewFromType(NPY_VOID);
if (newdescr == NULL) {
goto fail;
}
newdescr->elsize = nbytes;
newdescr->subarray = PyArray_malloc(sizeof(PyArray_ArrayDescr));
if (newdescr->subarray == NULL) {
+ Py_DECREF(newdescr);
PyErr_NoMemory();
goto fail;
}
@@ -375,13 +387,15 @@ _convert_from_tuple(PyObject *obj, int align)
*/
newdescr->subarray->shape = PyTuple_New(shape.len);
if (newdescr->subarray->shape == NULL) {
+ Py_DECREF(newdescr);
goto fail;
}
- for (i=0; i < shape.len; i++) {
+ for (int i=0; i < shape.len; i++) {
PyTuple_SET_ITEM(newdescr->subarray->shape, i,
PyInt_FromLong((long)shape.ptr[i]));
if (PyTuple_GET_ITEM(newdescr->subarray->shape, i) == NULL) {
+ Py_DECREF(newdescr);
goto fail;
}
}
@@ -391,7 +405,6 @@ _convert_from_tuple(PyObject *obj, int align)
fail:
Py_XDECREF(type);
- Py_XDECREF(newdescr);
npy_free_cache_dim_obj(shape);
return NULL;
}
@@ -410,30 +423,24 @@ _convert_from_tuple(PyObject *obj, int align)
static PyArray_Descr *
_convert_from_array_descr(PyObject *obj, int align)
{
- int n, i, totalsize;
- int ret;
- PyObject *fields, *item, *newobj;
- PyObject *name, *tup, *title;
- PyObject *nameslist;
- PyArray_Descr *new;
- PyArray_Descr *conv;
- /* Types with fields need the Python C API for field access */
- char dtypeflags = NPY_NEEDS_PYAPI;
- int maxalign = 0;
-
- n = PyList_GET_SIZE(obj);
- nameslist = PyTuple_New(n);
+ int n = PyList_GET_SIZE(obj);
+ PyObject *nameslist = PyTuple_New(n);
if (!nameslist) {
return NULL;
}
- totalsize = 0;
- fields = PyDict_New();
- for (i = 0; i < n; i++) {
- item = PyList_GET_ITEM(obj, i);
+
+ /* Types with fields need the Python C API for field access */
+ char dtypeflags = NPY_NEEDS_PYAPI;
+ int maxalign = 0;
+ int totalsize = 0;
+ PyObject *fields = PyDict_New();
+ for (int i = 0; i < n; i++) {
+ PyObject *item = PyList_GET_ITEM(obj, i);
if (!PyTuple_Check(item) || (PyTuple_GET_SIZE(item) < 2)) {
goto fail;
}
- name = PyTuple_GET_ITEM(item, 0);
+ PyObject *name = PyTuple_GET_ITEM(item, 0);
+ PyObject *title;
if (PyBaseString_Check(name)) {
title = NULL;
}
@@ -454,23 +461,11 @@ _convert_from_array_descr(PyObject *obj, int align)
/* Insert name into nameslist */
Py_INCREF(name);
-#if !defined(NPY_PY3K)
- /* convert unicode name to ascii on Python 2 if possible */
- if (PyUnicode_Check(name)) {
- PyObject *tmp = PyUnicode_AsASCIIString(name);
- Py_DECREF(name);
- if (tmp == NULL) {
- goto fail;
- }
- name = tmp;
- }
-#endif
if (PyUString_GET_SIZE(name) == 0) {
Py_DECREF(name);
if (title == NULL) {
name = PyUString_FromFormat("f%d", i);
}
-#if defined(NPY_PY3K)
/* On Py3, allow only non-empty Unicode strings as field names */
else if (PyUString_Check(title) && PyUString_GET_SIZE(title) > 0) {
name = title;
@@ -479,55 +474,36 @@ _convert_from_array_descr(PyObject *obj, int align)
else {
goto fail;
}
-#else
- else {
- name = title;
- Py_INCREF(name);
- }
-#endif
}
PyTuple_SET_ITEM(nameslist, i, name);
/* Process rest */
-
+ PyArray_Descr *conv;
if (PyTuple_GET_SIZE(item) == 2) {
- if (align) {
- ret = PyArray_DescrAlignConverter(PyTuple_GET_ITEM(item, 1),
- &conv);
- }
- else {
- ret = PyArray_DescrConverter(PyTuple_GET_ITEM(item, 1), &conv);
+ conv = _arraydescr_run_converter(PyTuple_GET_ITEM(item, 1), align);
+ if (conv == NULL) {
+ goto fail;
}
}
else if (PyTuple_GET_SIZE(item) == 3) {
- newobj = PyTuple_GetSlice(item, 1, 3);
- if (align) {
- ret = PyArray_DescrAlignConverter(newobj, &conv);
- }
- else {
- ret = PyArray_DescrConverter(newobj, &conv);
- }
+ PyObject *newobj = PyTuple_GetSlice(item, 1, 3);
+ conv = _arraydescr_run_converter(newobj, align);
Py_DECREF(newobj);
+ if (conv == NULL) {
+ goto fail;
+ }
}
else {
goto fail;
}
- if (ret == NPY_FAIL) {
- goto fail;
- }
-
if ((PyDict_GetItem(fields, name) != NULL)
|| (title
&& PyBaseString_Check(title)
&& (PyDict_GetItem(fields, title) != NULL))) {
-#if defined(NPY_PY3K)
name = PyUnicode_AsUTF8String(name);
-#endif
PyErr_Format(PyExc_ValueError,
"field '%s' occurs more than once", PyString_AsString(name));
-#if defined(NPY_PY3K)
Py_DECREF(name);
-#endif
Py_DECREF(conv);
goto fail;
}
@@ -541,7 +517,7 @@ _convert_from_array_descr(PyObject *obj, int align)
}
maxalign = PyArray_MAX(maxalign, _align);
}
- tup = PyTuple_New((title == NULL ? 2 : 3));
+ PyObject *tup = PyTuple_New((title == NULL ? 2 : 3));
PyTuple_SET_ITEM(tup, 0, (PyObject *)conv);
PyTuple_SET_ITEM(tup, 1, PyInt_FromLong((long) totalsize));
@@ -576,7 +552,7 @@ _convert_from_array_descr(PyObject *obj, int align)
totalsize = NPY_NEXT_ALIGNED_OFFSET(totalsize, maxalign);
}
- new = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *new = PyArray_DescrNewFromType(NPY_VOID);
if (new == NULL) {
Py_XDECREF(fields);
Py_XDECREF(nameslist);
@@ -609,47 +585,34 @@ _convert_from_array_descr(PyObject *obj, int align)
static PyArray_Descr *
_convert_from_list(PyObject *obj, int align)
{
- int n, i;
- int totalsize;
- PyObject *fields;
- PyArray_Descr *conv = NULL;
- PyArray_Descr *new;
- PyObject *key, *tup;
- PyObject *nameslist = NULL;
- int ret;
- int maxalign = 0;
- /* Types with fields need the Python C API for field access */
- char dtypeflags = NPY_NEEDS_PYAPI;
-
- n = PyList_GET_SIZE(obj);
+ int n = PyList_GET_SIZE(obj);
/*
* Ignore any empty string at end which _internal._commastring
* can produce
*/
- key = PyList_GET_ITEM(obj, n-1);
- if (PyBytes_Check(key) && PyBytes_GET_SIZE(key) == 0) {
+ PyObject *last_item = PyList_GET_ITEM(obj, n-1);
+ if (PyBytes_Check(last_item) && PyBytes_GET_SIZE(last_item) == 0) {
n = n - 1;
}
/* End ignore code.*/
- totalsize = 0;
if (n == 0) {
return NULL;
}
- nameslist = PyTuple_New(n);
+ PyObject *nameslist = PyTuple_New(n);
if (!nameslist) {
return NULL;
}
- fields = PyDict_New();
- for (i = 0; i < n; i++) {
- tup = PyTuple_New(2);
- key = PyUString_FromFormat("f%d", i);
- if (align) {
- ret = PyArray_DescrAlignConverter(PyList_GET_ITEM(obj, i), &conv);
- }
- else {
- ret = PyArray_DescrConverter(PyList_GET_ITEM(obj, i), &conv);
- }
- if (ret == NPY_FAIL) {
+ PyObject *fields = PyDict_New();
+
+ /* Types with fields need the Python C API for field access */
+ char dtypeflags = NPY_NEEDS_PYAPI;
+ int maxalign = 0;
+ int totalsize = 0;
+ for (int i = 0; i < n; i++) {
+ PyObject *tup = PyTuple_New(2);
+ PyObject *key = PyUString_FromFormat("f%d", i);
+ PyArray_Descr *conv = _arraydescr_run_converter(PyList_GET_ITEM(obj, i), align);
+ if (conv == NULL) {
Py_DECREF(tup);
Py_DECREF(key);
goto fail;
@@ -671,7 +634,7 @@ _convert_from_list(PyObject *obj, int align)
PyTuple_SET_ITEM(nameslist, i, key);
totalsize += conv->elsize;
}
- new = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *new = PyArray_DescrNewFromType(NPY_VOID);
new->fields = fields;
new->names = nameslist;
new->flags = dtypeflags;
@@ -1012,35 +975,21 @@ _use_fields_dict(PyObject *obj, int align)
static PyArray_Descr *
_convert_from_dict(PyObject *obj, int align)
{
- PyArray_Descr *new;
- PyObject *fields = NULL;
- PyObject *names = NULL;
- PyObject *offsets= NULL;
- PyObject *descrs = NULL;
- PyObject *titles = NULL;
- PyObject *metadata, *tmp;
- int n, i;
- int totalsize, itemsize;
- int maxalign = 0;
- /* Types with fields need the Python C API for field access */
- char dtypeflags = NPY_NEEDS_PYAPI;
- int has_out_of_order_fields = 0;
-
- fields = PyDict_New();
+ PyObject *fields = PyDict_New();
if (fields == NULL) {
return (PyArray_Descr *)PyErr_NoMemory();
}
/*
* Use PyMapping_GetItemString to support dictproxy objects as well.
*/
- names = PyMapping_GetItemString(obj, "names");
+ PyObject *names = PyMapping_GetItemString(obj, "names");
if (names == NULL) {
Py_DECREF(fields);
/* XXX should check this is a KeyError */
PyErr_Clear();
return _use_fields_dict(obj, align);
}
- descrs = PyMapping_GetItemString(obj, "formats");
+ PyObject *descrs = PyMapping_GetItemString(obj, "formats");
if (descrs == NULL) {
Py_DECREF(fields);
/* XXX should check this is a KeyError */
@@ -1048,12 +997,12 @@ _convert_from_dict(PyObject *obj, int align)
Py_DECREF(names);
return _use_fields_dict(obj, align);
}
- n = PyObject_Length(names);
- offsets = PyMapping_GetItemString(obj, "offsets");
+ int n = PyObject_Length(names);
+ PyObject *offsets = PyMapping_GetItemString(obj, "offsets");
if (!offsets) {
PyErr_Clear();
}
- titles = PyMapping_GetItemString(obj, "titles");
+ PyObject *titles = PyMapping_GetItemString(obj, "titles");
if (!titles) {
PyErr_Clear();
}
@@ -1071,7 +1020,7 @@ _convert_from_dict(PyObject *obj, int align)
* If a property 'aligned' is in the dict, it overrides the align flag
* to be True if it not already true.
*/
- tmp = PyMapping_GetItemString(obj, "aligned");
+ PyObject *tmp = PyMapping_GetItemString(obj, "aligned");
if (tmp == NULL) {
PyErr_Clear();
} else {
@@ -1088,16 +1037,16 @@ _convert_from_dict(PyObject *obj, int align)
Py_DECREF(tmp);
}
- totalsize = 0;
- for (i = 0; i < n; i++) {
- PyObject *tup, *descr, *ind, *title, *name, *off;
- int len, ret, _align = 1;
- PyArray_Descr *newdescr;
-
+ /* Types with fields need the Python C API for field access */
+ char dtypeflags = NPY_NEEDS_PYAPI;
+ int totalsize = 0;
+ int maxalign = 0;
+ int has_out_of_order_fields = 0;
+ for (int i = 0; i < n; i++) {
/* Build item to insert (descr, offset, [title])*/
- len = 2;
- title = NULL;
- ind = PyInt_FromLong(i);
+ int len = 2;
+ PyObject *title = NULL;
+ PyObject *ind = PyInt_FromLong(i);
if (titles) {
title=PyObject_GetItem(titles, ind);
if (title && title != Py_None) {
@@ -1108,39 +1057,34 @@ _convert_from_dict(PyObject *obj, int align)
}
PyErr_Clear();
}
- tup = PyTuple_New(len);
- descr = PyObject_GetItem(descrs, ind);
+ PyObject *tup = PyTuple_New(len);
+ PyObject *descr = PyObject_GetItem(descrs, ind);
if (!descr) {
Py_DECREF(tup);
Py_DECREF(ind);
goto fail;
}
- if (align) {
- ret = PyArray_DescrAlignConverter(descr, &newdescr);
- }
- else {
- ret = PyArray_DescrConverter(descr, &newdescr);
- }
+ PyArray_Descr *newdescr = _arraydescr_run_converter(descr, align);
Py_DECREF(descr);
- if (ret == NPY_FAIL) {
+ if (newdescr == NULL) {
Py_DECREF(tup);
Py_DECREF(ind);
goto fail;
}
PyTuple_SET_ITEM(tup, 0, (PyObject *)newdescr);
+ int _align = 1;
if (align) {
_align = newdescr->alignment;
maxalign = PyArray_MAX(maxalign,_align);
}
if (offsets) {
- long offset;
- off = PyObject_GetItem(offsets, ind);
+ PyObject *off = PyObject_GetItem(offsets, ind);
if (!off) {
Py_DECREF(tup);
Py_DECREF(ind);
goto fail;
}
- offset = PyArray_PyIntAsInt(off);
+ long offset = PyArray_PyIntAsInt(off);
if (error_converting(offset)) {
Py_DECREF(off);
Py_DECREF(tup);
@@ -1168,7 +1112,9 @@ _convert_from_dict(PyObject *obj, int align)
"not divisible by the field alignment %d "
"with align=True",
offset, newdescr->alignment);
- ret = NPY_FAIL;
+ Py_DECREF(ind);
+ Py_DECREF(tup);
+ goto fail;
}
else if (offset + newdescr->elsize > totalsize) {
totalsize = offset + newdescr->elsize;
@@ -1181,15 +1127,10 @@ _convert_from_dict(PyObject *obj, int align)
PyTuple_SET_ITEM(tup, 1, PyInt_FromLong(totalsize));
totalsize += newdescr->elsize;
}
- if (ret == NPY_FAIL) {
- Py_DECREF(ind);
- Py_DECREF(tup);
- goto fail;
- }
if (len == 3) {
PyTuple_SET_ITEM(tup, 2, title);
}
- name = PyObject_GetItem(names, ind);
+ PyObject *name = PyObject_GetItem(names, ind);
Py_DECREF(ind);
if (!name) {
Py_DECREF(tup);
@@ -1223,13 +1164,10 @@ _convert_from_dict(PyObject *obj, int align)
}
}
Py_DECREF(tup);
- if (ret == NPY_FAIL) {
- goto fail;
- }
dtypeflags |= (newdescr->flags & NPY_FROM_FIELDS);
}
- new = PyArray_DescrNewFromType(NPY_VOID);
+ PyArray_Descr *new = PyArray_DescrNewFromType(NPY_VOID);
if (new == NULL) {
goto fail;
}
@@ -1275,7 +1213,7 @@ _convert_from_dict(PyObject *obj, int align)
if (tmp == NULL) {
PyErr_Clear();
} else {
- itemsize = (int)PyArray_PyIntAsInt(tmp);
+ int itemsize = (int)PyArray_PyIntAsInt(tmp);
Py_DECREF(tmp);
if (error_converting(itemsize)) {
Py_DECREF(new);
@@ -1304,7 +1242,7 @@ _convert_from_dict(PyObject *obj, int align)
}
/* Add the metadata if provided */
- metadata = PyMapping_GetItemString(obj, "metadata");
+ PyObject *metadata = PyMapping_GetItemString(obj, "metadata");
if (metadata == NULL) {
PyErr_Clear();
@@ -1366,113 +1304,99 @@ PyArray_DescrConverter2(PyObject *obj, PyArray_Descr **at)
}
}
-/*NUMPY_API
- * Get typenum from an object -- None goes to NPY_DEFAULT_TYPE
- * This function takes a Python object representing a type and converts it
- * to a the correct PyArray_Descr * structure to describe the type.
- *
- * Many objects can be used to represent a data-type which in NumPy is
- * quite a flexible concept.
- *
- * This is the central code that converts Python objects to
- * Type-descriptor objects that are used throughout numpy.
- *
- * Returns a new reference in *at, but the returned should not be
- * modified as it may be one of the canonical immutable objects or
- * a reference to the input obj.
+/**
+ * Get a dtype instance from a python type
*/
-NPY_NO_EXPORT int
-PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
-{
- int check_num = NPY_NOTYPE + 10;
- int elsize = 0;
- char endian = '=';
-
- *at = NULL;
+static PyArray_Descr *
+_convert_from_type(PyObject *obj) {
+ PyTypeObject *typ = (PyTypeObject*)obj;
- /* default */
- if (obj == Py_None) {
- *at = PyArray_DescrFromType(NPY_DEFAULT_TYPE);
- return NPY_SUCCEED;
+ if (PyType_IsSubtype(typ, &PyGenericArrType_Type)) {
+ return PyArray_DescrFromTypeObject(obj);
}
-
- if (PyArray_DescrCheck(obj)) {
- *at = (PyArray_Descr *)obj;
- Py_INCREF(*at);
- return NPY_SUCCEED;
- }
-
- if (PyType_Check(obj)) {
- if (PyType_IsSubtype((PyTypeObject *)obj, &PyGenericArrType_Type)) {
- *at = PyArray_DescrFromTypeObject(obj);
- return (*at) ? NPY_SUCCEED : NPY_FAIL;
- }
- check_num = NPY_OBJECT;
#if !defined(NPY_PY3K)
- if (obj == (PyObject *)(&PyInt_Type)) {
- check_num = NPY_LONG;
- }
- else if (obj == (PyObject *)(&PyLong_Type)) {
- check_num = NPY_LONGLONG;
- }
+ else if (typ == &PyInt_Type) {
+ return PyArray_DescrFromType(NPY_LONG);
+ }
+ else if (typ == &PyLong_Type) {
+ return PyArray_DescrFromType(NPY_LONGLONG);
+ }
#else
- if (obj == (PyObject *)(&PyLong_Type)) {
- check_num = NPY_LONG;
- }
+ else if (typ == &PyLong_Type) {
+ return PyArray_DescrFromType(NPY_LONG);
+ }
#endif
- else if (obj == (PyObject *)(&PyFloat_Type)) {
- check_num = NPY_DOUBLE;
- }
- else if (obj == (PyObject *)(&PyComplex_Type)) {
- check_num = NPY_CDOUBLE;
- }
- else if (obj == (PyObject *)(&PyBool_Type)) {
- check_num = NPY_BOOL;
- }
- else if (obj == (PyObject *)(&PyBytes_Type)) {
- check_num = NPY_STRING;
- }
- else if (obj == (PyObject *)(&PyUnicode_Type)) {
- check_num = NPY_UNICODE;
- }
+ else if (typ == &PyFloat_Type) {
+ return PyArray_DescrFromType(NPY_DOUBLE);
+ }
+ else if (typ == &PyComplex_Type) {
+ return PyArray_DescrFromType(NPY_CDOUBLE);
+ }
+ else if (typ == &PyBool_Type) {
+ return PyArray_DescrFromType(NPY_BOOL);
+ }
+ else if (typ == &PyBytes_Type) {
+ return PyArray_DescrFromType(NPY_STRING);
+ }
+ else if (typ == &PyUnicode_Type) {
+ return PyArray_DescrFromType(NPY_UNICODE);
+ }
#if defined(NPY_PY3K)
- else if (obj == (PyObject *)(&PyMemoryView_Type)) {
+ else if (typ == &PyMemoryView_Type) {
#else
- else if (obj == (PyObject *)(&PyBuffer_Type)) {
+ else if (typ == &PyBuffer_Type) {
#endif
- check_num = NPY_VOID;
- }
- else {
- if (_arraydescr_from_dtype_attr(obj, at)) {
- /*
- * Using dtype attribute, *at may be NULL if a
- * RecursionError occurred.
- */
- if (*at == NULL) {
- goto error;
- }
- return NPY_SUCCEED;
- }
+ return PyArray_DescrFromType(NPY_VOID);
+ }
+ else {
+ PyArray_Descr *at = NULL;
+ if (_arraydescr_from_dtype_attr(obj, &at)) {
/*
- * Note: this comes after _arraydescr_from_dtype_attr because the ctypes
- * type might override the dtype if numpy does not otherwise
- * support it.
+ * Using dtype attribute, *at may be NULL if a
+ * RecursionError occurred.
*/
- if (npy_ctypes_check((PyTypeObject *)obj)) {
- *at = _arraydescr_from_ctypes_type((PyTypeObject *)obj);
- return *at ? NPY_SUCCEED : NPY_FAIL;
+ if (at == NULL) {
+ return NULL;
}
+ return at;
}
- goto finish;
+ /*
+ * Note: this comes after _arraydescr_from_dtype_attr because the ctypes
+ * type might override the dtype if numpy does not otherwise
+ * support it.
+ */
+ if (npy_ctypes_check(typ)) {
+ return _arraydescr_from_ctypes_type(typ);
+ }
+
+ /* All other classes are treated as object */
+ return PyArray_DescrFromType(NPY_OBJECT);
}
+}
- /* or a typecode string */
- if (PyUnicode_Check(obj)) {
+static PyArray_Descr *
+_convert_from_bytes(PyObject *obj, int align);
+
+static PyArray_Descr *
+_convert_from_any(PyObject *obj, int align)
+{
+ /* default */
+ if (obj == Py_None) {
+ return PyArray_DescrFromType(NPY_DEFAULT_TYPE);
+ }
+ else if (PyArray_DescrCheck(obj)) {
+ PyArray_Descr *ret = (PyArray_Descr *)obj;
+ Py_INCREF(ret);
+ return ret;
+ }
+ else if (PyType_Check(obj)) {
+ return _convert_from_type(obj);
+ }
+ /* or a typecode string */
+ else if (PyUnicode_Check(obj)) {
/* Allow unicode format strings: convert to bytes */
- int retval;
- PyObject *obj2;
- obj2 = PyUnicode_AsASCIIString(obj);
+ PyObject *obj2 = PyUnicode_AsASCIIString(obj);
if (obj2 == NULL) {
/* Convert the exception into a TypeError */
PyObject *err = PyErr_Occurred();
@@ -1480,167 +1404,39 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
PyErr_SetString(PyExc_TypeError,
"data type not understood");
}
- return NPY_FAIL;
+ return NULL;
}
- retval = PyArray_DescrConverter(obj2, at);
+ PyArray_Descr *ret = _convert_from_any(obj2, align);
Py_DECREF(obj2);
- return retval;
+ return ret;
}
-
- if (PyBytes_Check(obj)) {
- char *type = NULL;
- Py_ssize_t len = 0;
-
- /* Check for a string typecode. */
- if (PyBytes_AsStringAndSize(obj, &type, &len) < 0) {
- goto error;
- }
-
- /* Empty string is invalid */
- if (len == 0) {
- goto fail;
- }
-
- /* check for commas present or first (or second) element a digit */
- if (_check_for_commastring(type, len)) {
- *at = _convert_from_commastring(obj, 0);
- return (*at) ? NPY_SUCCEED : NPY_FAIL;
- }
-
- /* Process the endian character. '|' is replaced by '='*/
- switch (type[0]) {
- case '>':
- case '<':
- case '=':
- endian = type[0];
- ++type;
- --len;
- break;
-
- case '|':
- endian = '=';
- ++type;
- --len;
- break;
- }
-
- /* Just an endian character is invalid */
- if (len == 0) {
- goto fail;
- }
-
- /* Check for datetime format */
- if (is_datetime_typestr(type, len)) {
- *at = parse_dtype_from_datetime_typestr(type, len);
- if (*at == NULL) {
- return NPY_FAIL;
- }
- /* *at has byte order '=' at this point */
- if (!PyArray_ISNBO(endian)) {
- (*at)->byteorder = endian;
- }
- return NPY_SUCCEED;
- }
-
- /* A typecode like 'd' */
- if (len == 1) {
- /* Python byte string characters are unsigned */
- check_num = (unsigned char) type[0];
- }
- /* A kind + size like 'f8' */
- else {
- char *typeend = NULL;
- int kind;
-
- /* Parse the integer, make sure it's the rest of the string */
- elsize = (int)strtol(type + 1, &typeend, 10);
- if (typeend - type == len) {
-
- kind = type[0];
- switch (kind) {
- case NPY_STRINGLTR:
- case NPY_STRINGLTR2:
- check_num = NPY_STRING;
- break;
-
- /*
- * When specifying length of UNICODE
- * the number of characters is given to match
- * the STRING interface. Each character can be
- * more than one byte and itemsize must be
- * the number of bytes.
- */
- case NPY_UNICODELTR:
- check_num = NPY_UNICODE;
- elsize <<= 2;
- break;
-
- case NPY_VOIDLTR:
- check_num = NPY_VOID;
- break;
-
- default:
- if (elsize == 0) {
- check_num = NPY_NOTYPE+10;
- }
- /* Support for generic processing c8, i4, f8, etc...*/
- else {
- check_num = PyArray_TypestrConvert(elsize, kind);
- if (check_num == NPY_NOTYPE) {
- check_num += 10;
- }
- elsize = 0;
- }
- }
- }
- }
+ else if (PyBytes_Check(obj)) {
+ return _convert_from_bytes(obj, align);
}
else if (PyTuple_Check(obj)) {
/* or a tuple */
- *at = _convert_from_tuple(obj, 0);
- if (*at == NULL){
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- goto fail;
- }
- return NPY_SUCCEED;
+ return _convert_from_tuple(obj, align);
}
else if (PyList_Check(obj)) {
/* or a list */
- *at = _convert_from_array_descr(obj,0);
- if (*at == NULL) {
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- goto fail;
- }
- return NPY_SUCCEED;
+ return _convert_from_array_descr(obj, align);
}
else if (PyDict_Check(obj) || PyDictProxy_Check(obj)) {
/* or a dictionary */
- *at = _convert_from_dict(obj,0);
- if (*at == NULL) {
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- goto fail;
- }
- return NPY_SUCCEED;
+ return _convert_from_dict(obj, align);
}
else if (PyArray_Check(obj)) {
- goto fail;
+ _report_generic_error();
+ return NULL;
}
else {
- if (_arraydescr_from_dtype_attr(obj, at)) {
+ PyArray_Descr *ret;
+ if (_arraydescr_from_dtype_attr(obj, &ret)) {
/*
- * Using dtype attribute, *at may be NULL if a
+ * Using dtype attribute, ret may be NULL if a
* RecursionError occurred.
*/
- if (*at == NULL) {
- goto error;
- }
- return NPY_SUCCEED;
+ return ret;
}
/*
* Note: this comes after _arraydescr_from_dtype_attr because the ctypes
@@ -1648,103 +1444,217 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
* support it.
*/
if (npy_ctypes_check(Py_TYPE(obj))) {
- *at = _arraydescr_from_ctypes_type(Py_TYPE(obj));
- return *at ? NPY_SUCCEED : NPY_FAIL;
+ return _arraydescr_from_ctypes_type(Py_TYPE(obj));
}
+ _report_generic_error();
+ return NULL;
+ }
+}
+
+
+/*NUMPY_API
+ * Get typenum from an object -- None goes to NPY_DEFAULT_TYPE
+ * This function takes a Python object representing a type and converts it
+ * to the correct PyArray_Descr * structure to describe the type.
+ *
+ * Many objects can be used to represent a data-type which in NumPy is
+ * quite a flexible concept.
+ *
+ * This is the central code that converts Python objects to
+ * Type-descriptor objects that are used throughout numpy.
+ *
+ * Returns a new reference in *at, but the returned descriptor should not be
+ * modified as it may be one of the canonical immutable objects or
+ * a reference to the input obj.
+ */
+NPY_NO_EXPORT int
+PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
+{
+ *at = _arraydescr_run_converter(obj, 0);
+ return (*at) ? NPY_SUCCEED : NPY_FAIL;
+}
+
+/** Convert a bytestring specification into a dtype */
+static PyArray_Descr *
+_convert_from_bytes(PyObject *obj, int align)
+{
+ /* Check for a string typecode. */
+ char *type = NULL;
+ Py_ssize_t len = 0;
+ if (PyBytes_AsStringAndSize(obj, &type, &len) < 0) {
+ return NULL;
+ }
+
+ /* Empty string is invalid */
+ if (len == 0) {
goto fail;
}
+
+ /* check for commas present or first (or second) element a digit */
+ if (_check_for_commastring(type, len)) {
+ return _convert_from_commastring(obj, align);
+ }
+
+ /* Process the endian character. '|' is replaced by '='*/
+ char endian = '=';
+ switch (type[0]) {
+ case '>':
+ case '<':
+ case '=':
+ endian = type[0];
+ ++type;
+ --len;
+ break;
+
+ case '|':
+ endian = '=';
+ ++type;
+ --len;
+ break;
+ }
+
+ /* Just an endian character is invalid */
+ if (len == 0) {
+ goto fail;
+ }
+
+ /* Check for datetime format */
+ if (is_datetime_typestr(type, len)) {
+ PyArray_Descr *ret = parse_dtype_from_datetime_typestr(type, len);
+ if (ret == NULL) {
+ return NULL;
+ }
+ /* ret has byte order '=' at this point */
+ if (!PyArray_ISNBO(endian)) {
+ ret->byteorder = endian;
+ }
+ return ret;
+ }
+
+ int check_num = NPY_NOTYPE + 10;
+ int elsize = 0;
+ /* A typecode like 'd' */
+ if (len == 1) {
+ /* Python byte string characters are unsigned */
+ check_num = (unsigned char) type[0];
+ }
+ /* A kind + size like 'f8' */
+ else {
+ char *typeend = NULL;
+ int kind;
+
+ /* Parse the integer, make sure it's the rest of the string */
+ elsize = (int)strtol(type + 1, &typeend, 10);
+ if (typeend - type == len) {
+
+ kind = type[0];
+ switch (kind) {
+ case NPY_STRINGLTR:
+ case NPY_STRINGLTR2:
+ check_num = NPY_STRING;
+ break;
+
+ /*
+ * When specifying length of UNICODE
+ * the number of characters is given to match
+ * the STRING interface. Each character can be
+ * more than one byte and itemsize must be
+ * the number of bytes.
+ */
+ case NPY_UNICODELTR:
+ check_num = NPY_UNICODE;
+ elsize <<= 2;
+ break;
+
+ case NPY_VOIDLTR:
+ check_num = NPY_VOID;
+ break;
+
+ default:
+ if (elsize == 0) {
+ check_num = NPY_NOTYPE+10;
+ }
+ /* Support for generic processing c8, i4, f8, etc...*/
+ else {
+ check_num = PyArray_TypestrConvert(elsize, kind);
+ if (check_num == NPY_NOTYPE) {
+ check_num += 10;
+ }
+ elsize = 0;
+ }
+ }
+ }
+ }
+
if (PyErr_Occurred()) {
goto fail;
}
-finish:
+ PyArray_Descr *ret;
if ((check_num == NPY_NOTYPE + 10) ||
- (*at = PyArray_DescrFromType(check_num)) == NULL) {
+ (ret = PyArray_DescrFromType(check_num)) == NULL) {
PyErr_Clear();
/* Now check to see if the object is registered in typeDict */
- if (typeDict != NULL) {
- PyObject *item = NULL;
-#if defined(NPY_PY3K)
- if (PyBytes_Check(obj)) {
- PyObject *tmp;
- tmp = PyUnicode_FromEncodedObject(obj, "ascii", "strict");
- if (tmp == NULL) {
+ if (typeDict == NULL) {
+ goto fail;
+ }
+ PyObject *item = NULL;
+ PyObject *tmp = PyUnicode_FromEncodedObject(obj, "ascii", "strict");
+ if (tmp == NULL) {
+ goto fail;
+ }
+ item = PyDict_GetItem(typeDict, tmp);
+ Py_DECREF(tmp);
+ if (item == NULL) {
+ goto fail;
+ }
+
+ /* Check for a deprecated Numeric-style typecode */
+ char *dep_tps[] = {"Bool", "Complex", "Float", "Int",
+ "Object0", "String0", "Timedelta64",
+ "Unicode0", "UInt", "Void0"};
+ int ndep_tps = sizeof(dep_tps) / sizeof(dep_tps[0]);
+ for (int i = 0; i < ndep_tps; ++i) {
+ char *dep_tp = dep_tps[i];
+
+ if (strncmp(type, dep_tp, strlen(dep_tp)) == 0) {
+ if (DEPRECATE("Numeric-style type codes are "
+ "deprecated and will result in "
+ "an error in the future.") < 0) {
goto fail;
}
- item = PyDict_GetItem(typeDict, tmp);
- Py_DECREF(tmp);
- }
- else {
- item = PyDict_GetItem(typeDict, obj);
- }
-#else
- item = PyDict_GetItem(typeDict, obj);
-#endif
- if (item) {
- /* Check for a deprecated Numeric-style typecode */
- if (PyBytes_Check(obj)) {
- char *type = NULL;
- Py_ssize_t len = 0;
- char *dep_tps[] = {"Bool", "Complex", "Float", "Int",
- "Object0", "String0", "Timedelta64",
- "Unicode0", "UInt", "Void0"};
- int ndep_tps = sizeof(dep_tps) / sizeof(dep_tps[0]);
- int i;
-
- if (PyBytes_AsStringAndSize(obj, &type, &len) < 0) {
- goto error;
- }
- for (i = 0; i < ndep_tps; ++i) {
- char *dep_tp = dep_tps[i];
-
- if (strncmp(type, dep_tp, strlen(dep_tp)) == 0) {
- if (DEPRECATE("Numeric-style type codes are "
- "deprecated and will result in "
- "an error in the future.") < 0) {
- goto fail;
- }
- }
- }
- }
- return PyArray_DescrConverter(item, at);
}
}
- goto fail;
+ /*
+ * Probably only ever dispatches to `_convert_from_type`, but who
+ * knows what users are injecting into `np.typeDict`.
+ */
+ return _convert_from_any(item, align);
}
- if (PyDataType_ISUNSIZED(*at) && (*at)->elsize != elsize) {
- PyArray_DESCR_REPLACE(*at);
- if (*at == NULL) {
- goto error;
+ if (PyDataType_ISUNSIZED(ret) && ret->elsize != elsize) {
+ PyArray_DESCR_REPLACE(ret);
+ if (ret == NULL) {
+ return NULL;
}
- (*at)->elsize = elsize;
+ ret->elsize = elsize;
}
if (endian != '=' && PyArray_ISNBO(endian)) {
endian = '=';
}
- if (endian != '=' && (*at)->byteorder != '|'
- && (*at)->byteorder != endian) {
- PyArray_DESCR_REPLACE(*at);
- if (*at == NULL) {
- goto error;
+ if (endian != '=' && ret->byteorder != '|' && ret->byteorder != endian) {
+ PyArray_DESCR_REPLACE(ret);
+ if (ret == NULL) {
+ return NULL;
}
- (*at)->byteorder = endian;
+ ret->byteorder = endian;
}
- return NPY_SUCCEED;
+ return ret;
fail:
- if (PyBytes_Check(obj)) {
- PyErr_Format(PyExc_TypeError,
- "data type \"%s\" not understood", PyBytes_AS_STRING(obj));
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- "data type not understood");
- }
-
-error:
- *at = NULL;
- return NPY_FAIL;
+ PyErr_Format(PyExc_TypeError,
+ "data type \"%s\" not understood", PyBytes_AS_STRING(obj));
+ return NULL;
}
/** Array Descr Objects for dynamic types **/
@@ -2281,12 +2191,8 @@ arraydescr_new(PyTypeObject *NPY_UNUSED(subtype),
return NULL;
}
- if (align) {
- if (!PyArray_DescrAlignConverter(odescr, &conv)) {
- return NULL;
- }
- }
- else if (!PyArray_DescrConverter(odescr, &conv)) {
+ conv = _arraydescr_run_converter(odescr, align);
+ if (conv == NULL) {
return NULL;
}
@@ -2733,11 +2639,7 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
subarray_shape = PyTuple_GET_ITEM(subarray, 1);
if (PyNumber_Check(subarray_shape)) {
PyObject *tmp;
-#if defined(NPY_PY3K)
tmp = PyNumber_Long(subarray_shape);
-#else
- tmp = PyNumber_Int(subarray_shape);
-#endif
if (tmp == NULL) {
return NULL;
}
@@ -2792,7 +2694,6 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
}
}
else {
-#if defined(NPY_PY3K)
/*
* To support pickle.load(f, encoding='bytes') for loading Py2
* generated pickles on Py3, we need to be more lenient and convert
@@ -2837,11 +2738,6 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
return NULL;
}
}
-#else
- PyErr_Format(PyExc_ValueError,
- "non-string names in Numpy dtype unpickling");
- return NULL;
-#endif
}
}
@@ -2930,35 +2826,8 @@ arraydescr_setstate(PyArray_Descr *self, PyObject *args)
NPY_NO_EXPORT int
PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at)
{
- if (PyDict_Check(obj) || PyDictProxy_Check(obj)) {
- *at = _convert_from_dict(obj, 1);
- }
- else if (PyBytes_Check(obj)) {
- *at = _convert_from_commastring(obj, 1);
- }
- else if (PyUnicode_Check(obj)) {
- PyObject *tmp;
- tmp = PyUnicode_AsASCIIString(obj);
- *at = _convert_from_commastring(tmp, 1);
- Py_DECREF(tmp);
- }
- else if (PyTuple_Check(obj)) {
- *at = _convert_from_tuple(obj, 1);
- }
- else if (PyList_Check(obj)) {
- *at = _convert_from_array_descr(obj, 1);
- }
- else {
- return PyArray_DescrConverter(obj, at);
- }
- if (*at == NULL) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError,
- "data-type-descriptor not understood");
- }
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
+ *at = _arraydescr_run_converter(obj, 1);
+ return (*at) ? NPY_SUCCEED : NPY_FAIL;
}
/*NUMPY_API
@@ -2969,32 +2838,13 @@ PyArray_DescrAlignConverter(PyObject *obj, PyArray_Descr **at)
NPY_NO_EXPORT int
PyArray_DescrAlignConverter2(PyObject *obj, PyArray_Descr **at)
{
- if (PyDict_Check(obj) || PyDictProxy_Check(obj)) {
- *at = _convert_from_dict(obj, 1);
- }
- else if (PyBytes_Check(obj)) {
- *at = _convert_from_commastring(obj, 1);
- }
- else if (PyUnicode_Check(obj)) {
- PyObject *tmp;
- tmp = PyUnicode_AsASCIIString(obj);
- *at = _convert_from_commastring(tmp, 1);
- Py_DECREF(tmp);
- }
- else if (PyList_Check(obj)) {
- *at = _convert_from_array_descr(obj, 1);
+ if (obj == Py_None) {
+ *at = NULL;
+ return NPY_SUCCEED;
}
else {
- return PyArray_DescrConverter2(obj, at);
+ return PyArray_DescrAlignConverter(obj, at);
}
- if (*at == NULL) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError,
- "data-type-descriptor not understood");
- }
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
}
@@ -3293,10 +3143,6 @@ static PyNumberMethods descr_as_number = {
(binaryfunc)0, /* nb_add */
(binaryfunc)0, /* nb_subtract */
(binaryfunc)0, /* nb_multiply */
- #if defined(NPY_PY3K)
- #else
- (binaryfunc)0, /* nb_divide */
- #endif
(binaryfunc)0, /* nb_remainder */
(binaryfunc)0, /* nb_divmod */
(ternaryfunc)0, /* nb_power */
@@ -3349,13 +3195,11 @@ _check_has_fields(PyArray_Descr *self)
if (astr == NULL) {
return -1;
}
-#if defined(NPY_PY3K)
{
PyObject *bstr = PyUnicode_AsUnicodeEscapeString(astr);
Py_DECREF(astr);
astr = bstr;
}
-#endif
PyErr_Format(PyExc_KeyError,
"There are no fields in dtype %s.", PyBytes_AsString(astr));
Py_DECREF(astr);
@@ -3570,12 +3414,7 @@ static PyMappingMethods descr_as_mapping = {
/****************** End of Mapping Protocol ******************************/
NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.dtype", /* tp_name */
sizeof(PyArray_Descr), /* tp_basicsize */
0, /* tp_itemsize */
@@ -3584,11 +3423,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayDescr_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
(void *)0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
(reprfunc)arraydescr_repr, /* tp_repr */
&descr_as_number, /* tp_as_number */
&descr_as_sequence, /* tp_as_sequence */
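Note on the converter refactor above: both PyArray_DescrAlignConverter and PyArray_DescrAlignConverter2 keep the classic "O&"-style contract (fill *at, return NPY_SUCCEED/NPY_FAIL), so argument-parsing call sites are unchanged; only the dispatch now lives in the shared _arraydescr_run_converter helper. The sketch below is illustrative only and is not part of the patch; dtype_name_example is a made-up call site.

    #include <Python.h>
    #include <numpy/arrayobject.h>

    /* Hypothetical call site; assumes import_array() ran at module init. */
    static PyObject *
    dtype_name_example(PyObject *NPY_UNUSED(self), PyObject *args)
    {
        PyArray_Descr *dtype = NULL;

        /* "O&" invokes the converter; NPY_FAIL (0) aborts parsing with the
         * converter's exception already set. */
        if (!PyArg_ParseTuple(args, "O&", PyArray_DescrAlignConverter, &dtype)) {
            return NULL;
        }
        /* On NPY_SUCCEED, the out-parameter holds a new reference. */
        PyObject *name = PyObject_GetAttrString((PyObject *)dtype, "name");
        Py_DECREF(dtype);
        return name;
    }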
diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h
index 6024c5e77..fe7dc6f9b 100644
--- a/numpy/core/src/multiarray/descriptor.h
+++ b/numpy/core/src/multiarray/descriptor.h
@@ -26,6 +26,6 @@ is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype);
NPY_NO_EXPORT PyArray_Descr *
arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind);
-extern NPY_NO_EXPORT char *_datetime_strings[];
+extern NPY_NO_EXPORT char const *_datetime_strings[];
#endif
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index ef0dd4a01..b26d5ac89 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -3832,8 +3832,8 @@ PyArray_CastRawArrays(npy_intp count,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
- char *data, npy_intp *strides,
+PyArray_PrepareOneRawArrayIter(int ndim, npy_intp const *shape,
+ char *data, npy_intp const *strides,
int *out_ndim, npy_intp *out_shape,
char **out_data, npy_intp *out_strides)
{
@@ -3953,9 +3953,9 @@ PyArray_PrepareOneRawArrayIter(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
- char *dataA, npy_intp *stridesA,
- char *dataB, npy_intp *stridesB,
+PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp const *shape,
+ char *dataA, npy_intp const *stridesA,
+ char *dataB, npy_intp const *stridesB,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB)
@@ -4077,10 +4077,10 @@ PyArray_PrepareTwoRawArrayIter(int ndim, npy_intp *shape,
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp *shape,
- char *dataA, npy_intp *stridesA,
- char *dataB, npy_intp *stridesB,
- char *dataC, npy_intp *stridesC,
+PyArray_PrepareThreeRawArrayIter(int ndim, npy_intp const *shape,
+ char *dataA, npy_intp const *stridesA,
+ char *dataB, npy_intp const *stridesB,
+ char *dataC, npy_intp const *stridesC,
int *out_ndim, npy_intp *out_shape,
char **out_dataA, npy_intp *out_stridesA,
char **out_dataB, npy_intp *out_stridesB,
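The const additions in this file (and in descriptor.h above and einsum.c.src below) only qualify read-only inputs: callers holding non-const npy_intp buffers still compile, while accidental writes inside the callee become compile errors. A minimal sketch of the same idea, not taken from the patch:

    #include <Python.h>
    #include <numpy/arrayobject.h>

    /* Read-only view of a shape vector: const documents and enforces intent. */
    static npy_intp
    shape_product_sketch(int ndim, npy_intp const *shape)
    {
        npy_intp prod = 1;
        for (int i = 0; i < ndim; i++) {
            prod *= shape[i];
            /* shape[i] = 1;   <- rejected by the compiler now */
        }
        return prod;
    }

    /* Call site: a mutable array still converts implicitly.
     *     npy_intp dims[2] = {3, 4};
     *     shape_product_sketch(2, dims);
     */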
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 58af44091..70af3fef9 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -107,7 +107,7 @@
*/
static void
@name@_sum_of_products_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if (@nop@ == 1) || (@nop@ <= 3 && !@complex@)
char *data0 = dataptr[0];
@@ -206,7 +206,7 @@ static void
static void
@name@_sum_of_products_contig_one(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@type@ *data_out = (@type@ *)dataptr[1];
@@ -268,7 +268,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@type@ *data1 = (@type@ *)dataptr[1];
@@ -354,7 +354,7 @@ finish_after_unrolled_loop:
/* Some extra specializations for the two operand case */
static void
@name@_sum_of_products_stride0_contig_outcontig_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
@type@ *data1 = (@type@ *)dataptr[1];
@@ -483,7 +483,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_stride0_outcontig_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
@@ -567,7 +567,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_contig_outstride0_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@type@ *data1 = (@type@ *)dataptr[1];
@@ -727,7 +727,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_stride0_contig_outstride0_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@temptype@ value0 = @from@(*(@type@ *)dataptr[0]);
@type@ *data1 = (@type@ *)dataptr[1];
@@ -826,7 +826,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_stride0_outstride0_two(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@temptype@ value1 = @from@(*(@type@ *)dataptr[1]);
@@ -927,7 +927,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_contig_three(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
@type@ *data0 = (@type@ *)dataptr[0];
@type@ *data1 = (@type@ *)dataptr[1];
@@ -971,7 +971,7 @@ static void
static void
@name@_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
- npy_intp *NPY_UNUSED(strides), npy_intp count)
+ npy_intp const *NPY_UNUSED(strides), npy_intp count)
{
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_@noplabel@ (%d)\n",
(int)count);
@@ -1024,7 +1024,7 @@ static void
static void
@name@_sum_of_products_contig_outstride0_one(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if @complex@
@temptype@ accum_re = 0, accum_im = 0;
@@ -1201,7 +1201,7 @@ finish_after_unrolled_loop:
static void
@name@_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if @complex@
@temptype@ accum_re = 0, accum_im = 0;
@@ -1319,7 +1319,7 @@ static void
static void
bool_sum_of_products_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if (@nop@ <= 3)
char *data0 = dataptr[0];
@@ -1376,7 +1376,7 @@ bool_sum_of_products_@noplabel@(int nop, char **dataptr,
static void
bool_sum_of_products_contig_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
#if (@nop@ <= 3)
char *data0 = dataptr[0];
@@ -1484,7 +1484,7 @@ finish_after_unrolled_loop:
static void
bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
- npy_intp *strides, npy_intp count)
+ npy_intp const *strides, npy_intp count)
{
npy_bool accum = 0;
@@ -1538,7 +1538,7 @@ bool_sum_of_products_outstride0_@noplabel@(int nop, char **dataptr,
/**end repeat**/
-typedef void (*sum_of_products_fn)(int, char **, npy_intp *, npy_intp);
+typedef void (*sum_of_products_fn)(int, char **, npy_intp const*, npy_intp);
/* These tables need to match up with the type enum */
static sum_of_products_fn
@@ -1720,7 +1720,7 @@ static sum_of_products_fn _unspecialized_table[NPY_NTYPES][4] = {
static sum_of_products_fn
get_sum_of_products_function(int nop, int type_num,
- npy_intp itemsize, npy_intp *fixed_strides)
+ npy_intp itemsize, npy_intp const *fixed_strides)
{
int iop;
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index a66b9d40d..6fe0eff4a 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -727,47 +727,25 @@ arrayflags_print(PyArrayFlagsObject *self)
);
}
-static int
-arrayflags_compare(PyArrayFlagsObject *self, PyArrayFlagsObject *other)
-{
- if (self->flags == other->flags) {
- return 0;
- }
- else if (self->flags < other->flags) {
- return -1;
- }
- else {
- return 1;
- }
-}
-
-
static PyObject*
arrayflags_richcompare(PyObject *self, PyObject *other, int cmp_op)
{
- PyObject *result = Py_NotImplemented;
- int cmp;
-
- if (cmp_op != Py_EQ && cmp_op != Py_NE) {
- PyErr_SetString(PyExc_TypeError,
- "undefined comparison for flag object");
- return NULL;
+ if (!PyObject_TypeCheck(other, &PyArrayFlags_Type)) {
+ Py_RETURN_NOTIMPLEMENTED;
}
- if (PyObject_TypeCheck(other, &PyArrayFlags_Type)) {
- cmp = arrayflags_compare((PyArrayFlagsObject *)self,
- (PyArrayFlagsObject *)other);
+ npy_bool eq = ((PyArrayFlagsObject*) self)->flags ==
+ ((PyArrayFlagsObject*) other)->flags;
- if (cmp_op == Py_EQ) {
- result = (cmp == 0) ? Py_True : Py_False;
- }
- else if (cmp_op == Py_NE) {
- result = (cmp != 0) ? Py_True : Py_False;
- }
+ if (cmp_op == Py_EQ) {
+ return PyBool_FromLong(eq);
+ }
+ else if (cmp_op == Py_NE) {
+ return PyBool_FromLong(!eq);
+ }
+ else {
+ Py_RETURN_NOTIMPLEMENTED;
}
-
- Py_INCREF(result);
- return result;
}
static PyMappingMethods arrayflags_as_mapping = {
@@ -793,12 +771,7 @@ arrayflags_new(PyTypeObject *NPY_UNUSED(self), PyObject *args, PyObject *NPY_UNU
}
NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.flagsobj",
sizeof(PyArrayFlagsObject),
0, /* tp_itemsize */
@@ -807,11 +780,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayFlags_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- (cmpfunc)arrayflags_compare, /* tp_compare */
-#endif
(reprfunc)arrayflags_print, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
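The rewritten arrayflags_richcompare above changes the observable behavior for ordering comparisons: instead of raising TypeError, it returns Py_NotImplemented, which lets Python fall back to the reflected operation or to default identity-based (in)equality. A generic sketch of that slot shape, with flagslike_object as a stand-in type rather than the real PyArrayFlagsObject:

    #include <Python.h>

    typedef struct {
        PyObject_HEAD
        int flags;                      /* stand-in for the real flags word */
    } flagslike_object;

    static PyObject *
    flagslike_richcompare(PyObject *self, PyObject *other, int cmp_op)
    {
        if (!PyObject_TypeCheck(other, Py_TYPE(self))) {
            Py_RETURN_NOTIMPLEMENTED;   /* defer instead of raising */
        }
        int eq = ((flagslike_object *)self)->flags ==
                 ((flagslike_object *)other)->flags;
        switch (cmp_op) {
            case Py_EQ: return PyBool_FromLong(eq);
            case Py_NE: return PyBool_FromLong(!eq);
            default:    Py_RETURN_NOTIMPLEMENTED;   /* ordering is undefined */
        }
    }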
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 6e5d480d0..9d39ee7a8 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -110,12 +110,7 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
npy_intp offset = 0;
npy_intp lower_offset = 0;
npy_intp upper_offset = 0;
-#if defined(NPY_PY3K)
Py_buffer view;
-#else
- Py_ssize_t buf_len;
- char *buf;
-#endif
if (obj == NULL) {
PyErr_SetString(PyExc_AttributeError,
@@ -140,7 +135,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
* Get the available memory through the buffer interface on
* PyArray_BASE(new) or if that fails from the current new
*/
-#if defined(NPY_PY3K)
if (PyArray_BASE(new) &&
PyObject_GetBuffer(PyArray_BASE(new), &view, PyBUF_SIMPLE) >= 0) {
offset = PyArray_BYTES(self) - (char *)view.buf;
@@ -148,14 +142,6 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
PyBuffer_Release(&view);
_dealloc_cached_buffer_info((PyObject*)new);
}
-#else
- if (PyArray_BASE(new) &&
- PyObject_AsReadBuffer(PyArray_BASE(new), (const void **)&buf,
- &buf_len) >= 0) {
- offset = PyArray_BYTES(self) - buf;
- numbytes = buf_len + offset;
- }
-#endif
else {
PyErr_Clear();
offset_bounds_from_strides(PyArray_ITEMSIZE(new), PyArray_NDIM(new),
@@ -318,23 +304,7 @@ array_interface_get(PyArrayObject *self)
static PyObject *
array_data_get(PyArrayObject *self)
{
-#if defined(NPY_PY3K)
return PyMemoryView_FromObject((PyObject *)self);
-#else
- npy_intp nbytes;
- if (!(PyArray_ISONESEGMENT(self))) {
- PyErr_SetString(PyExc_AttributeError, "cannot get single-"\
- "segment buffer for discontiguous array");
- return NULL;
- }
- nbytes = PyArray_NBYTES(self);
- if (PyArray_ISWRITEABLE(self)) {
- return PyBuffer_FromReadWriteObject((PyObject *)self, 0, (Py_ssize_t) nbytes);
- }
- else {
- return PyBuffer_FromObject((PyObject *)self, 0, (Py_ssize_t) nbytes);
- }
-#endif
}
static int
@@ -343,9 +313,7 @@ array_data_set(PyArrayObject *self, PyObject *op)
void *buf;
Py_ssize_t buf_len;
int writeable=1;
-#if defined(NPY_PY3K)
Py_buffer view;
-#endif
/* 2016-19-02, 1.12 */
int ret = DEPRECATE("Assigning the 'data' attribute is an "
@@ -360,7 +328,6 @@ array_data_set(PyArrayObject *self, PyObject *op)
"Cannot delete array data");
return -1;
}
-#if defined(NPY_PY3K)
if (PyObject_GetBuffer(op, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
writeable = 0;
PyErr_Clear();
@@ -378,18 +345,7 @@ array_data_set(PyArrayObject *self, PyObject *op)
*/
PyBuffer_Release(&view);
_dealloc_cached_buffer_info(op);
-#else
- if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) {
- PyErr_Clear();
- writeable = 0;
- if (PyObject_AsReadBuffer(op, (const void **)&buf, &buf_len) < 0) {
- PyErr_Clear();
- PyErr_SetString(PyExc_AttributeError,
- "object does not have single-segment buffer interface");
- return -1;
- }
- }
-#endif
+
if (!PyArray_ISONESEGMENT(self)) {
PyErr_SetString(PyExc_AttributeError,
"cannot set single-segment buffer for discontiguous array");
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index e6867083f..54d9085b7 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -825,7 +825,7 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out,
*/
static int
_new_sortlike(PyArrayObject *op, int axis, PyArray_SortFunc *sort,
- PyArray_PartitionFunc *part, npy_intp *kth, npy_intp nkth)
+ PyArray_PartitionFunc *part, npy_intp const *kth, npy_intp nkth)
{
npy_intp N = PyArray_DIM(op, axis);
npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op);
@@ -953,7 +953,7 @@ fail:
static PyObject*
_new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
PyArray_ArgPartitionFunc *argpart,
- npy_intp *kth, npy_intp nkth)
+ npy_intp const *kth, npy_intp nkth)
{
npy_intp N = PyArray_DIM(op, axis);
npy_intp elsize = (npy_intp)PyArray_ITEMSIZE(op);
@@ -1059,12 +1059,10 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
if (argpart == NULL) {
ret = argsort(valptr, idxptr, N, op);
-#if defined(NPY_PY3K)
/* Object comparisons may raise an exception in Python 3 */
if (hasrefs && PyErr_Occurred()) {
ret = -1;
}
-#endif
if (ret < 0) {
goto fail;
}
@@ -1075,12 +1073,10 @@ _new_argsortlike(PyArrayObject *op, int axis, PyArray_ArgSortFunc *argsort,
for (i = 0; i < nkth; ++i) {
ret = argpart(valptr, idxptr, N, kth[i], pivots, &npiv, op);
-#if defined(NPY_PY3K)
/* Object comparisons may raise an exception in Python 3 */
if (hasrefs && PyErr_Occurred()) {
ret = -1;
}
-#endif
if (ret < 0) {
goto fail;
}
@@ -1566,12 +1562,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
_strided_byte_swap(valbuffer, (npy_intp) elsize, N, elsize);
}
rcode = argsort(valbuffer, (npy_intp *)indbuffer, N, mps[j]);
-#if defined(NPY_PY3K)
if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j]))
&& PyErr_Occurred())) {
-#else
- if (rcode < 0) {
-#endif
PyDataMem_FREE(valbuffer);
PyDataMem_FREE(indbuffer);
free(swaps);
@@ -1601,12 +1593,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
}
rcode = argsort(its[j]->dataptr,
(npy_intp *)rit->dataptr, N, mps[j]);
-#if defined(NPY_PY3K)
if (rcode < 0 || (PyDataType_REFCHK(PyArray_DESCR(mps[j]))
&& PyErr_Occurred())) {
-#else
- if (rcode < 0) {
-#endif
goto fail;
}
PyArray_ITER_NEXT(its[j]);
@@ -2028,7 +2016,7 @@ count_nonzero_bytes_384(const npy_uint64 * w)
* Returns -1 on error.
*/
NPY_NO_EXPORT npy_intp
-count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides)
+count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const *astrides)
{
int idim;
npy_intp shape[NPY_MAXDIMS], strides[NPY_MAXDIMS];
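The item_selection.c hunks make the post-sort PyErr_Occurred() check unconditional now that only Python 3 is supported: comparison callbacks on object dtypes cannot report a raised exception through the sort routine's return value, so the caller re-checks afterwards. Illustrative sketch of that pattern (sort_step_sketch is made up):

    #include <Python.h>
    #include <numpy/arrayobject.h>

    static int
    sort_step_sketch(PyArray_SortFunc *sort, char *ptr, npy_intp n,
                     PyArrayObject *arr, int hasrefs)
    {
        int ret = sort(ptr, n, arr);
        if (hasrefs && PyErr_Occurred()) {
            ret = -1;       /* a rich comparison raised inside the sort */
        }
        return ret;
    }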
diff --git a/numpy/core/src/multiarray/item_selection.h b/numpy/core/src/multiarray/item_selection.h
index 2276b4db7..c1c8b5567 100644
--- a/numpy/core/src/multiarray/item_selection.h
+++ b/numpy/core/src/multiarray/item_selection.h
@@ -8,7 +8,7 @@
* Returns -1 on error.
*/
NPY_NO_EXPORT npy_intp
-count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides);
+count_boolean_trues(int ndim, char *data, npy_intp const *ashape, npy_intp const *astrides);
/*
* Gets a single item from the array, based on a single multi-index
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 1ee0a4d60..14e70fe02 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1102,12 +1102,7 @@ static PyGetSetDef iter_getsets[] = {
};
NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.flatiter", /* tp_name */
sizeof(PyArrayIterObject), /* tp_basicsize */
0, /* tp_itemsize */
@@ -1116,11 +1111,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
@@ -1560,12 +1551,7 @@ static PyMethodDef arraymultiter_methods[] = {
};
NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.broadcast", /* tp_name */
sizeof(PyArrayMultiIterObject), /* tp_basicsize */
0, /* tp_itemsize */
@@ -1574,11 +1560,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayMultiIter_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
@@ -1890,12 +1872,7 @@ static void neighiter_dealloc(PyArrayNeighborhoodIterObject* iter)
}
NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.neigh_internal_iter", /* tp_name*/
sizeof(PyArrayNeighborhoodIterObject), /* tp_basicsize*/
0, /* tp_itemsize*/
@@ -1903,11 +1880,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type = {
0, /* tp_print*/
0, /* tp_getattr*/
0, /* tp_setattr*/
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr*/
0, /* tp_as_number*/
0, /* tp_as_sequence*/
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index 63b2a8842..d234c366c 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -974,9 +974,9 @@ PyArray_GetStridedNumericCastFn(int aligned, npy_intp src_stride,
NPY_NO_EXPORT npy_intp
PyArray_TransferNDimToStrided(npy_intp ndim,
char *dst, npy_intp dst_stride,
- char *src, npy_intp *src_strides, npy_intp src_strides_inc,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ char *src, npy_intp const *src_strides, npy_intp src_strides_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_StridedUnaryOp *stransfer,
NpyAuxData *data)
@@ -1092,10 +1092,10 @@ PyArray_TransferNDimToStrided(npy_intp ndim,
/* See documentation of arguments in lowlevel_strided_loops.h */
NPY_NO_EXPORT npy_intp
PyArray_TransferStridedToNDim(npy_intp ndim,
- char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
+ char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_StridedUnaryOp *stransfer,
NpyAuxData *data)
@@ -1211,11 +1211,11 @@ PyArray_TransferStridedToNDim(npy_intp ndim,
/* See documentation of arguments in lowlevel_strided_loops.h */
NPY_NO_EXPORT npy_intp
PyArray_TransferMaskedStridedToNDim(npy_intp ndim,
- char *dst, npy_intp *dst_strides, npy_intp dst_strides_inc,
+ char *dst, npy_intp const *dst_strides, npy_intp dst_strides_inc,
char *src, npy_intp src_stride,
npy_uint8 *mask, npy_intp mask_stride,
- npy_intp *coords, npy_intp coords_inc,
- npy_intp *shape, npy_intp shape_inc,
+ npy_intp const *coords, npy_intp coords_inc,
+ npy_intp const *shape, npy_intp shape_inc,
npy_intp count, npy_intp src_itemsize,
PyArray_MaskedStridedUnaryOp *stransfer,
NpyAuxData *data)
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 8dcd28c84..3efb3cb9d 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -227,11 +227,7 @@ unpack_indices(PyObject *index, PyObject **result, npy_intp result_n)
/* Obvious single-entry cases */
if (0 /* to aid macros below */
-#if !defined(NPY_PY3K)
- || PyInt_CheckExact(index)
-#else
|| PyLong_CheckExact(index)
-#endif
|| index == Py_None
|| PySlice_Check(index)
|| PyArray_Check(index)
@@ -481,11 +477,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
*
* Check for integers first, purely for performance
*/
-#if !defined(NPY_PY3K)
- if (PyInt_CheckExact(obj) || !PyArray_Check(obj)) {
-#else
if (PyLong_CheckExact(obj) || !PyArray_Check(obj)) {
-#endif
npy_intp ind = PyArray_PyIntAsIntp(obj);
if (error_converting(ind)) {
@@ -3340,12 +3332,7 @@ arraymapiter_dealloc(PyArrayMapIterObject *mit)
* to a[indexobj].flat but the latter gets to use slice syntax.
*/
NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.mapiter", /* tp_name */
sizeof(PyArrayMapIterObject), /* tp_basicsize */
0, /* tp_itemsize */
@@ -3354,11 +3341,7 @@ NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index e5845f2f6..83c993425 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -1469,7 +1469,7 @@ array_argpartition(PyArrayObject *self, PyObject *args, PyObject *kwds)
static PyObject *
array_searchsorted(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"keys", "side", "sorter", NULL};
+ static char *kwlist[] = {"v", "side", "sorter", NULL};
PyObject *keys;
PyObject *sorter;
NPY_SEARCHSIDE side = NPY_SEARCHLEFT;
@@ -1954,7 +1954,6 @@ array_setstate(PyArrayObject *self, PyObject *args)
else {
Py_INCREF(rawdata);
-#if defined(NPY_PY3K)
/* Backward compatibility with Python 2 NumPy pickles */
if (PyUnicode_Check(rawdata)) {
PyObject *tmp;
@@ -1969,7 +1968,6 @@ array_setstate(PyArrayObject *self, PyObject *args)
return NULL;
}
}
-#endif
if (!PyBytes_Check(rawdata)) {
PyErr_SetString(PyExc_TypeError,
@@ -2030,14 +2028,9 @@ array_setstate(PyArrayObject *self, PyObject *args)
if (!PyDataType_FLAGCHK(typecode, NPY_LIST_PICKLE)) {
int swap = PyArray_ISBYTESWAPPED(self);
fa->data = datastr;
-#ifndef NPY_PY3K
- /* Check that the string is not interned */
- if (!IsAligned(self) || swap || PyString_CHECK_INTERNED(rawdata)) {
-#else
/* Bytes should always be considered immutable, but we just grab the
* pointer if they are large, to save memory. */
if (!IsAligned(self) || swap || (len <= 1000)) {
-#endif
npy_intp num = PyArray_NBYTES(self);
if (num == 0) {
Py_DECREF(rawdata);
@@ -2641,51 +2634,6 @@ array_complex(PyArrayObject *self, PyObject *NPY_UNUSED(args))
return c;
}
-#ifndef NPY_PY3K
-
-static PyObject *
-array_getslice(PyArrayObject *self, PyObject *args)
-{
- PyObject *start, *stop, *slice, *result;
- if (!PyArg_ParseTuple(args, "OO:__getslice__", &start, &stop)) {
- return NULL;
- }
-
- slice = PySlice_New(start, stop, NULL);
- if (slice == NULL) {
- return NULL;
- }
-
- /* Deliberately delegate to subclasses */
- result = PyObject_GetItem((PyObject *)self, slice);
- Py_DECREF(slice);
- return result;
-}
-
-static PyObject *
-array_setslice(PyArrayObject *self, PyObject *args)
-{
- PyObject *start, *stop, *value, *slice;
- if (!PyArg_ParseTuple(args, "OOO:__setslice__", &start, &stop, &value)) {
- return NULL;
- }
-
- slice = PySlice_New(start, stop, NULL);
- if (slice == NULL) {
- return NULL;
- }
-
- /* Deliberately delegate to subclasses */
- if (PyObject_SetItem((PyObject *)self, slice, value) < 0) {
- Py_DECREF(slice);
- return NULL;
- }
- Py_DECREF(slice);
- Py_RETURN_NONE;
-}
-
-#endif
-
NPY_NO_EXPORT PyMethodDef array_methods[] = {
/* for subtypes */
@@ -2705,12 +2653,6 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
(PyCFunction)array_function,
METH_VARARGS | METH_KEYWORDS, NULL},
-#ifndef NPY_PY3K
- {"__unicode__",
- (PyCFunction)array_unicode,
- METH_NOARGS, NULL},
-#endif
-
/* for the sys module */
{"__sizeof__",
(PyCFunction) array_sizeof,
@@ -2749,23 +2691,6 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
(PyCFunction) array_format,
METH_VARARGS, NULL},
-#ifndef NPY_PY3K
- /*
- * While we could put these in `tp_sequence`, its' easier to define them
- * in terms of PyObject* arguments.
- *
- * We must provide these for compatibility with code that calls them
- * directly. They are already deprecated at a language level in python 2.7,
- * but are removed outright in python 3.
- */
- {"__getslice__",
- (PyCFunction) array_getslice,
- METH_VARARGS, NULL},
- {"__setslice__",
- (PyCFunction) array_setslice,
- METH_VARARGS, NULL},
-#endif
-
/* Original and Extended methods added 2005 */
{"all",
(PyCFunction)array_all,
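The array_searchsorted hunk near the top of this file renames the keyword from "keys" to "v", so a.searchsorted(v=3, side='right') now matches the documented signature; positional calls are unaffected. A simplified sketch of how kwlist names map to keywords (the real method also validates side and sorter, omitted here):

    #include <Python.h>

    static PyObject *
    searchsorted_kwargs_sketch(PyObject *self, PyObject *args, PyObject *kwds)
    {
        /* Each kwlist entry names the keyword accepted for that slot. */
        static char *kwlist[] = {"v", "side", "sorter", NULL};
        PyObject *v, *side = NULL, *sorter = NULL;

        (void)self;
        if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO:searchsorted",
                                         kwlist, &v, &side, &sorter)) {
            return NULL;
        }
        Py_RETURN_NONE;     /* the real method dispatches to PyArray_SearchSorted */
    }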
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 58f64f0bb..b1b9c0051 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -161,7 +161,7 @@ PyArray_MultiplyList(npy_intp const *l1, int n)
* Multiply a List of Non-negative numbers with over-flow detection.
*/
NPY_NO_EXPORT npy_intp
-PyArray_OverflowMultiplyList(npy_intp *l1, int n)
+PyArray_OverflowMultiplyList(npy_intp const *l1, int n)
{
npy_intp prod = 1;
int i;
@@ -1920,7 +1920,6 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
alloc = 1;
}
else {
-#if defined(NPY_PY3K)
/* Backward compatibility with Python 2 NumPy pickles */
if (PyUnicode_Check(obj)) {
tmpobj = PyUnicode_AsLatin1String(obj);
@@ -1934,8 +1933,6 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
return NULL;
}
}
-#endif
-
if (!PyString_Check(obj)) {
PyErr_SetString(PyExc_TypeError,
"initializing object must be a string");
@@ -2020,11 +2017,7 @@ array_count_nonzero(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
if (count == -1) {
return NULL;
}
-#if defined(NPY_PY3K)
return PyLong_FromSsize_t(count);
-#else
- return PyInt_FromSsize_t(count);
-#endif
}
static PyObject *
@@ -2639,13 +2632,11 @@ array_einsum(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
while (PyDict_Next(kwds, &pos, &key, &value)) {
char *str = NULL;
-#if defined(NPY_PY3K)
Py_XDECREF(str_key_obj);
str_key_obj = PyUnicode_AsASCIIString(key);
if (str_key_obj != NULL) {
key = str_key_obj;
}
-#endif
str = PyBytes_AsString(key);
@@ -3293,42 +3284,6 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args)
return convert_datetime_metadata_to_tuple(meta);
}
-#if !defined(NPY_PY3K)
-static PyObject *
-new_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args)
-{
- int size;
-
- if (!PyArg_ParseTuple(args, "i:buffer", &size)) {
- return NULL;
- }
- return PyBuffer_New(size);
-}
-
-static PyObject *
-buffer_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
-{
- PyObject *obj;
- Py_ssize_t offset = 0, n;
- Py_ssize_t size = Py_END_OF_BUFFER;
- void *unused;
- static char *kwlist[] = {"object", "offset", "size", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds,
- "O|" NPY_SSIZE_T_PYFMT NPY_SSIZE_T_PYFMT ":get_buffer", kwlist,
- &obj, &offset, &size)) {
- return NULL;
- }
- if (PyObject_AsWriteBuffer(obj, &unused, &n) < 0) {
- PyErr_Clear();
- return PyBuffer_FromObject(obj, offset, size);
- }
- else {
- return PyBuffer_FromReadWriteObject(obj, offset, size);
- }
-}
-#endif
-
/*
* Prints floating-point scalars using the Dragon4 algorithm, scientific mode.
* See docstring of `np.format_float_scientific` for description of arguments.
@@ -3909,11 +3864,6 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_
goto fail;
}
}
-#if !defined(NPY_PY3K)
- else if (PyInt_Check(max_work_obj)) {
- max_work = PyInt_AsSsize_t(max_work_obj);
- }
-#endif
else {
PyErr_SetString(PyExc_ValueError, "max_work must be an integer");
goto fail;
@@ -4137,14 +4087,6 @@ static struct PyMethodDef array_module_methods[] = {
{"is_busday",
(PyCFunction)array_is_busday,
METH_VARARGS | METH_KEYWORDS, NULL},
-#if !defined(NPY_PY3K)
- {"newbuffer",
- (PyCFunction)new_buffer,
- METH_VARARGS, NULL},
- {"getbuffer",
- (PyCFunction)buffer_buffer,
- METH_VARARGS | METH_KEYWORDS, NULL},
-#endif
{"format_longfloat",
(PyCFunction)format_longfloat,
METH_VARARGS | METH_KEYWORDS, NULL},
@@ -4224,11 +4166,6 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
if (PyType_Ready(&PyBool_Type) < 0) {
return -1;
}
-#if !defined(NPY_PY3K)
- if (PyType_Ready(&PyInt_Type) < 0) {
- return -1;
- }
-#endif
if (PyType_Ready(&PyFloat_Type) < 0) {
return -1;
}
@@ -4279,27 +4216,6 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
return -1; \
}
-/*
- * In Py3K, int is no longer a fixed-width integer type, so don't
- * inherit numpy.int_ from it.
- */
-#if defined(NPY_PY3K)
-#define INHERIT_INT(child, parent2) \
- SINGLE_INHERIT(child, parent2);
-#else
-#define INHERIT_INT(child, parent2) \
- Py##child##ArrType_Type.tp_flags |= Py_TPFLAGS_INT_SUBCLASS; \
- DUAL_INHERIT(child, Int, parent2);
-#endif
-
-#if defined(NPY_PY3K)
-#define DUAL_INHERIT_COMPARE(child, parent1, parent2)
-#else
-#define DUAL_INHERIT_COMPARE(child, parent1, parent2) \
- Py##child##ArrType_Type.tp_compare = \
- Py##parent1##_Type.tp_compare;
-#endif
-
#define DUAL_INHERIT2(child, parent1, parent2) \
Py##child##ArrType_Type.tp_base = &Py##parent1##_Type; \
Py##child##ArrType_Type.tp_bases = \
@@ -4307,7 +4223,6 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
&Py##parent2##ArrType_Type); \
Py##child##ArrType_Type.tp_richcompare = \
Py##parent1##_Type.tp_richcompare; \
- DUAL_INHERIT_COMPARE(child, parent1, parent2) \
Py##child##ArrType_Type.tp_hash = Py##parent1##_Type.tp_hash; \
if (PyType_Ready(&Py##child##ArrType_Type) < 0) { \
PyErr_Print(); \
@@ -4320,20 +4235,9 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
SINGLE_INHERIT(Bool, Generic);
SINGLE_INHERIT(Byte, SignedInteger);
SINGLE_INHERIT(Short, SignedInteger);
-
-#if NPY_SIZEOF_INT == NPY_SIZEOF_LONG
- INHERIT_INT(Int, SignedInteger);
-#else
SINGLE_INHERIT(Int, SignedInteger);
-#endif
-
- INHERIT_INT(Long, SignedInteger);
-
-#if NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG
- INHERIT_INT(LongLong, SignedInteger);
-#else
+ SINGLE_INHERIT(Long, SignedInteger);
SINGLE_INHERIT(LongLong, SignedInteger);
-#endif
/* Datetime doesn't fit in any category */
SINGLE_INHERIT(Datetime, Generic);
@@ -4372,9 +4276,7 @@ setup_scalartypes(PyObject *NPY_UNUSED(dict))
#undef SINGLE_INHERIT
#undef DUAL_INHERIT
-#undef INHERIT_INT
#undef DUAL_INHERIT2
-#undef DUAL_INHERIT_COMPARE
/*
* Clean up string and unicode array types so they act more like
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 5e770338d..e40a2d594 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -56,7 +56,7 @@ static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
const npy_uint32 *op_flags, int **op_axes,
- npy_intp *itershape);
+ npy_intp const *itershape);
static void
npyiter_replace_axisdata(NpyIter *iter, int iop,
PyArrayObject *op,
@@ -80,7 +80,7 @@ npyiter_get_common_dtype(int nop, PyArrayObject **op,
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
- int op_ndim, npy_intp *shape,
+ int op_ndim, npy_intp const *shape,
PyArray_Descr *op_dtype, const int *op_axes);
static int
npyiter_allocate_arrays(NpyIter *iter,
@@ -1424,7 +1424,7 @@ static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
const npy_uint32 *op_flags, int **op_axes,
- npy_intp *itershape)
+ npy_intp const *itershape)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -2476,7 +2476,7 @@ npyiter_get_common_dtype(int nop, PyArrayObject **op,
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
- int op_ndim, npy_intp *shape,
+ int op_ndim, npy_intp const *shape,
PyArray_Descr *op_dtype, const int *op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 246f9d382..b4b3c6704 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -2490,12 +2490,7 @@ NPY_NO_EXPORT PyMappingMethods npyiter_as_mapping = {
};
NPY_NO_EXPORT PyTypeObject NpyIter_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.nditer", /* tp_name */
sizeof(NewNpyArrayIterObject), /* tp_basicsize */
0, /* tp_itemsize */
@@ -2504,11 +2499,7 @@ NPY_NO_EXPORT PyTypeObject NpyIter_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
&npyiter_as_sequence, /* tp_as_sequence */
diff --git a/numpy/core/src/multiarray/npy_buffer.h b/numpy/core/src/multiarray/npy_buffer.h
index fae413c85..2eb97c4b9 100644
--- a/numpy/core/src/multiarray/npy_buffer.h
+++ b/numpy/core/src/multiarray/npy_buffer.h
@@ -7,7 +7,7 @@ NPY_NO_EXPORT void
_dealloc_cached_buffer_info(PyObject *self);
NPY_NO_EXPORT PyArray_Descr*
-_descriptor_from_pep3118_format(char *s);
+_descriptor_from_pep3118_format(char const *s);
NPY_NO_EXPORT int
gentype_getbuffer(PyObject *obj, Py_buffer *view, int flags);
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index dabc866ff..8cdc502d6 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -32,10 +32,6 @@ static PyObject *
array_inplace_subtract(PyArrayObject *m1, PyObject *m2);
static PyObject *
array_inplace_multiply(PyArrayObject *m1, PyObject *m2);
-#if !defined(NPY_PY3K)
-static PyObject *
-array_inplace_divide(PyArrayObject *m1, PyObject *m2);
-#endif
static PyObject *
array_inplace_true_divide(PyArrayObject *m1, PyObject *m2);
static PyObject *
@@ -353,20 +349,6 @@ array_multiply(PyArrayObject *m1, PyObject *m2)
return PyArray_GenericBinaryFunction(m1, m2, n_ops.multiply);
}
-#if !defined(NPY_PY3K)
-static PyObject *
-array_divide(PyArrayObject *m1, PyObject *m2)
-{
- PyObject *res;
-
- BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_divide, array_divide);
- if (try_binary_elide(m1, m2, &array_inplace_divide, &res, 0)) {
- return res;
- }
- return PyArray_GenericBinaryFunction(m1, m2, n_ops.divide);
-}
-#endif
-
static PyObject *
array_remainder(PyArrayObject *m1, PyObject *m2)
{
@@ -381,7 +363,6 @@ array_divmod(PyArrayObject *m1, PyObject *m2)
return PyArray_GenericBinaryFunction(m1, m2, n_ops.divmod);
}
-#if PY_VERSION_HEX >= 0x03050000
/* Need this to be version dependent on account of the slot check */
static PyObject *
array_matrix_multiply(PyArrayObject *m1, PyObject *m2)
@@ -399,7 +380,6 @@ array_inplace_matrix_multiply(
"Use 'a = a @ b' instead of 'a @= b'.");
return NULL;
}
-#endif
/*
* Determine if object is a scalar and if so, convert the object
@@ -728,16 +708,6 @@ array_inplace_multiply(PyArrayObject *m1, PyObject *m2)
return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.multiply);
}
-#if !defined(NPY_PY3K)
-static PyObject *
-array_inplace_divide(PyArrayObject *m1, PyObject *m2)
-{
- INPLACE_GIVE_UP_IF_NEEDED(
- m1, m2, nb_inplace_divide, array_inplace_divide);
- return PyArray_GenericInplaceBinaryFunction(m1, m2, n_ops.divide);
-}
-#endif
-
static PyObject *
array_inplace_remainder(PyArrayObject *m1, PyObject *m2)
{
@@ -931,67 +901,12 @@ array_float(PyArrayObject *v)
return array_scalar_forward(v, &PyNumber_Float, " in ndarray.__float__");
}
-#if defined(NPY_PY3K)
-
NPY_NO_EXPORT PyObject *
array_int(PyArrayObject *v)
{
return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__int__");
}
-#else
-
-NPY_NO_EXPORT PyObject *
-array_int(PyArrayObject *v)
-{
- return array_scalar_forward(v, &PyNumber_Int, " in ndarray.__int__");
-}
-
-NPY_NO_EXPORT PyObject *
-array_long(PyArrayObject *v)
-{
- return array_scalar_forward(v, &PyNumber_Long, " in ndarray.__long__");
-}
-
-/* hex and oct aren't exposed to the C api, but we need a function pointer */
-static PyObject *
-_PyNumber_Oct(PyObject *o) {
- PyObject *res;
- PyObject *mod = PyImport_ImportModule("__builtin__");
- if (mod == NULL) {
- return NULL;
- }
- res = PyObject_CallMethod(mod, "oct", "(O)", o);
- Py_DECREF(mod);
- return res;
-}
-
-static PyObject *
-_PyNumber_Hex(PyObject *o) {
- PyObject *res;
- PyObject *mod = PyImport_ImportModule("__builtin__");
- if (mod == NULL) {
- return NULL;
- }
- res = PyObject_CallMethod(mod, "hex", "(O)", o);
- Py_DECREF(mod);
- return res;
-}
-
-NPY_NO_EXPORT PyObject *
-array_oct(PyArrayObject *v)
-{
- return array_scalar_forward(v, &_PyNumber_Oct, " in ndarray.__oct__");
-}
-
-NPY_NO_EXPORT PyObject *
-array_hex(PyArrayObject *v)
-{
- return array_scalar_forward(v, &_PyNumber_Hex, " in ndarray.__hex__");
-}
-
-#endif
-
static PyObject *
array_index(PyArrayObject *v)
{
@@ -1008,9 +923,6 @@ NPY_NO_EXPORT PyNumberMethods array_as_number = {
(binaryfunc)array_add, /*nb_add*/
(binaryfunc)array_subtract, /*nb_subtract*/
(binaryfunc)array_multiply, /*nb_multiply*/
-#if !defined(NPY_PY3K)
- (binaryfunc)array_divide, /*nb_divide*/
-#endif
(binaryfunc)array_remainder, /*nb_remainder*/
(binaryfunc)array_divmod, /*nb_divmod*/
(ternaryfunc)array_power, /*nb_power*/
@@ -1024,20 +936,9 @@ NPY_NO_EXPORT PyNumberMethods array_as_number = {
(binaryfunc)array_bitwise_and, /*nb_and*/
(binaryfunc)array_bitwise_xor, /*nb_xor*/
(binaryfunc)array_bitwise_or, /*nb_or*/
-#if !defined(NPY_PY3K)
- 0, /*nb_coerce*/
-#endif
(unaryfunc)array_int, /*nb_int*/
-#if defined(NPY_PY3K)
0, /*nb_reserved*/
-#else
- (unaryfunc)array_long, /*nb_long*/
-#endif
(unaryfunc)array_float, /*nb_float*/
-#if !defined(NPY_PY3K)
- (unaryfunc)array_oct, /*nb_oct*/
- (unaryfunc)array_hex, /*nb_hex*/
-#endif
/*
* This code adds augmented assignment functionality
@@ -1046,9 +947,6 @@ NPY_NO_EXPORT PyNumberMethods array_as_number = {
(binaryfunc)array_inplace_add, /*nb_inplace_add*/
(binaryfunc)array_inplace_subtract, /*nb_inplace_subtract*/
(binaryfunc)array_inplace_multiply, /*nb_inplace_multiply*/
-#if !defined(NPY_PY3K)
- (binaryfunc)array_inplace_divide, /*nb_inplace_divide*/
-#endif
(binaryfunc)array_inplace_remainder, /*nb_inplace_remainder*/
(ternaryfunc)array_inplace_power, /*nb_inplace_power*/
(binaryfunc)array_inplace_left_shift, /*nb_inplace_lshift*/
@@ -1062,8 +960,7 @@ NPY_NO_EXPORT PyNumberMethods array_as_number = {
(binaryfunc)array_inplace_floor_divide, /*nb_inplace_floor_divide*/
(binaryfunc)array_inplace_true_divide, /*nb_inplace_true_divide*/
(unaryfunc)array_index, /*nb_index */
-#if PY_VERSION_HEX >= 0x03050000
+
(binaryfunc)array_matrix_multiply, /*nb_matrix_multiply*/
(binaryfunc)array_inplace_matrix_multiply, /*nb_inplace_matrix_multiply*/
-#endif
};
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index b669a3e76..3e5e01e01 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -656,7 +656,6 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
itemsize = (((itemsize - 1) >> 2) + 1) << 2;
}
}
-#if PY_VERSION_HEX >= 0x03030000
if (type_num == NPY_UNICODE) {
PyObject *u, *args;
int byteorder;
@@ -684,7 +683,6 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
Py_DECREF(args);
return obj;
}
-#endif
if (type->tp_itemsize != 0) {
/* String type */
obj = type->tp_alloc(type, itemsize);
@@ -710,85 +708,9 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
if (type_num == NPY_STRING) {
destptr = PyString_AS_STRING(obj);
((PyStringObject *)obj)->ob_shash = -1;
-#if !defined(NPY_PY3K)
- ((PyStringObject *)obj)->ob_sstate = SSTATE_NOT_INTERNED;
-#endif
memcpy(destptr, data, itemsize);
return obj;
}
-#if PY_VERSION_HEX < 0x03030000
- else if (type_num == NPY_UNICODE) {
- /* tp_alloc inherited from Python PyBaseObject_Type */
- PyUnicodeObject *uni = (PyUnicodeObject*)obj;
- size_t length = itemsize >> 2;
- Py_UNICODE *dst;
-#ifndef Py_UNICODE_WIDE
- char *buffer;
- Py_UNICODE *tmp;
- int alloc = 0;
-
- length *= 2;
-#endif
- /* Set uni->str so that object can be deallocated on failure */
- uni->str = NULL;
- uni->defenc = NULL;
- uni->hash = -1;
- dst = PyObject_MALLOC(sizeof(Py_UNICODE) * (length + 1));
- if (dst == NULL) {
- Py_DECREF(obj);
- PyErr_NoMemory();
- return NULL;
- }
-#ifdef Py_UNICODE_WIDE
- memcpy(dst, data, itemsize);
- if (swap) {
- byte_swap_vector(dst, length, 4);
- }
- uni->str = dst;
- uni->str[length] = 0;
- uni->length = length;
-#else
- /* need aligned data buffer */
- if ((swap) || ((((npy_intp)data) % descr->alignment) != 0)) {
- buffer = malloc(itemsize);
- if (buffer == NULL) {
- PyObject_FREE(dst);
- Py_DECREF(obj);
- PyErr_NoMemory();
- }
- alloc = 1;
- memcpy(buffer, data, itemsize);
- if (swap) {
- byte_swap_vector(buffer, itemsize >> 2, 4);
- }
- }
- else {
- buffer = data;
- }
-
- /*
- * Allocated enough for 2-characters per itemsize.
- * Now convert from the data-buffer
- */
- length = PyUCS2Buffer_FromUCS4(dst,
- (npy_ucs4 *)buffer, itemsize >> 2);
- if (alloc) {
- free(buffer);
- }
- /* Resize the unicode result */
- tmp = PyObject_REALLOC(dst, sizeof(Py_UNICODE)*(length + 1));
- if (tmp == NULL) {
- PyObject_FREE(dst);
- Py_DECREF(obj);
- return NULL;
- }
- uni->str = tmp;
- uni->str[length] = 0;
- uni->length = length;
-#endif
- return obj;
- }
-#endif /* PY_VERSION_HEX < 0x03030000 */
else {
PyVoidScalarObject *vobj = (PyVoidScalarObject *)obj;
vobj->base = NULL;
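The deleted block above is the narrow-unicode (pre-Python 3.3) construction path; on PEP 393 builds the UCS4 payload of an NPY_UNICODE scalar can be decoded directly, so the UCS2 conversion is dead code. A hedged, illustrative sketch only, not the code path this patch keeps:

    #include <Python.h>

    /* Decode a native-order UCS4 buffer of `nbytes` bytes into a str. */
    static PyObject *
    ucs4_to_str_sketch(const char *data, Py_ssize_t nbytes)
    {
        return PyUnicode_DecodeUTF32(data, nbytes, NULL, NULL);
    }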
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 4cef01f89..cd26d20fa 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -54,12 +54,7 @@ NPY_NO_EXPORT PyTypeObject PyTimeIntegerArrType_Type;
* Floating, ComplexFloating, Flexible, Character#
*/
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.@name@", /* tp_name*/
sizeof(PyObject), /* tp_basicsize*/
0, /* tp_itemsize */
@@ -68,11 +63,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
@@ -226,20 +217,6 @@ gentype_@name@(PyObject *m1, PyObject *m2)
/**end repeat**/
-#if !defined(NPY_PY3K)
-/**begin repeat
- *
- * #name = divide#
- */
-static PyObject *
-gentype_@name@(PyObject *m1, PyObject *m2)
-{
- BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_@name@, gentype_@name@);
- return PyArray_Type.tp_as_number->nb_@name@(m1, m2);
-}
-/**end repeat**/
-#endif
-
/* Get a nested slot, or NULL if absent */
#define GET_NESTED_SLOT(type, group, slot) \
((type)->group == NULL ? NULL : (type)->group->slot)
@@ -293,27 +270,6 @@ gentype_@name@(PyObject *m1)
}
/**end repeat**/
-#if !defined(NPY_PY3K)
-/**begin repeat
- *
- * #name = long, oct, hex#
- */
-static PyObject *
-gentype_@name@(PyObject *m1)
-{
- PyObject *arr, *ret;
-
- arr = PyArray_FromScalar(m1, NULL);
- if (arr == NULL) {
- return NULL;
- }
- ret = Py_TYPE(arr)->tp_as_number->nb_@name@(arr);
- Py_DECREF(arr);
- return ret;
-}
-/**end repeat**/
-#endif
-
static int
gentype_nonzero_number(PyObject *m1)
{
@@ -324,11 +280,7 @@ gentype_nonzero_number(PyObject *m1)
if (arr == NULL) {
return -1;
}
-#if defined(NPY_PY3K)
ret = Py_TYPE(arr)->tp_as_number->nb_bool(arr);
-#else
- ret = Py_TYPE(arr)->tp_as_number->nb_nonzero(arr);
-#endif
Py_DECREF(arr);
return ret;
}
@@ -356,21 +308,9 @@ gentype_format(PyObject *self, PyObject *args)
PyObject *format_spec;
PyObject *obj, *ret;
-#if defined(NPY_PY3K)
if (!PyArg_ParseTuple(args, "U:__format__", &format_spec)) {
return NULL;
}
-#else
- if (!PyArg_ParseTuple(args, "O:__format__", &format_spec)) {
- return NULL;
- }
-
- if (!PyUnicode_Check(format_spec) && !PyString_Check(format_spec)) {
- PyErr_SetString(PyExc_TypeError,
- "format must be a string");
- return NULL;
- }
-#endif
/*
* Convert to an appropriate Python type and call its format.
@@ -381,11 +321,7 @@ gentype_format(PyObject *self, PyObject *args)
obj = PyBool_FromLong(((PyBoolScalarObject *)self)->obval);
}
else if (PyArray_IsScalar(self, Integer)) {
-#if defined(NPY_PY3K)
obj = Py_TYPE(self)->tp_as_number->nb_int(self);
-#else
- obj = Py_TYPE(self)->tp_as_number->nb_long(self);
-#endif
}
else if (PyArray_IsScalar(self, Floating)) {
obj = Py_TYPE(self)->tp_as_number->nb_float(self);
@@ -1077,36 +1013,12 @@ static PyObject *
return npy_longdouble_to_PyLong(val);
}
-#if !defined(NPY_PY3K)
-
-/**begin repeat1
- * #name = int, hex, oct#
- */
-static PyObject *
-@char@longdoubletype_@name@(PyObject *self)
-{
- PyObject *ret;
- PyObject *obj = @char@longdoubletype_long(self);
- if (obj == NULL) {
- return NULL;
- }
- ret = Py_TYPE(obj)->tp_as_number->nb_@name@(obj);
- Py_DECREF(obj);
- return ret;
-}
-/**end repeat1**/
-
-#endif /* !defined(NPY_PY3K) */
-
/**end repeat**/
static PyNumberMethods gentype_as_number = {
(binaryfunc)gentype_add, /*nb_add*/
(binaryfunc)gentype_subtract, /*nb_subtract*/
(binaryfunc)gentype_multiply, /*nb_multiply*/
-#if !defined(NPY_PY3K)
- (binaryfunc)gentype_divide, /*nb_divide*/
-#endif
(binaryfunc)gentype_remainder, /*nb_remainder*/
(binaryfunc)gentype_divmod, /*nb_divmod*/
(ternaryfunc)gentype_power, /*nb_power*/
@@ -1120,26 +1032,12 @@ static PyNumberMethods gentype_as_number = {
(binaryfunc)gentype_and, /*nb_and*/
(binaryfunc)gentype_xor, /*nb_xor*/
(binaryfunc)gentype_or, /*nb_or*/
-#if !defined(NPY_PY3K)
- 0, /*nb_coerce*/
-#endif
(unaryfunc)gentype_int, /*nb_int*/
-#if defined(NPY_PY3K)
0, /*nb_reserved*/
-#else
- (unaryfunc)gentype_long, /*nb_long*/
-#endif
(unaryfunc)gentype_float, /*nb_float*/
-#if !defined(NPY_PY3K)
- (unaryfunc)gentype_oct, /*nb_oct*/
- (unaryfunc)gentype_hex, /*nb_hex*/
-#endif
0, /*inplace_add*/
0, /*inplace_subtract*/
0, /*inplace_multiply*/
-#if !defined(NPY_PY3K)
- 0, /*inplace_divide*/
-#endif
0, /*inplace_remainder*/
0, /*inplace_power*/
0, /*inplace_lshift*/
@@ -1152,10 +1050,8 @@ static PyNumberMethods gentype_as_number = {
0, /*nb_inplace_floor_divide*/
0, /*nb_inplace_true_divide*/
(unaryfunc)NULL, /*nb_index*/
-#if PY_VERSION_HEX >= 0x03050000
0, /*np_matmul*/
0, /*np_inplace_matmul*/
-#endif
};
@@ -1246,11 +1142,7 @@ inttype_denominator_get(PyObject *self)
static PyObject *
gentype_data_get(PyObject *self)
{
-#if defined(NPY_PY3K)
return PyMemoryView_FromObject(self);
-#else
- return PyBuffer_FromObject(self, 0, Py_END_OF_BUFFER);
-#endif
}
@@ -1659,6 +1551,11 @@ gentype_itemset(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args))
return NULL;
}
+/* This function matches the Python 2.7 PyBufferProcs.bf_getreadbuffer
+ * interface, but no longer needs to. In the future we could consider
+ * rewriting callers to use `gentype_getbuffer`, or inline the function body
+ * at the caller.
+ */
static Py_ssize_t
gentype_getreadbuf(PyObject *, Py_ssize_t, void **);
@@ -1819,9 +1716,7 @@ static PyObject *
gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
{
PyObject *ret = NULL, *obj = NULL, *mod = NULL;
-#if defined(NPY_PY3K)
Py_buffer view;
-#endif
const char *buffer;
Py_ssize_t buflen;
@@ -1831,7 +1726,6 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
return NULL;
}
-#if defined(NPY_PY3K)
if (PyArray_IsScalar(self, Unicode)) {
/* Unicode on Python 3 does not expose the buffer interface */
buffer = PyUnicode_AS_DATA(self);
@@ -1853,12 +1747,6 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
Py_DECREF(ret);
return NULL;
}
-#else
- if (PyObject_AsReadBuffer(self, (const void **)&buffer, &buflen)<0) {
- Py_DECREF(ret);
- return NULL;
- }
-#endif
mod = PyImport_ImportModule("numpy.core._multiarray_umath");
if (mod == NULL) {
@@ -2277,12 +2165,10 @@ static PyMethodDef gentype_methods[] = {
{"round",
(PyCFunction)gentype_round,
METH_VARARGS | METH_KEYWORDS, NULL},
-#if defined(NPY_PY3K)
/* Hook for the round() builtin */
{"__round__",
(PyCFunction)gentype_round,
METH_VARARGS | METH_KEYWORDS, NULL},
-#endif
/* For the format function */
{"__format__",
gentype_format,
@@ -2588,68 +2474,17 @@ gentype_getreadbuf(PyObject *self, Py_ssize_t segment, void **ptrptr)
return numbytes;
}
-#if !defined(NPY_PY3K)
-static Py_ssize_t
-gentype_getsegcount(PyObject *self, Py_ssize_t *lenp)
-{
- PyArray_Descr *outcode;
-
- outcode = PyArray_DescrFromScalar(self);
- if (lenp) {
- *lenp = outcode->elsize;
-#ifndef Py_UNICODE_WIDE
- if (outcode->type_num == NPY_UNICODE) {
- *lenp >>= 1;
- }
-#endif
- }
- Py_DECREF(outcode);
- return 1;
-}
-
-static Py_ssize_t
-gentype_getcharbuf(PyObject *self, Py_ssize_t segment, constchar **ptrptr)
-{
- if (PyArray_IsScalar(self, String) ||
- PyArray_IsScalar(self, Unicode)) {
- return gentype_getreadbuf(self, segment, (void **)ptrptr);
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- "Non-character array cannot be interpreted "\
- "as character buffer.");
- return -1;
- }
-}
-#endif /* !defined(NPY_PY3K) */
-
static PyBufferProcs gentype_as_buffer = {
-#if !defined(NPY_PY3K)
- gentype_getreadbuf, /* bf_getreadbuffer*/
- NULL, /* bf_getwritebuffer*/
- gentype_getsegcount, /* bf_getsegcount*/
- gentype_getcharbuf, /* bf_getcharbuffer*/
-#endif
gentype_getbuffer, /* bf_getbuffer */
NULL, /* bf_releasebuffer */
};
-#if defined(NPY_PY3K)
#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE
#define LEAFFLAGS Py_TPFLAGS_DEFAULT
-#else
-#define BASEFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES
-#define LEAFFLAGS Py_TPFLAGS_DEFAULT | Py_TPFLAGS_CHECKTYPES
-#endif
NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.generic", /* tp_name*/
sizeof(PyObject), /* tp_basicsize*/
0, /* tp_itemsize */
@@ -2658,11 +2493,7 @@ NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
@@ -2877,11 +2708,7 @@ finish:
*((npy_@name@ *)dest) = *((npy_@name@ *)src);
#elif @default@ == 1 /* unicode and strings */
if (itemsize == 0) { /* unicode */
-#if PY_VERSION_HEX >= 0x03030000
itemsize = PyUnicode_GetLength(robj) * PyUnicode_KIND(robj);
-#else
- itemsize = ((PyUnicodeObject *)robj)->length * sizeof(Py_UNICODE);
-#endif
}
memcpy(dest, src, itemsize);
/* @default@ == 2 won't get here */
@@ -3070,10 +2897,6 @@ NPY_NO_EXPORT PyNumberMethods bool_arrtype_as_number = {
0, /* nb_add */
0, /* nb_subtract */
0, /* nb_multiply */
-#if defined(NPY_PY3K)
-#else
- 0, /* nb_divide */
-#endif
0, /* nb_remainder */
0, /* nb_divmod */
0, /* nb_power */
@@ -3087,30 +2910,13 @@ NPY_NO_EXPORT PyNumberMethods bool_arrtype_as_number = {
(binaryfunc)bool_arrtype_and, /* nb_and */
(binaryfunc)bool_arrtype_xor, /* nb_xor */
(binaryfunc)bool_arrtype_or, /* nb_or */
-#if defined(NPY_PY3K)
-#else
- 0, /* nb_coerce */
-#endif
0, /* nb_int */
-#if defined(NPY_PY3K)
0, /* nb_reserved */
-#else
- 0, /* nb_long */
-#endif
0, /* nb_float */
-#if defined(NPY_PY3K)
-#else
- 0, /* nb_oct */
- 0, /* nb_hex */
-#endif
/* Added in release 2.0 */
0, /* nb_inplace_add */
0, /* nb_inplace_subtract */
0, /* nb_inplace_multiply */
-#if defined(NPY_PY3K)
-#else
- 0, /* nb_inplace_divide */
-#endif
0, /* nb_inplace_remainder */
0, /* nb_inplace_power */
0, /* nb_inplace_lshift */
@@ -3146,11 +2952,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
(PyArray_Check(obj) &&
PyArray_NDIM((PyArrayObject *)obj)==0 &&
PyArray_ISINTEGER((PyArrayObject *)obj))) {
-#if defined(NPY_PY3K)
new = Py_TYPE(obj)->tp_as_number->nb_int(obj);
-#else
- new = Py_TYPE(obj)->tp_as_number->nb_long(obj);
-#endif
}
if (new && PyLong_Check(new)) {
PyObject *ret;
@@ -3226,7 +3028,6 @@ ulong_arrtype_hash(PyObject *obj)
return x;
}
-#if (NPY_SIZEOF_INT != NPY_SIZEOF_LONG) || defined(NPY_PY3K)
static npy_hash_t
int_arrtype_hash(PyObject *obj)
{
@@ -3236,9 +3037,7 @@ int_arrtype_hash(PyObject *obj)
}
return x;
}
-#endif
-#if defined(NPY_PY3K)
static npy_hash_t
long_arrtype_hash(PyObject *obj)
{
@@ -3247,7 +3046,6 @@ long_arrtype_hash(PyObject *obj)
Py_DECREF(l);
return x;
}
-#endif
/**begin repeat
* #char = ,u#
@@ -3487,72 +3285,6 @@ static PyMappingMethods object_arrtype_as_mapping = {
(objobjargproc)object_arrtype_ass_subscript,
};
-#if !defined(NPY_PY3K)
-static Py_ssize_t
-object_arrtype_getsegcount(PyObjectScalarObject *self, Py_ssize_t *lenp)
-{
- Py_ssize_t newlen;
- int cnt;
- PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer;
-
- if (pb == NULL ||
- pb->bf_getsegcount == NULL ||
- (cnt = (*pb->bf_getsegcount)(self->obval, &newlen)) != 1) {
- return 0;
- }
- if (lenp) {
- *lenp = newlen;
- }
- return cnt;
-}
-
-static Py_ssize_t
-object_arrtype_getreadbuf(PyObjectScalarObject *self, Py_ssize_t segment, void **ptrptr)
-{
- PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer;
-
- if (pb == NULL ||
- pb->bf_getreadbuffer == NULL ||
- pb->bf_getsegcount == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "expected a readable buffer object");
- return -1;
- }
- return (*pb->bf_getreadbuffer)(self->obval, segment, ptrptr);
-}
-
-static Py_ssize_t
-object_arrtype_getwritebuf(PyObjectScalarObject *self, Py_ssize_t segment, void **ptrptr)
-{
- PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer;
-
- if (pb == NULL ||
- pb->bf_getwritebuffer == NULL ||
- pb->bf_getsegcount == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "expected a writeable buffer object");
- return -1;
- }
- return (*pb->bf_getwritebuffer)(self->obval, segment, ptrptr);
-}
-
-static Py_ssize_t
-object_arrtype_getcharbuf(PyObjectScalarObject *self, Py_ssize_t segment,
- constchar **ptrptr)
-{
- PyBufferProcs *pb = Py_TYPE(self->obval)->tp_as_buffer;
-
- if (pb == NULL ||
- pb->bf_getcharbuffer == NULL ||
- pb->bf_getsegcount == NULL) {
- PyErr_SetString(PyExc_TypeError,
- "expected a character buffer object");
- return -1;
- }
- return (*pb->bf_getcharbuffer)(self->obval, segment, ptrptr);
-}
-#endif
-
static int
object_arrtype_getbuffer(PyObjectScalarObject *self, Py_buffer *view, int flags)
{
@@ -3580,12 +3312,6 @@ object_arrtype_releasebuffer(PyObjectScalarObject *self, Py_buffer *view)
}
static PyBufferProcs object_arrtype_as_buffer = {
-#if !defined(NPY_PY3K)
- (readbufferproc)object_arrtype_getreadbuf,
- (writebufferproc)object_arrtype_getwritebuf,
- (segcountproc)object_arrtype_getsegcount,
- (charbufferproc)object_arrtype_getcharbuf,
-#endif
(getbufferproc)object_arrtype_getbuffer,
(releasebufferproc)object_arrtype_releasebuffer,
};
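The four functions removed above implemented the Python 2 segment-based buffer interface (bf_getreadbuffer, bf_getwritebuffer, bf_getsegcount, bf_getcharbuf); on Python 3 only the Py_buffer pair kept below exists. For orientation, a minimal getbuffer for an object exposing one contiguous read-only byte region, using a hypothetical ExampleObject rather than the real PyObjectScalarObject body:

    /* Hypothetical object with a flat byte buffer */
    typedef struct {
        PyObject_HEAD
        char *data;
        Py_ssize_t len;
    } ExampleObject;

    /* Sketch: Python 3 buffer protocol entry point.  PyBuffer_FillInfo
     * fills in *view, takes a reference to the owner, and honours the
     * request flags. */
    static int
    example_getbuffer(PyObject *obj, Py_buffer *view, int flags)
    {
        ExampleObject *self = (ExampleObject *)obj;
        return PyBuffer_FillInfo(view, obj, self->data, self->len,
                                 1 /* readonly */, flags);
    }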
@@ -3597,12 +3323,7 @@ object_arrtype_call(PyObjectScalarObject *obj, PyObject *args, PyObject *kwds)
}
NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.object_", /* tp_name*/
sizeof(PyObjectScalarObject), /* tp_basicsize*/
0, /* tp_itemsize */
@@ -3610,11 +3331,7 @@ NPY_NO_EXPORT PyTypeObject PyObjectArrType_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
&object_arrtype_as_sequence, /* tp_as_sequence */
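The head-initializer change just above repeats for every static type object in this diff: Python 3 folds the old ob_size line into PyVarObject_HEAD_INIT(type, size). A hedged sketch of the Python-3-only pattern for a minimal static type, reusing the hypothetical ExampleObject from the previous sketch (CPython's documented extension-type style, not a copy of this file):

    /* Sketch: static type object with the Python 3 head initializer;
     * remaining slots are given by name instead of position. */
    static PyTypeObject Example_Type = {
        PyVarObject_HEAD_INIT(NULL, 0)
        .tp_name = "example.Example",
        .tp_basicsize = sizeof(ExampleObject),
        .tp_flags = Py_TPFLAGS_DEFAULT,
    };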
@@ -3681,13 +3398,8 @@ gen_arrtype_subscript(PyObject *self, PyObject *key)
#define NAME_bool "bool"
#define NAME_void "void"
-#if defined(NPY_PY3K)
#define NAME_string "bytes"
#define NAME_unicode "str"
-#else
-#define NAME_string "string"
-#define NAME_unicode "unicode"
-#endif
/**begin repeat
* #name = bool, string, unicode, void#
@@ -3695,12 +3407,7 @@ gen_arrtype_subscript(PyObject *self, PyObject *key)
* #ex = _,_,_,#
*/
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy." NAME_@name@ "@ex@", /* tp_name*/
sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
0, /* tp_itemsize */
@@ -3708,11 +3415,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
@@ -3784,12 +3487,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
#define _THIS_SIZE "256"
#endif
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.@name@" _THIS_SIZE, /* tp_name*/
sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
0, /* tp_itemsize */
@@ -3797,11 +3495,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
@@ -3875,12 +3569,7 @@ static PyMappingMethods gentype_as_mapping = {
#endif
NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(0, 0)
-#else
- PyObject_HEAD_INIT(0)
- 0, /* ob_size */
-#endif
"numpy.@name@" _THIS_SIZE, /* tp_name*/
sizeof(Py@NAME@ScalarObject), /* tp_basicsize*/
0, /* tp_itemsize*/
@@ -3888,11 +3577,7 @@ NPY_NO_EXPORT PyTypeObject Py@NAME@ArrType_Type = {
0, /* tp_print*/
0, /* tp_getattr*/
0, /* tp_setattr*/
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
0, /* tp_repr*/
0, /* tp_as_number*/
0, /* tp_as_sequence*/
@@ -4263,38 +3948,6 @@ initialize_casting_tables(void)
}
}
-#ifndef NPY_PY3K
-/*
- * In python2, the `float` and `complex` types still implement the obsolete
- * "tp_print" method, which uses CPython's float-printing routines to print the
- * float. Numpy's float_/cfloat inherit from Python float/complex, but
- * override its tp_repr and tp_str methods. In order to avoid an inconsistency
- * with the inherited tp_print, we need to override it too.
- *
- * In python3 the tp_print method is reserved/unused.
- */
-static int
-doubletype_print(PyObject *o, FILE *fp, int flags)
-{
- int ret;
- PyObject *to_print;
- if (flags & Py_PRINT_RAW) {
- to_print = PyObject_Str(o);
- }
- else {
- to_print = PyObject_Repr(o);
- }
-
- if (to_print == NULL) {
- return -1;
- }
-
- ret = PyObject_Print(to_print, fp, Py_PRINT_RAW);
- Py_DECREF(to_print);
- return ret;
-}
-#endif
-
static PyNumberMethods longdoubletype_as_number;
static PyNumberMethods clongdoubletype_as_number;
static void init_basetypes(void);
@@ -4346,12 +3999,6 @@ initialize_numeric_types(void)
/**end repeat**/
-#ifndef NPY_PY3K
- PyDoubleArrType_Type.tp_print = &doubletype_print;
- PyCDoubleArrType_Type.tp_print = &doubletype_print;
-#endif
-
-
PyBoolArrType_Type.tp_as_number->nb_index = (unaryfunc)bool_index;
PyStringArrType_Type.tp_alloc = NULL;
@@ -4429,20 +4076,14 @@ initialize_numeric_types(void)
/**end repeat**/
-#if (NPY_SIZEOF_INT != NPY_SIZEOF_LONG) || defined(NPY_PY3K)
/* We won't be inheriting from Python Int type. */
PyIntArrType_Type.tp_hash = int_arrtype_hash;
-#endif
-#if defined(NPY_PY3K)
/* We won't be inheriting from Python Int type. */
PyLongArrType_Type.tp_hash = long_arrtype_hash;
-#endif
-#if (NPY_SIZEOF_LONG != NPY_SIZEOF_LONGLONG) || defined(NPY_PY3K)
/* We won't be inheriting from Python Int type. */
PyLongLongArrType_Type.tp_hash = longlong_arrtype_hash;
-#endif
/**begin repeat
* #name = repr, str#
@@ -4485,14 +4126,7 @@ initialize_numeric_types(void)
* does not return a normal Python type
*/
@char@longdoubletype_as_number.nb_float = @char@longdoubletype_float;
-#if defined(NPY_PY3K)
@char@longdoubletype_as_number.nb_int = @char@longdoubletype_long;
-#else
- @char@longdoubletype_as_number.nb_int = @char@longdoubletype_int;
- @char@longdoubletype_as_number.nb_long = @char@longdoubletype_long;
- @char@longdoubletype_as_number.nb_hex = @char@longdoubletype_hex;
- @char@longdoubletype_as_number.nb_oct = @char@longdoubletype_oct;
-#endif
Py@CHAR@LongDoubleArrType_Type.tp_as_number = &@char@longdoubletype_as_number;
Py@CHAR@LongDoubleArrType_Type.tp_repr = @char@longdoubletype_repr;
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 4e31f003b..127ac5134 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -981,7 +981,7 @@ PyArray_Flatten(PyArrayObject *a, NPY_ORDER order)
/* See shape.h for parameters documentation */
NPY_NO_EXPORT PyObject *
-build_shape_string(npy_intp n, npy_intp *vals)
+build_shape_string(npy_intp n, npy_intp const *vals)
{
npy_intp i;
PyObject *ret, *tmp;
diff --git a/numpy/core/src/multiarray/shape.h b/numpy/core/src/multiarray/shape.h
index 0451a463e..d25292556 100644
--- a/numpy/core/src/multiarray/shape.h
+++ b/numpy/core/src/multiarray/shape.h
@@ -6,7 +6,7 @@
* A negative value in 'vals' gets interpreted as newaxis.
*/
NPY_NO_EXPORT PyObject *
-build_shape_string(npy_intp n, npy_intp *vals);
+build_shape_string(npy_intp n, npy_intp const *vals);
/*
* Creates a sorted stride perm matching the KEEPORDER behavior
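This const qualification, like the matching changes in strfuncs.c, reduction.h and ufunc_object.c further down, only tightens the contract: build_shape_string promises not to write through vals, and callers holding read-only dimension data no longer need a cast. A small sketch of the effect on a hypothetical caller:

    /* Hypothetical caller: with the const-qualified prototype this
     * compiles cleanly from a read-only view of the dimensions; with
     * the old prototype it needed a cast or a writable copy. */
    static PyObject *
    describe_shape(PyArrayObject *arr)
    {
        npy_intp const *dims = PyArray_DIMS(arr);
        return build_shape_string(PyArray_NDIM(arr), dims);
    }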
diff --git a/numpy/core/src/multiarray/strfuncs.c b/numpy/core/src/multiarray/strfuncs.c
index 495d897b2..b570aec08 100644
--- a/numpy/core/src/multiarray/strfuncs.c
+++ b/numpy/core/src/multiarray/strfuncs.c
@@ -64,7 +64,7 @@ extend_str(char **strp, Py_ssize_t n, Py_ssize_t *maxp)
static int
dump_data(char **string, Py_ssize_t *n, Py_ssize_t *max_n, char *data, int nd,
- npy_intp *dimensions, npy_intp *strides, PyArrayObject* self)
+ npy_intp const *dimensions, npy_intp const *strides, PyArrayObject* self)
{
PyObject *op = NULL, *sp = NULL;
char *ostring;
@@ -226,34 +226,3 @@ array_format(PyArrayObject *self, PyObject *args)
}
}
-#ifndef NPY_PY3K
-
-NPY_NO_EXPORT PyObject *
-array_unicode(PyArrayObject *self)
-{
- PyObject *uni;
-
- if (PyArray_NDIM(self) == 0) {
- PyObject *item = PyArray_ToScalar(PyArray_DATA(self), self);
- if (item == NULL){
- return NULL;
- }
-
- /* defer to invoking `unicode` on the scalar */
- uni = PyObject_CallFunctionObjArgs(
- (PyObject *)&PyUnicode_Type, item, NULL);
- Py_DECREF(item);
- }
- else {
- /* Do what unicode(self) would normally do */
- PyObject *str = PyObject_Str((PyObject *)self);
- if (str == NULL){
- return NULL;
- }
- uni = PyUnicode_FromObject(str);
- Py_DECREF(str);
- }
- return uni;
-}
-
-#endif
diff --git a/numpy/core/src/multiarray/strfuncs.h b/numpy/core/src/multiarray/strfuncs.h
index 7e869d926..5dd661a20 100644
--- a/numpy/core/src/multiarray/strfuncs.h
+++ b/numpy/core/src/multiarray/strfuncs.h
@@ -13,9 +13,4 @@ array_str(PyArrayObject *self);
NPY_NO_EXPORT PyObject *
array_format(PyArrayObject *self, PyObject *args);
-#ifndef NPY_PY3K
- NPY_NO_EXPORT PyObject *
- array_unicode(PyArrayObject *self);
-#endif
-
#endif
diff --git a/numpy/core/src/multiarray/typeinfo.c b/numpy/core/src/multiarray/typeinfo.c
index 14c4f27cb..30053887b 100644
--- a/numpy/core/src/multiarray/typeinfo.c
+++ b/numpy/core/src/multiarray/typeinfo.c
@@ -58,11 +58,7 @@ PyArray_typeinfo(
PyObject *entry = PyStructSequence_New(&PyArray_typeinfoType);
if (entry == NULL)
return NULL;
-#if defined(NPY_PY3K)
PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("C", typechar));
-#else
- PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("c", typechar));
-#endif
PyStructSequence_SET_ITEM(entry, 1, Py_BuildValue("i", typenum));
PyStructSequence_SET_ITEM(entry, 2, Py_BuildValue("i", nbits));
PyStructSequence_SET_ITEM(entry, 3, Py_BuildValue("i", align));
@@ -84,11 +80,7 @@ PyArray_typeinforanged(
PyObject *entry = PyStructSequence_New(&PyArray_typeinforangedType);
if (entry == NULL)
return NULL;
-#if defined(NPY_PY3K)
PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("C", typechar));
-#else
- PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("c", typechar));
-#endif
PyStructSequence_SET_ITEM(entry, 1, Py_BuildValue("i", typenum));
PyStructSequence_SET_ITEM(entry, 2, Py_BuildValue("i", nbits));
PyStructSequence_SET_ITEM(entry, 3, Py_BuildValue("i", align));
@@ -104,10 +96,8 @@ PyArray_typeinforanged(
return entry;
}
-/* Python version only needed for backport to 2.7 */
-#if (PY_VERSION_HEX < 0x03040000) \
- || (defined(PYPY_VERSION_NUM) && (PYPY_VERSION_NUM < 0x07020000))
-
+/* Fallback for PyStructSequence_InitType2, needed only for older PyPy */
+#if (defined(PYPY_VERSION_NUM) && (PYPY_VERSION_NUM < 0x07020000))
static int
PyStructSequence_InitType2(PyTypeObject *type, PyStructSequence_Desc *desc) {
PyStructSequence_InitType(type, desc);
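Two things happen in typeinfo.c: the Py_BuildValue format switches from "c", which on Python 3 builds a length-1 bytes object, to "C", which builds a length-1 str from a C int, and the PyStructSequence_InitType2 fallback is kept only for old PyPy now that old CPython no longer needs it. A short sketch of the format difference:

    /* Sketch: the same C character built two ways on Python 3 */
    static PyObject *
    typechar_as_str(int typechar)           /* e.g. 'd' for double */
    {
        /* "c" would give b'd' (bytes); "C" gives 'd' (str) */
        return Py_BuildValue("C", typechar);
    }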
diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src
index b2b75da74..9ab3c651d 100644
--- a/numpy/core/src/umath/_rational_tests.c.src
+++ b/numpy/core/src/umath/_rational_tests.c.src
@@ -661,12 +661,7 @@ static PyGetSetDef pyrational_getset[] = {
};
static PyTypeObject PyRational_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"rational", /* tp_name */
sizeof(PyRational), /* tp_basicsize */
0, /* tp_itemsize */
@@ -674,11 +669,7 @@ static PyTypeObject PyRational_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
pyrational_repr, /* tp_repr */
&pyrational_as_number, /* tp_as_number */
0, /* tp_as_sequence */
diff --git a/numpy/core/src/umath/funcs.inc.src b/numpy/core/src/umath/funcs.inc.src
index 9c59cc8fb..273779ee8 100644
--- a/numpy/core/src/umath/funcs.inc.src
+++ b/numpy/core/src/umath/funcs.inc.src
@@ -38,11 +38,7 @@ Py_reciprocal(PyObject *o)
if (!one) {
return NULL;
}
-#if defined(NPY_PY3K)
result = PyNumber_TrueDivide(one, o);
-#else
- result = PyNumber_Divide(one, o);
-#endif
Py_DECREF(one);
return result;
}
@@ -197,8 +193,7 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2)
{
PyObject *gcd = NULL;
- /* use math.gcd if available, and valid on the provided types */
-#if PY_VERSION_HEX >= 0x03050000
+ /* use math.gcd if valid on the provided types */
{
static PyObject *math_gcd_func = NULL;
@@ -213,7 +208,6 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2)
/* silence errors, and fall back on pure-python gcd */
PyErr_Clear();
}
-#endif
/* otherwise, use our internal one, written in python */
{
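The function around this hunk caches math.gcd in a static pointer, tries it first, and on any failure clears the error and falls back to a pure-Python gcd; dropping Python < 3.5 support removes the version guard around that fast path. A hedged sketch of the cache-and-fall-back pattern, assuming numpy's npy_cache_import helper and a hypothetical fallback function:

    /* Sketch only; not the body of npy_ObjectGCD */
    static PyObject *
    object_gcd_sketch(PyObject *i1, PyObject *i2)
    {
        static PyObject *math_gcd_func = NULL;
        PyObject *gcd;

        npy_cache_import("math", "gcd", &math_gcd_func);
        if (math_gcd_func != NULL) {
            gcd = PyObject_CallFunctionObjArgs(math_gcd_func, i1, i2, NULL);
            if (gcd != NULL) {
                return gcd;
            }
            /* silence the error and fall back, e.g. for types math.gcd rejects */
            PyErr_Clear();
        }
        return pure_python_gcd(i1, i2);     /* hypothetical fallback helper */
    }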
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index 8a2e5bc40..33d10da49 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -1824,6 +1824,7 @@ NPY_NO_EXPORT void
*((npy_bool *)op1) = in1 @OP@ in2;
}
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -2068,6 +2069,7 @@ NPY_NO_EXPORT void
const @type@ in1 = *(@type@ *)ip1;
*((@type@ *)op1) = in1 > 0 ? 1 : (in1 < 0 ? -1 : (in1 == 0 ? 0 : in1));
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
NPY_NO_EXPORT void
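The two added calls clear the floating-point status word after the comparison and sign loops, so that flags raised incidentally inside the loop (NaN comparisons, for example, can set the invalid-operation flag on some platforms) do not surface later as spurious warnings; the pointer argument serves only as an optimization barrier that keeps the call from being hoisted above the loop. A minimal usage sketch with hypothetical data (npy_clear_floatstatus_barrier comes from numpy's npy_math.h):

    /* Sketch: clear whatever FP status flags the loop may have set as a
     * side effect, so they are not reported against a later operation. */
    static void
    compare_less(const double *a, const double *b, npy_bool *out, npy_intp n)
    {
        npy_intp i;
        for (i = 0; i < n; i++) {
            out[i] = a[i] < b[i];
        }
        npy_clear_floatstatus_barrier((char *)out);  /* pointer is a barrier only */
    }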
diff --git a/numpy/core/src/umath/reduction.h b/numpy/core/src/umath/reduction.h
index dfaeabcbb..0c2183ed6 100644
--- a/numpy/core/src/umath/reduction.h
+++ b/numpy/core/src/umath/reduction.h
@@ -100,8 +100,8 @@ typedef int (PyArray_AssignReduceIdentityFunc)(PyArrayObject *result,
*/
typedef int (PyArray_ReduceLoopFunc)(NpyIter *iter,
char **dataptr,
- npy_intp *strideptr,
- npy_intp *countptr,
+ npy_intp const *strideptr,
+ npy_intp const *countptr,
NpyIter_IterNextFunc *iternext,
int needs_api,
npy_intp skip_first_count,
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index d5d8d659b..b3826eef4 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -744,56 +744,50 @@ _@name@_convert2_to_ctypes(PyObject *a, @type@ *arg1,
/**end repeat**/
-#if defined(NPY_PY3K)
-#define CODEGEN_SKIP_divide_FLAG
-#endif
-
/**begin repeat
*
* #name = (byte, ubyte, short, ushort, int, uint,
- * long, ulong, longlong, ulonglong)*13,
+ * long, ulong, longlong, ulonglong)*12,
* (half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)*6,
+ * cfloat, cdouble, clongdouble)*5,
* (half, float, double, longdouble)*2#
* #Name = (Byte, UByte, Short, UShort, Int, UInt,
- * Long, ULong,LongLong,ULongLong)*13,
+ * Long, ULong,LongLong,ULongLong)*12,
* (Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)*6,
+ * CFloat, CDouble, CLongDouble)*5,
* (Half, Float, Double, LongDouble)*2#
* #type = (npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong)*13,
+ * npy_long, npy_ulong, npy_longlong, npy_ulonglong)*12,
* (npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble)*6,
+ * npy_cfloat, npy_cdouble, npy_clongdouble)*5,
* (npy_half, npy_float, npy_double, npy_longdouble)*2#
*
- * #oper = add*10, subtract*10, multiply*10, divide*10, remainder*10,
+ * #oper = add*10, subtract*10, multiply*10, remainder*10,
* divmod*10, floor_divide*10, lshift*10, rshift*10, and*10,
* or*10, xor*10, true_divide*10,
- * add*7, subtract*7, multiply*7, divide*7, floor_divide*7, true_divide*7,
+ * add*7, subtract*7, multiply*7, floor_divide*7, true_divide*7,
* divmod*4, remainder*4#
*
- * #fperr = 1*70,0*50,1*10,
- * 1*42,
+ * #fperr = 1*60,0*50,1*10,
+ * 1*35,
* 1*8#
- * #twoout = 0*50,1*10,0*70,
- * 0*42,
+ * #twoout = 0*40,1*10,0*70,
+ * 0*35,
* 1*4,0*4#
* #otype = (npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong)*12,
+ * npy_long, npy_ulong, npy_longlong, npy_ulonglong)*11,
* npy_float*4, npy_double*6,
* (npy_half, npy_float, npy_double, npy_longdouble,
- * npy_cfloat, npy_cdouble, npy_clongdouble)*6,
+ * npy_cfloat, npy_cdouble, npy_clongdouble)*5,
* (npy_half, npy_float, npy_double, npy_longdouble)*2#
* #OName = (Byte, UByte, Short, UShort, Int, UInt,
- * Long, ULong, LongLong, ULongLong)*12,
+ * Long, ULong, LongLong, ULongLong)*11,
* Float*4, Double*6,
* (Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)*6,
+ * CFloat, CDouble, CLongDouble)*5,
* (Half, Float, Double, LongDouble)*2#
*/
-#if !defined(CODEGEN_SKIP_@oper@_FLAG)
-
static PyObject *
@name@_@oper@(PyObject *a, PyObject *b)
{
@@ -904,12 +898,9 @@ static PyObject *
#endif
return ret;
}
-#endif
/**end repeat**/
-#undef CODEGEN_SKIP_divide_FLAG
-
#define _IS_ZERO(x) (x == 0)
/**begin repeat
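The multiplier bookkeeping above (the 13s dropping to 12 and the 6s to 5) follows from how these .c.src templates are expanded: each /**begin repeat**/ block is instantiated once per element of its lists, so removing the ten integer divide entries and the seven floating-point divide entries from #oper means every parallel list (#name, #type, #fperr, #twoout, ...) must shrink by the same counts. A tiny illustrative template and its expansion, not taken from this file:

    /**begin repeat
     * #name = float, double#
     * #type = npy_float, npy_double#
     */
    static @type@ @name@_zero(void) { return (@type@)0; }
    /**end repeat**/

    /* the template processor emits one copy per list element: */
    static npy_float  float_zero(void)  { return (npy_float)0;  }
    static npy_double double_zero(void) { return (npy_double)0; }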
@@ -1300,12 +1291,6 @@ static PyObject *
/**end repeat**/
-#if defined(NPY_PY3K)
-#define NONZERO_NAME(prefix) prefix##bool
-#else
-#define NONZERO_NAME(prefix) prefix##nonzero
-#endif
-
#define _IS_NONZERO(x) (x != 0)
/**begin repeat
*
@@ -1321,7 +1306,7 @@ static PyObject *
* #nonzero = _IS_NONZERO*10, !npy_half_iszero, _IS_NONZERO*6#
*/
static int
-NONZERO_NAME(@name@_)(PyObject *a)
+@name@_bool(PyObject *a)
{
int ret;
@type@ arg1;
@@ -1330,7 +1315,7 @@ NONZERO_NAME(@name@_)(PyObject *a)
if (PyErr_Occurred()) {
return -1;
}
- return PyGenericArrType_Type.tp_as_number->NONZERO_NAME(nb_)(a);
+ return PyGenericArrType_Type.tp_as_number->nb_bool(a);
}
/*
@@ -1410,15 +1395,6 @@ static PyObject *
return NULL;
}
-#ifndef NPY_PY3K
- /* Invoke long.__int__ to try to downcast */
- {
- PyObject *before_downcast = long_result;
- long_result = Py_TYPE(long_result)->tp_as_number->nb_int(long_result);
- Py_DECREF(before_downcast);
- }
-#endif
-
return long_result;
}
/**end repeat**/
@@ -1451,63 +1427,6 @@ static NPY_INLINE PyObject *
}
/**end repeat**/
-
-#if !defined(NPY_PY3K)
-
-/**begin repeat
- *
- * #name = (byte, ubyte, short, ushort, int, uint,
- * long, ulong, longlong, ulonglong,
- * half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)#
- * #Name = (Byte, UByte, Short, UShort, Int, UInt,
- * Long, ULong, LongLong, ULongLong,
- * Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)#
- * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)#
- * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )#
- * #func = (PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,
- * PyLong_FromDouble*3, npy_longdouble_to_PyLong,
- * PyLong_FromDouble*2, npy_longdouble_to_PyLong#
- */
-static NPY_INLINE PyObject *
-@name@_long(PyObject *obj)
-{
-#if @cmplx@
- if (emit_complexwarning() < 0) {
- return NULL;
- }
- return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@).real));
-#else
- return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)));
-#endif
-}
-/**end repeat**/
-
-/**begin repeat
- *
- * #name = (byte, ubyte, short, ushort, int, uint,
- * long, ulong, longlong, ulonglong,
- * half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)*2#
- * #oper = oct*17, hex*17#
- * #kind = (int*5, long*5, int*2, long*2, int, long*2)*2#
- * #cap = (Int*5, Long*5, Int*2, Long*2, Int, Long*2)*2#
- */
-static PyObject *
-@name@_@oper@(PyObject *obj)
-{
- PyObject *pyint;
- pyint = @name@_@kind@(obj);
- if (pyint == NULL) {
- return NULL;
- }
- return Py@cap@_Type.tp_as_number->nb_@oper@(pyint);
-}
-/**end repeat**/
-
-#endif
-
/**begin repeat
* #oper = le, ge, lt, gt, eq, ne#
* #op = <=, >=, <, >, ==, !=#
@@ -1597,46 +1516,25 @@ static PyNumberMethods @name@_as_number = {
(binaryfunc)@name@_add, /*nb_add*/
(binaryfunc)@name@_subtract, /*nb_subtract*/
(binaryfunc)@name@_multiply, /*nb_multiply*/
-#if !defined(NPY_PY3K)
- (binaryfunc)@name@_divide, /*nb_divide*/
-#endif
(binaryfunc)@name@_remainder, /*nb_remainder*/
(binaryfunc)@name@_divmod, /*nb_divmod*/
(ternaryfunc)@name@_power, /*nb_power*/
(unaryfunc)@name@_negative,
(unaryfunc)@name@_positive, /*nb_pos*/
(unaryfunc)@name@_absolute, /*nb_abs*/
-#if defined(NPY_PY3K)
(inquiry)@name@_bool, /*nb_bool*/
-#else
- (inquiry)@name@_nonzero, /*nb_nonzero*/
-#endif
(unaryfunc)@name@_invert, /*nb_invert*/
(binaryfunc)@name@_lshift, /*nb_lshift*/
(binaryfunc)@name@_rshift, /*nb_rshift*/
(binaryfunc)@name@_and, /*nb_and*/
(binaryfunc)@name@_xor, /*nb_xor*/
(binaryfunc)@name@_or, /*nb_or*/
-#if !defined(NPY_PY3K)
- 0, /*nb_coerce*/
-#endif
(unaryfunc)@name@_int, /*nb_int*/
-#if defined(NPY_PY3K)
(unaryfunc)0, /*nb_reserved*/
-#else
- (unaryfunc)@name@_long, /*nb_long*/
-#endif
(unaryfunc)@name@_float, /*nb_float*/
-#if !defined(NPY_PY3K)
- (unaryfunc)@name@_oct, /*nb_oct*/
- (unaryfunc)@name@_hex, /*nb_hex*/
-#endif
0, /*inplace_add*/
0, /*inplace_subtract*/
0, /*inplace_multiply*/
-#if !defined(NPY_PY3K)
- 0, /*inplace_divide*/
-#endif
0, /*inplace_remainder*/
0, /*inplace_power*/
0, /*inplace_lshift*/
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 7bf530b6b..fdbe8f2ad 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -3424,8 +3424,8 @@ reduce_type_resolver(PyUFuncObject *ufunc, PyArrayObject *arr,
}
static int
-reduce_loop(NpyIter *iter, char **dataptrs, npy_intp *strides,
- npy_intp *countptr, NpyIter_IterNextFunc *iternext,
+reduce_loop(NpyIter *iter, char **dataptrs, npy_intp const *strides,
+ npy_intp const *countptr, NpyIter_IterNextFunc *iternext,
int needs_api, npy_intp skip_first_count, void *data)
{
PyArray_Descr *dtypes[3], **iter_dtypes;
@@ -3502,7 +3502,11 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp *strides,
strides_copy[2] = strides[0];
if (!masked) {
- innerloop(dataptrs_copy, countptr,
+ /* gh-15252: The signature of the inner loop considers `countptr`
+ * mutable. Inner loops aren't actually allowed to modify this
+ * though, so it's fine to cast it.
+ */
+ innerloop(dataptrs_copy, (npy_intp *)countptr,
strides_copy, innerloopdata);
}
else {
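The cast in the hunk above is needed because reduce_loop is now const-correct while the legacy inner-loop signature it calls still takes plain npy_intp pointers; as the gh-15252 comment says, the cast is justified by the contract that inner loops never write through countptr, not by the type system. A hedged sketch of the same pattern with a simplified, hypothetical callback type:

    /* Legacy callback whose prototype predates const qualification */
    typedef void (*legacy_loop)(char **args, npy_intp *dimensions,
                                npy_intp *steps, void *data);

    static void
    call_inner(legacy_loop loop, char **args,
               npy_intp const *dimensions, npy_intp const *steps, void *data)
    {
        /* Safe by contract (the loop only reads these), not by the types */
        loop(args, (npy_intp *)dimensions, (npy_intp *)steps, data);
    }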
@@ -6034,12 +6038,7 @@ static PyGetSetDef ufunc_getset[] = {
*****************************************************************************/
NPY_NO_EXPORT PyTypeObject PyUFunc_Type = {
-#if defined(NPY_PY3K)
PyVarObject_HEAD_INIT(NULL, 0)
-#else
- PyObject_HEAD_INIT(NULL)
- 0, /* ob_size */
-#endif
"numpy.ufunc", /* tp_name */
sizeof(PyUFuncObject), /* tp_basicsize */
0, /* tp_itemsize */
@@ -6048,11 +6047,7 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = {
0, /* tp_print */
0, /* tp_getattr */
0, /* tp_setattr */
-#if defined(NPY_PY3K)
0, /* tp_reserved */
-#else
- 0, /* tp_compare */
-#endif
(reprfunc)ufunc_repr, /* tp_repr */
0, /* tp_as_number */
0, /* tp_as_sequence */
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index f93d8229e..0e71305b6 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1347,37 +1347,6 @@ PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc,
return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
type_tup, out_dtypes);
}
-/*
- * Function to check and report floor division warning when python2.x is
- * invoked with -3 switch
- * See PEP238 and #7949 for numpy
- * This function will not be hit for py3 or when __future__ imports division.
- * See generate_umath.py for reason
-*/
-NPY_NO_EXPORT int
-PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes)
-{
- /* Deprecation checks needed only on python 2 */
-#if !defined(NPY_PY3K)
- int type_num1, type_num2;
-
- type_num1 = PyArray_DESCR(operands[0])->type_num;
- type_num2 = PyArray_DESCR(operands[1])->type_num;
-
- /* If both types are integer, warn the user, same as python does */
- if (Py_DivisionWarningFlag &&
- (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) &&
- (PyTypeNum_ISINTEGER(type_num2) || PyTypeNum_ISBOOL(type_num2))) {
- PyErr_Warn(PyExc_DeprecationWarning, "numpy: classic int division");
- }
-#endif
- return PyUFunc_DivisionTypeResolver(ufunc, casting, operands,
- type_tup, out_dtypes);
-}
static int
find_userloop(PyUFuncObject *ufunc,
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index a4e670a8e..1d6ad3358 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -72,13 +72,6 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes);
NPY_NO_EXPORT int
-PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes);
-
-NPY_NO_EXPORT int
PyUFunc_TrueDivisionTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
PyArrayObject **operands,
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 6ec474376..e14006985 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -174,7 +174,6 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyObject *str, *tmp;
char *docstr, *newdocstr;
-#if defined(NPY_PY3K)
if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
&PyUnicode_Type, &str)) {
return NULL;
@@ -184,20 +183,11 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
return NULL;
}
docstr = PyBytes_AS_STRING(tmp);
-#else
- if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
- &PyString_Type, &str)) {
- return NULL;
- }
- docstr = PyString_AS_STRING(str);
-#endif
if (NULL != ufunc->doc) {
PyErr_SetString(PyExc_ValueError,
"Cannot change docstring of ufunc with non-NULL docstring");
-#if defined(NPY_PY3K)
Py_DECREF(tmp);
-#endif
return NULL;
}
@@ -211,9 +201,7 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
strcpy(newdocstr, docstr);
ufunc->doc = newdocstr;
-#if defined(NPY_PY3K)
Py_DECREF(tmp);
-#endif
Py_RETURN_NONE;
}
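With only the Python 3 branch left, add_newdoc_ufunc parses the docstring as str and reaches the byte buffer through a temporary object tmp (created from str earlier in the function, outside this hunk) that must be released once the text has been copied. A hedged sketch of that encode-copy-release pattern, using plain malloc in place of whatever the real function allocates with:

    /* Sketch: copy the UTF-8 form of a unicode object into a C string
     * that outlives the temporary bytes object.  Needs Python.h,
     * stdlib.h and string.h. */
    static char *
    copy_utf8(PyObject *str)
    {
        PyObject *tmp = PyUnicode_AsUTF8String(str);
        char *copy;

        if (tmp == NULL) {
            return NULL;
        }
        copy = malloc(strlen(PyBytes_AS_STRING(tmp)) + 1);
        if (copy != NULL) {
            strcpy(copy, PyBytes_AS_STRING(tmp));
        }
        Py_DECREF(tmp);      /* the copy is independent of tmp's buffer */
        return copy;
    }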
@@ -324,10 +312,8 @@ int initumath(PyObject *m)
PyModule_AddObject(m, "NZERO", PyFloat_FromDouble(NPY_NZERO));
PyModule_AddObject(m, "NAN", PyFloat_FromDouble(NPY_NAN));
-#if defined(NPY_PY3K)
s = PyDict_GetItemString(d, "true_divide");
PyDict_SetItemString(d, "divide", s);
-#endif
s = PyDict_GetItemString(d, "conjugate");
s2 = PyDict_GetItemString(d, "remainder");
diff --git a/numpy/core/tests/_locales.py b/numpy/core/tests/_locales.py
index 4739e6c10..ce7b81f00 100644
--- a/numpy/core/tests/_locales.py
+++ b/numpy/core/tests/_locales.py
@@ -43,7 +43,7 @@ def find_comma_decimal_point_locale():
return old_locale, new_locale
-class CommaDecimalPointLocale(object):
+class CommaDecimalPointLocale:
"""Sets LC_NUMERIC to a locale with comma as decimal point.
Classes derived from this class have setup and teardown methods that run
diff --git a/numpy/core/tests/test_abc.py b/numpy/core/tests/test_abc.py
index 4c5a6e42c..30e5748af 100644
--- a/numpy/core/tests/test_abc.py
+++ b/numpy/core/tests/test_abc.py
@@ -5,7 +5,7 @@ import numbers
import numpy as np
from numpy.core.numerictypes import sctypes
-class TestABC(object):
+class TestABC:
def test_abstract(self):
assert_(issubclass(np.number, numbers.Number))
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index d532c96f1..eac4647c9 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -10,7 +10,7 @@ from numpy.testing import (
)
import textwrap
-class TestArrayRepr(object):
+class TestArrayRepr:
def test_nan_inf(self):
x = np.array([np.nan, np.inf])
assert_equal(repr(x), 'array([nan, inf])')
@@ -158,7 +158,7 @@ class TestArrayRepr(object):
assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
-class TestComplexArray(object):
+class TestComplexArray:
def test_str(self):
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
@@ -205,7 +205,7 @@ class TestComplexArray(object):
for res, val in zip(actual, wanted):
assert_equal(res, val)
-class TestArray2String(object):
+class TestArray2String:
def test_basic(self):
"""Basic test of array2string."""
a = np.arange(3)
@@ -411,7 +411,7 @@ class TestArray2String(object):
gc.enable()
assert_(r1 == r2)
-class TestPrintOptions(object):
+class TestPrintOptions:
"""Test getting and setting global print options."""
def setup(self):
@@ -854,7 +854,7 @@ def test_unicode_object_array():
assert_equal(repr(x), expected)
-class TestContextManager(object):
+class TestContextManager:
def test_ctx_mgr(self):
         # test that context manager actually works
with np.printoptions(precision=2):
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 4fb0bb916..438d52f97 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -22,7 +22,7 @@ except NameError:
RecursionError = RuntimeError # python < 3.5
-class TestDateTime(object):
+class TestDateTime:
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
@@ -2366,7 +2366,7 @@ class TestDateTime(object):
assert limit_via_str == limit
-class TestDateTimeData(object):
+class TestDateTimeData:
def test_basic(self):
a = np.array(['1980-03-23'], dtype=np.datetime64)
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index 2bfd568b8..d2a1e86d2 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -10,7 +10,7 @@ from numpy.testing import (
kw_unicode_true = {'unicode': True} # make 2to3 work properly
kw_unicode_false = {'unicode': False}
-class TestBasic(object):
+class TestBasic:
def test_from_object_array(self):
A = np.array([['abc', 2],
['long ', '0123456789']], dtype='O')
@@ -81,7 +81,7 @@ class TestBasic(object):
assert_equal(A.itemsize, 4)
assert_(issubclass(A.dtype.type, np.unicode_))
-class TestVecString(object):
+class TestVecString:
def test_non_existent_method(self):
def fail():
@@ -132,7 +132,7 @@ class TestVecString(object):
assert_raises(ValueError, fail)
-class TestWhitespace(object):
+class TestWhitespace:
def setup(self):
self.A = np.array([['abc ', '123 '],
['789 ', 'xyz ']]).view(np.chararray)
@@ -147,7 +147,7 @@ class TestWhitespace(object):
assert_(not np.any(self.A < self.B))
assert_(not np.any(self.A != self.B))
-class TestChar(object):
+class TestChar:
def setup(self):
self.A = np.array('abc1', dtype='c').view(np.chararray)
@@ -155,7 +155,7 @@ class TestChar(object):
assert_equal(self.A.shape, (4,))
assert_equal(self.A.upper()[:2].tobytes(), b'AB')
-class TestComparisons(object):
+class TestComparisons:
def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
@@ -196,7 +196,7 @@ class TestComparisonsMixed2(TestComparisons):
self.A = np.array([['abc', '123'],
['789', 'xyz']], np.unicode_).view(np.chararray)
-class TestInformation(object):
+class TestInformation:
def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
@@ -302,7 +302,7 @@ class TestInformation(object):
assert_raises(TypeError, fail)
-class TestMethods(object):
+class TestMethods:
def setup(self):
self.A = np.array([[' abc ', ''],
['12345', 'MixedCase'],
@@ -597,7 +597,7 @@ class TestMethods(object):
[False, False], [True, False], [False, False]])
-class TestOperations(object):
+class TestOperations:
def setup(self):
self.A = np.array([['abc', '123'],
['789', 'xyz']]).view(np.chararray)
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 050d3b0e2..7232b5949 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -25,7 +25,7 @@ except ImportError:
_has_pytz = False
-class _DeprecationTestCase(object):
+class _DeprecationTestCase:
# Just as warning: warnings uses re.match, so the start of this message
# must match.
message = ''
@@ -135,7 +135,7 @@ class _VisibleDeprecationTestCase(_DeprecationTestCase):
warning_cls = np.VisibleDeprecationWarning
-class TestNonTupleNDIndexDeprecation(object):
+class TestNonTupleNDIndexDeprecation:
def test_basic(self):
a = np.zeros((5, 5))
with warnings.catch_warnings():
@@ -187,7 +187,7 @@ class TestComparisonDeprecations(_DeprecationTestCase):
assert_warns(FutureWarning, lambda: a == [])
def test_void_dtype_equality_failures(self):
- class NotArray(object):
+ class NotArray:
def __array__(self):
raise TypeError
@@ -340,7 +340,7 @@ class TestNumericStyleTypecodes(_DeprecationTestCase):
args=(dt,))
-class TestTestDeprecated(object):
+class TestTestDeprecated:
def test_assert_deprecated(self):
test_case_instance = _DeprecationTestCase()
test_case_instance.setup()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 9c279963d..1d24d8a3d 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -21,7 +21,7 @@ def assert_dtype_not_equal(a, b):
assert_(hash(a) != hash(b),
"two different types hash to the same value !")
-class TestBuiltin(object):
+class TestBuiltin:
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
np.compat.unicode])
def test_run(self, t):
@@ -144,7 +144,7 @@ class TestBuiltin(object):
'offsets': [4, 0]})
assert_equal(x == y, False)
-class TestRecord(object):
+class TestRecord:
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', int)])
@@ -443,7 +443,7 @@ class TestRecord(object):
np.ones((1, 2), dtype=bool))
-class TestSubarray(object):
+class TestSubarray:
def test_single_subarray(self):
a = np.dtype((int, (2)))
b = np.dtype((int, (2,)))
@@ -519,7 +519,7 @@ class TestSubarray(object):
assert_(isinstance(dt['a'].shape, tuple))
#
- class IntLike(object):
+ class IntLike:
def __index__(self):
return 3
@@ -709,7 +709,7 @@ class TestStructuredObjectRefcounting:
assert after_repeat - after == count * 2 * 10
-class TestStructuredDtypeSparseFields(object):
+class TestStructuredDtypeSparseFields:
"""Tests subarray fields which contain sparse dtypes so that
not all memory is used by the dtype work. Such dtype's should
leave the underlying memory unchanged.
@@ -739,7 +739,7 @@ class TestStructuredDtypeSparseFields(object):
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
-class TestMonsterType(object):
+class TestMonsterType:
"""Test deeply nested subtypes."""
def test1(self):
@@ -757,7 +757,7 @@ class TestMonsterType(object):
('yi', np.dtype((a, (3, 2))))])
assert_dtype_equal(c, d)
-class TestMetadata(object):
+class TestMetadata:
def test_no_metadata(self):
d = np.dtype(int)
assert_(d.metadata is None)
@@ -779,7 +779,7 @@ class TestMetadata(object):
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
assert_(d.metadata == {'datum': 1})
-class TestString(object):
+class TestString:
def test_complex_dtype_str(self):
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
('rtile', '>f4', (64, 36))], (3,)),
@@ -932,7 +932,7 @@ class TestString(object):
assert_equal(dt.name, 'record16')
-class TestDtypeAttributeDeletion(object):
+class TestDtypeAttributeDeletion:
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
@@ -950,7 +950,7 @@ class TestDtypeAttributeDeletion(object):
assert_raises(AttributeError, delattr, dt, s)
-class TestDtypeAttributes(object):
+class TestDtypeAttributes:
def test_descr_has_trailing_void(self):
# see gh-6359
dtype = np.dtype({
@@ -968,7 +968,7 @@ class TestDtypeAttributes(object):
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
-class TestPickling(object):
+class TestPickling:
def check_pickling(self, dtype):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
@@ -1052,7 +1052,7 @@ def test_invalid_dtype_string():
assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
-class TestFromDTypeAttribute(object):
+class TestFromDTypeAttribute:
def test_simple(self):
class dt:
dtype = "f8"
@@ -1096,7 +1096,7 @@ class TestFromDTypeAttribute(object):
with pytest.raises(RecursionError):
np.dtype(dt(1))
-class TestFromCTypes(object):
+class TestFromCTypes:
@staticmethod
def check(ctype, dtype):
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 3d19edbfc..6ec61fb1d 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -12,7 +12,7 @@ sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
global_size_dict = dict(zip(chars, sizes))
-class TestEinsum(object):
+class TestEinsum:
def test_einsum_errors(self):
for do_opt in [True, False]:
# Need enough arguments
@@ -866,7 +866,7 @@ class TestEinsum(object):
self.optimize_compare('obk,ijk->ioj', operands=[g, g])
-class TestEinsumPath(object):
+class TestEinsumPath:
def build_operands(self, string, size_dict=global_size_dict):
# Builds views based off initial operands
diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py
index 9e0993290..7c1780607 100644
--- a/numpy/core/tests/test_errstate.py
+++ b/numpy/core/tests/test_errstate.py
@@ -5,7 +5,7 @@ import numpy as np
from numpy.testing import assert_, assert_raises
-class TestErrstate(object):
+class TestErrstate:
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_invalid(self):
with np.errstate(all='raise', under='ignore'):
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 8d9ef0520..7e5ea1cc6 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -40,7 +40,7 @@ class PhysicalQuantity2(ndarray):
__array_priority__ = 10
-class TestLogspace(object):
+class TestLogspace:
def test_basic(self):
y = logspace(0, 6)
@@ -90,7 +90,7 @@ class TestLogspace(object):
assert_equal(ls, logspace(1.0, 7.0, 1))
-class TestGeomspace(object):
+class TestGeomspace:
def test_basic(self):
y = geomspace(1, 1e6)
@@ -220,7 +220,7 @@ class TestGeomspace(object):
assert_raises(ValueError, geomspace, 0, 0)
-class TestLinspace(object):
+class TestLinspace:
def test_basic(self):
y = linspace(0, 10)
@@ -307,7 +307,7 @@ class TestLinspace(object):
# Ensure that start/stop can be objects that implement
# __array_interface__ and are convertible to numeric scalars
- class Arrayish(object):
+ class Arrayish:
"""
A generic object that supports the __array_interface__ and hence
can in principle be converted to a numeric scalar, but is not
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index 598a481ed..bcf8cf659 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -9,37 +9,37 @@ from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
-class TestPythonFloat(object):
+class TestPythonFloat:
def test_singleton(self):
ftype = finfo(float)
ftype2 = finfo(float)
assert_equal(id(ftype), id(ftype2))
-class TestHalf(object):
+class TestHalf:
def test_singleton(self):
ftype = finfo(half)
ftype2 = finfo(half)
assert_equal(id(ftype), id(ftype2))
-class TestSingle(object):
+class TestSingle:
def test_singleton(self):
ftype = finfo(single)
ftype2 = finfo(single)
assert_equal(id(ftype), id(ftype2))
-class TestDouble(object):
+class TestDouble:
def test_singleton(self):
ftype = finfo(double)
ftype2 = finfo(double)
assert_equal(id(ftype), id(ftype2))
-class TestLongdouble(object):
+class TestLongdouble:
def test_singleton(self):
ftype = finfo(longdouble)
ftype2 = finfo(longdouble)
assert_equal(id(ftype), id(ftype2))
-class TestFinfo(object):
+class TestFinfo:
def test_basic(self):
dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
[np.float16, np.float32, np.float64, np.complex64,
@@ -52,7 +52,7 @@ class TestFinfo(object):
getattr(finfo(dt2), attr), attr)
assert_raises(ValueError, finfo, 'i4')
-class TestIinfo(object):
+class TestIinfo:
def test_basic(self):
dts = list(zip(['i1', 'i2', 'i4', 'i8',
'u1', 'u2', 'u4', 'u8'],
@@ -69,7 +69,7 @@ class TestIinfo(object):
for T in types:
assert_equal(iinfo(T).max, T(-1))
-class TestRepr(object):
+class TestRepr:
def test_iinfo_repr(self):
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
assert_equal(repr(np.iinfo(np.int16)), expected)
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 7a12698e4..c6d046be1 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -16,7 +16,7 @@ def assert_raises_fpe(strmatch, callable, *args, **kwargs):
assert_(False,
"Did not raise floating point %s error" % strmatch)
-class TestHalf(object):
+class TestHalf:
def setup(self):
# An array of all possible float16 values
self.all_f16 = np.arange(0x10000, dtype=uint16)
diff --git a/numpy/core/tests/test_indexerrors.py b/numpy/core/tests/test_indexerrors.py
index 3fd76b91b..9d2433fc5 100644
--- a/numpy/core/tests/test_indexerrors.py
+++ b/numpy/core/tests/test_indexerrors.py
@@ -1,7 +1,7 @@
import numpy as np
from numpy.testing import assert_raises
-class TestIndexErrors(object):
+class TestIndexErrors:
'''Tests to exercise indexerrors not covered by other tests.'''
def test_arraytypes_fasttake(self):
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 7f2b1dff4..237e381a7 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -9,11 +9,11 @@ from numpy.core._multiarray_tests import array_indexing
from itertools import product
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_warns,
- HAS_REFCOUNT, suppress_warnings,
+ HAS_REFCOUNT,
)
-class TestIndexing(object):
+class TestIndexing:
def test_index_no_floats(self):
a = np.array([[[5]]])
@@ -395,14 +395,14 @@ class TestIndexing(object):
def test_scalar_return_type(self):
# Full scalar indices should return scalars and object
# arrays should not call PyArray_Return on their items
- class Zero(object):
+ class Zero:
# The most basic valid indexing
def __index__(self):
return 0
z = Zero()
- class ArrayLike(object):
+ class ArrayLike:
# Simple array, should behave like the array
def __array__(self):
return np.array(0)
@@ -482,7 +482,7 @@ class TestIndexing(object):
# on item getting, this should not be converted to an nd-index (tuple)
# If this object happens to be a valid index otherwise, it should work
# This object here is very dubious and probably bad though:
- class SequenceLike(object):
+ class SequenceLike:
def __index__(self):
return 0
@@ -525,7 +525,7 @@ class TestIndexing(object):
arr[slices] = 10
assert_array_equal(arr, 10.)
-class TestFieldIndexing(object):
+class TestFieldIndexing:
def test_scalar_return_type(self):
# Field access on an array should return an array, even if it
# is 0-d.
@@ -534,7 +534,7 @@ class TestFieldIndexing(object):
assert_(isinstance(a[['a']], np.ndarray))
-class TestBroadcastedAssignments(object):
+class TestBroadcastedAssignments:
def assign(self, a, ind, val):
a[ind] = val
return a
@@ -585,7 +585,7 @@ class TestBroadcastedAssignments(object):
assert_((a[::-1] == v).all())
-class TestSubclasses(object):
+class TestSubclasses:
def test_basic(self):
# Test that indexing in various ways produces SubClass instances,
# and that the base is set up correctly: the original subclass
@@ -648,56 +648,8 @@ class TestSubclasses(object):
assert_array_equal(new_s.finalize_status, new_s)
assert_array_equal(new_s.old, s)
- @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
- def test_slice_decref_getsetslice(self):
-        # See gh-10066, a temporary slice object should be discarded.
- # This test is only really interesting on Python 2 since
- # it goes through `__set/getslice__` here and can probably be
- # removed. Use 0:7 to make sure it is never None:7.
- class KeepIndexObject(np.ndarray):
- def __getitem__(self, indx):
- self.indx = indx
- if indx == slice(0, 7):
- raise ValueError
-
- def __setitem__(self, indx, val):
- self.indx = indx
- if indx == slice(0, 4):
- raise ValueError
- k = np.array([1]).view(KeepIndexObject)
- k[0:5]
- assert_equal(k.indx, slice(0, 5))
- assert_equal(sys.getrefcount(k.indx), 2)
- try:
- k[0:7]
- raise AssertionError
- except ValueError:
- # The exception holds a reference to the slice so clear on Py2
- if hasattr(sys, 'exc_clear'):
- with suppress_warnings() as sup:
- sup.filter(DeprecationWarning)
- sys.exc_clear()
- assert_equal(k.indx, slice(0, 7))
- assert_equal(sys.getrefcount(k.indx), 2)
-
- k[0:3] = 6
- assert_equal(k.indx, slice(0, 3))
- assert_equal(sys.getrefcount(k.indx), 2)
- try:
- k[0:4] = 2
- raise AssertionError
- except ValueError:
- # The exception holds a reference to the slice so clear on Py2
- if hasattr(sys, 'exc_clear'):
- with suppress_warnings() as sup:
- sup.filter(DeprecationWarning)
- sys.exc_clear()
- assert_equal(k.indx, slice(0, 4))
- assert_equal(sys.getrefcount(k.indx), 2)
-
-
-class TestFancyIndexingCast(object):
+class TestFancyIndexingCast:
def test_boolean_index_cast_assign(self):
# Setup the boolean index and float arrays.
shape = (8, 63)
@@ -719,7 +671,7 @@ class TestFancyIndexingCast(object):
zero_array.__setitem__, bool_index, np.array([1j]))
assert_equal(zero_array[0, 1], 0)
-class TestFancyIndexingEquivalence(object):
+class TestFancyIndexingEquivalence:
def test_object_assign(self):
# Check that the field and object special case using copyto is active.
# The right hand side cannot be converted to an array here.
@@ -767,7 +719,7 @@ class TestFancyIndexingEquivalence(object):
assert_array_equal(a, b[0])
-class TestMultiIndexingAutomated(object):
+class TestMultiIndexingAutomated:
"""
These tests use code to mimic the C-Code indexing for selection.
@@ -1189,7 +1141,7 @@ class TestMultiIndexingAutomated(object):
for index in self.complex_indices:
self._check_single_index(a, index)
-class TestFloatNonIntegerArgument(object):
+class TestFloatNonIntegerArgument:
"""
These test that ``TypeError`` is raised when you try to use
non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
@@ -1244,7 +1196,7 @@ class TestFloatNonIntegerArgument(object):
assert_raises(TypeError, np.min, d, (.2, 1.2))
-class TestBooleanIndexing(object):
+class TestBooleanIndexing:
# Using a boolean as integer argument/indexing is an error.
def test_bool_as_int_argument_errors(self):
a = np.array([[[1]]])
@@ -1265,7 +1217,7 @@ class TestBooleanIndexing(object):
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
-class TestArrayToIndexDeprecation(object):
+class TestArrayToIndexDeprecation:
"""Creating an an index from array not 0-D is an error.
"""
@@ -1278,7 +1230,7 @@ class TestArrayToIndexDeprecation(object):
assert_raises(TypeError, np.take, a, [0], a)
-class TestNonIntegerArrayLike(object):
+class TestNonIntegerArrayLike:
"""Tests that array_likes only valid if can safely cast to integer.
For instance, lists give IndexError when they cannot be safely cast to
@@ -1295,7 +1247,7 @@ class TestNonIntegerArrayLike(object):
a.__getitem__([])
-class TestMultipleEllipsisError(object):
+class TestMultipleEllipsisError:
"""An index can only have a single ellipsis.
"""
@@ -1306,7 +1258,7 @@ class TestMultipleEllipsisError(object):
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
-class TestCApiAccess(object):
+class TestCApiAccess:
def test_getitem(self):
subscript = functools.partial(array_indexing, 0)
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py
index b28b2d5da..cadd0d513 100644
--- a/numpy/core/tests/test_item_selection.py
+++ b/numpy/core/tests/test_item_selection.py
@@ -6,7 +6,7 @@ from numpy.testing import (
)
-class TestTake(object):
+class TestTake:
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index b1db252c3..bf12f0e1b 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -119,7 +119,7 @@ def test_fromstring_missing():
np.array([1]))
-class TestFileBased(object):
+class TestFileBased:
ldbl = 1 + LD_INFO.eps
tgt = np.array([ldbl]*5)
diff --git a/numpy/core/tests/test_machar.py b/numpy/core/tests/test_machar.py
index 43d5871a9..673f309f1 100644
--- a/numpy/core/tests/test_machar.py
+++ b/numpy/core/tests/test_machar.py
@@ -8,7 +8,7 @@ import numpy.core.numerictypes as ntypes
from numpy import errstate, array
-class TestMachAr(object):
+class TestMachAr:
def _run_machar_highprec(self):
# Instantiate MachAr instance with high enough precision to cause
# underflow
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index 876ac2455..c5115fa7e 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -562,7 +562,7 @@ def test_internal_overlap_fuzz():
def test_non_ndarray_inputs():
# Regression check for gh-5604
- class MyArray(object):
+ class MyArray:
def __init__(self, data):
self.data = data
@@ -570,7 +570,7 @@ def test_non_ndarray_inputs():
def __array_interface__(self):
return self.data.__array_interface__
- class MyArray2(object):
+ class MyArray2:
def __init__(self, data):
self.data = data
@@ -617,7 +617,7 @@ def assert_copy_equivalent(operation, args, out, **kwargs):
assert_equal(got, expected)
-class TestUFunc(object):
+class TestUFunc:
"""
Test ufunc call memory overlap handling
"""
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index b8d0651ca..bae7a318a 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -14,7 +14,7 @@ from numpy.testing import (
assert_, assert_equal, assert_array_equal, suppress_warnings
)
-class TestMemmap(object):
+class TestMemmap:
def setup(self):
self.tmpfp = NamedTemporaryFile(prefix='mmap')
self.tempdir = mkdtemp()
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 647981835..85910886a 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -18,7 +18,6 @@ import gc
import weakref
import pytest
from contextlib import contextmanager
-from test.support import no_tracing
from numpy.compat import pickle
@@ -95,8 +94,28 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
data.fill(0)
return data
+def _no_tracing(func):
+ """
+ Decorator to temporarily turn off tracing for the duration of a test.
+    Needed in tests that check refcounting; otherwise the tracing itself
+    influences the refcounts.
+ """
+ if not hasattr(sys, 'gettrace'):
+ return func
+ else:
+ @functools.wraps(func)
+ def wrapper(*args, **kwargs):
+ original_trace = sys.gettrace()
+ try:
+ sys.settrace(None)
+ return func(*args, **kwargs)
+ finally:
+ sys.settrace(original_trace)
+ return wrapper
-class TestFlags(object):
+
+
+class TestFlags:
def setup(self):
self.a = np.arange(10)
@@ -264,7 +283,7 @@ class TestFlags(object):
assert_(a.flags.aligned)
-class TestHash(object):
+class TestHash:
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
@@ -286,7 +305,7 @@ class TestHash(object):
err_msg="%r: 2**%d - 1" % (ut, i))
-class TestAttributes(object):
+class TestAttributes:
def setup(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
@@ -327,17 +346,8 @@ class TestAttributes(object):
numpy_int = np.int_(0)
- if sys.version_info[0] >= 3:
- # On Py3k int_ should not inherit from int, because it's not
- # fixed-width anymore
- assert_equal(isinstance(numpy_int, int), False)
- else:
- # Otherwise, it should inherit from int...
- assert_equal(isinstance(numpy_int, int), True)
-
- # ... and fast-path checks on C-API level should also work
- from numpy.core._multiarray_tests import test_int_subclass
- assert_equal(test_int_subclass(numpy_int), True)
+ # int_ doesn't inherit from Python int, because it's not fixed-width
+ assert_(not isinstance(numpy_int, int))
def test_stridesattr(self):
x = self.one
@@ -419,7 +429,7 @@ class TestAttributes(object):
assert_array_equal(x['b'], [-2, -2])
-class TestArrayConstruction(object):
+class TestArrayConstruction:
def test_array(self):
d = np.ones(6)
r = np.array([d, d])
@@ -497,7 +507,7 @@ class TestArrayConstruction(object):
assert_(np.asfortranarray(d).flags.f_contiguous)
-class TestAssignment(object):
+class TestAssignment:
def test_assignment_broadcasting(self):
a = np.arange(6).reshape(2, 3)
@@ -563,7 +573,7 @@ class TestAssignment(object):
u = np.array([u'done'])
b = np.array([b'done'])
- class bad_sequence(object):
+ class bad_sequence:
def __getitem__(self): pass
def __len__(self): raise RuntimeError
@@ -614,7 +624,7 @@ class TestAssignment(object):
assert_equal(a[0], b"1.1234567890123457")
-class TestDtypedescr(object):
+class TestDtypedescr:
def test_construction(self):
d1 = np.dtype('i4')
assert_equal(d1, np.dtype(np.int32))
@@ -636,7 +646,7 @@ class TestDtypedescr(object):
"array([0, 0, 0, 0], dtype=(numpy.int32, [('a', '<i2'), ('b', '<i2')]))")
-class TestZeroRank(object):
+class TestZeroRank:
def setup(self):
self.d = np.array(0), np.array('x', object)
@@ -734,7 +744,7 @@ class TestZeroRank(object):
assert_equal(xi.flags.f_contiguous, True)
-class TestScalarIndexing(object):
+class TestScalarIndexing:
def setup(self):
self.d = np.array([0, 1])[0]
@@ -830,12 +840,12 @@ class TestScalarIndexing(object):
assert_equal(a, [0, 1, 0, 1, 2])
-class TestCreation(object):
+class TestCreation:
"""
Test the np.array constructor
"""
def test_from_attribute(self):
- class x(object):
+ class x:
def __array__(self, dtype=None):
pass
@@ -977,14 +987,14 @@ class TestCreation(object):
of an error in the Fail case.
"""
- class Fail(object):
+ class Fail:
def __len__(self):
return 1
def __getitem__(self, index):
raise ValueError()
- class Map(object):
+ class Map:
def __len__(self):
return 1
@@ -1022,7 +1032,7 @@ class TestCreation(object):
def test_failed_len_sequence(self):
# gh-7393
- class A(object):
+ class A:
def __init__(self, data):
self._data = data
def __getitem__(self, item):
@@ -1078,7 +1088,7 @@ class TestCreation(object):
assert_equal(a.dtype, object)
-class TestStructured(object):
+class TestStructured:
def test_subarray_field_access(self):
a = np.zeros((3, 5), dtype=[('a', ('i4', (2, 2)))])
a['a'] = np.arange(60).reshape(3, 5, 2, 2)
@@ -1366,7 +1376,7 @@ class TestStructured(object):
a[['b','c']] # no exception
-class TestBool(object):
+class TestBool:
def test_test_interning(self):
a0 = np.bool_(0)
b0 = np.bool_(False)
@@ -1456,7 +1466,7 @@ class TestBool(object):
self._test_cast_from_flexible(np.bytes_)
-class TestZeroSizeFlexible(object):
+class TestZeroSizeFlexible:
@staticmethod
def _zeros(shape, dtype=str):
dtype = np.dtype(dtype)
@@ -1543,7 +1553,7 @@ class TestZeroSizeFlexible(object):
assert array_from_buffer[0] == -1, array_from_buffer[0]
-class TestMethods(object):
+class TestMethods:
sort_kinds = ['quicksort', 'heapsort', 'stable']
@@ -1870,7 +1880,7 @@ class TestMethods(object):
# test generic class with bogus ordering,
# should not segfault.
- class Boom(object):
+ class Boom:
def __lt__(self, other):
return True
@@ -1898,7 +1908,7 @@ class TestMethods(object):
for kind in self.sort_kinds:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
- class Raiser(object):
+ class Raiser:
def raises_anything(*args, **kwargs):
raise TypeError("SOMETHING ERRORED")
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
@@ -2142,6 +2152,8 @@ class TestMethods(object):
msg = "Test real searchsorted with nans, side='r'"
b = a.searchsorted(a, side='r')
assert_equal(b, np.arange(1, 4), msg)
+ # check keyword arguments
+ a.searchsorted(v=1)
# check double complex
a = np.zeros(9, dtype=np.complex128)
a.real += [0, 0, 1, 1, 0, 1, np.nan, np.nan, np.nan]
@@ -2940,7 +2952,7 @@ class TestMethods(object):
def test_dot_matmul_inner_array_casting_fails(self):
- class A(object):
+ class A:
def __array__(self, *args, **kwargs):
raise NotImplementedError
@@ -3301,12 +3313,12 @@ class TestMethods(object):
e = np.array(['1+1j'], 'U')
assert_raises(TypeError, complex, e)
-class TestCequenceMethods(object):
+class TestCequenceMethods:
def test_array_contains(self):
assert_(4.0 in np.arange(16.).reshape(4,4))
assert_(20.0 not in np.arange(16.).reshape(4,4))
-class TestBinop(object):
+class TestBinop:
def test_inplace(self):
# test refcount 1 inplace conversion
assert_array_almost_equal(np.array([0.5]) * np.array([1.0, 2.0]),
@@ -3531,7 +3543,7 @@ class TestBinop(object):
def test_ufunc_override_normalize_signature(self):
# gh-5674
- class SomeClass(object):
+ class SomeClass:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
return kw
@@ -3549,7 +3561,7 @@ class TestBinop(object):
# Check that index is set appropriately, also if only an output
# is passed on (latter is another regression tests for github bug 4753)
# This also checks implicitly that 'out' is always a tuple.
- class CheckIndex(object):
+ class CheckIndex:
def __array_ufunc__(self, ufunc, method, *inputs, **kw):
for i, a in enumerate(inputs):
if a is self:
@@ -3637,7 +3649,7 @@ class TestBinop(object):
def test_pow_array_object_dtype(self):
# test pow on arrays of object dtype
- class SomeClass(object):
+ class SomeClass:
def __init__(self, num=None):
self.num = num
@@ -3678,7 +3690,7 @@ class TestBinop(object):
+tst
-class TestTemporaryElide(object):
+class TestTemporaryElide:
# elision is only triggered on relatively large arrays
def test_extension_incref_elide(self):
@@ -3780,7 +3792,7 @@ class TestTemporaryElide(object):
assert_equal(a, 1)
-class TestCAPI(object):
+class TestCAPI:
def test_IsPythonScalar(self):
from numpy.core._multiarray_tests import IsPythonScalar
assert_(IsPythonScalar(b'foobar'))
@@ -3790,7 +3802,7 @@ class TestCAPI(object):
assert_(IsPythonScalar("a"))
-class TestSubscripting(object):
+class TestSubscripting:
def test_test_zero_rank(self):
x = np.array([1, 2, 3])
assert_(isinstance(x[0], np.int_))
@@ -3799,7 +3811,7 @@ class TestSubscripting(object):
assert_(type(x[0, ...]) is np.ndarray)
-class TestPickling(object):
+class TestPickling:
@pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
                        reason=('this tests the error messages when trying to use '
                                'protocol 5 although it is not available'))
@@ -3956,7 +3968,7 @@ class TestPickling(object):
assert_equal(original.dtype, new.dtype)
-class TestFancyIndexing(object):
+class TestFancyIndexing:
def test_list(self):
x = np.ones((1, 1))
x[:, [0]] = 2.0
@@ -4010,7 +4022,7 @@ class TestFancyIndexing(object):
assert_array_equal(x, np.array([[1, 10, 3, 4], [5, 6, 7, 8]]))
-class TestStringCompare(object):
+class TestStringCompare:
def test_string(self):
g1 = np.array(["This", "is", "example"])
g2 = np.array(["This", "was", "example"])
@@ -4042,7 +4054,7 @@ class TestStringCompare(object):
assert_array_equal(g1 > g2, [g1[i] > g2[i] for i in [0, 1, 2]])
-class TestArgmax(object):
+class TestArgmax:
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -4177,7 +4189,7 @@ class TestArgmax(object):
assert_equal(a.argmax(), 1)
-class TestArgmin(object):
+class TestArgmin:
nan_arr = [
([0, 1, 2, 3, np.nan], 4),
@@ -4326,7 +4338,7 @@ class TestArgmin(object):
assert_equal(a.argmin(), 1)
-class TestMinMax(object):
+class TestMinMax:
def test_scalar(self):
assert_raises(np.AxisError, np.amax, 1, 1)
@@ -4352,14 +4364,14 @@ class TestMinMax(object):
assert_equal(np.amax(a), a[3])
-class TestNewaxis(object):
+class TestNewaxis:
def test_basic(self):
sk = np.array([0, -0.1, 0.1])
res = 250*sk[:, np.newaxis]
assert_almost_equal(res.ravel(), 250*sk)
-class TestClip(object):
+class TestClip:
def _check_range(self, x, cmin, cmax):
assert_(np.all(x >= cmin))
assert_(np.all(x <= cmax))
@@ -4437,7 +4449,7 @@ class TestClip(object):
assert_array_equal(result, expected)
-class TestCompress(object):
+class TestCompress:
def test_axis(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -4460,7 +4472,7 @@ class TestCompress(object):
assert_equal(out, 1)
-class TestPutmask(object):
+class TestPutmask:
def tst_basic(self, x, T, mask, val):
np.putmask(x, mask, val)
assert_equal(x[mask], T(val))
@@ -4511,7 +4523,7 @@ class TestPutmask(object):
assert_equal(x, np.array([True, True, True, True]))
-class TestTake(object):
+class TestTake:
def tst_basic(self, x):
ind = list(range(x.shape[0]))
assert_array_equal(x.take(ind, axis=0), x)
@@ -4564,7 +4576,7 @@ class TestTake(object):
y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')
assert_equal(y, np.array([1, 2, 3]))
-class TestLexsort(object):
+class TestLexsort:
@pytest.mark.parametrize('dtype',[
np.uint8, np.uint16, np.uint32, np.uint64,
np.int8, np.int16, np.int32, np.int64,
@@ -4619,7 +4631,7 @@ class TestLexsort(object):
x = np.linspace(0., 1., 42*3).reshape(42, 3)
assert_raises(np.AxisError, np.lexsort, x, axis=2)
-class TestIO(object):
+class TestIO:
"""Test tofile, fromfile, tobytes, and fromstring"""
def setup(self):
@@ -5009,7 +5021,7 @@ class TestIO(object):
assert_array_equal(x, res)
-class TestFromBuffer(object):
+class TestFromBuffer:
@pytest.mark.parametrize('byteorder', ['<', '>'])
@pytest.mark.parametrize('dtype', [float, int, complex])
def test_basic(self, byteorder, dtype):
@@ -5022,7 +5034,7 @@ class TestFromBuffer(object):
assert_array_equal(np.frombuffer(b''), np.array([]))
-class TestFlat(object):
+class TestFlat:
def setup(self):
a0 = np.arange(20.0)
a = a0.reshape(4, 5)
@@ -5094,9 +5106,9 @@ class TestFlat(object):
assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
-class TestResize(object):
+class TestResize:
- @no_tracing
+ @_no_tracing
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
@@ -5113,7 +5125,7 @@ class TestResize(object):
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
- @no_tracing
+ @_no_tracing
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
@@ -5147,7 +5159,7 @@ class TestResize(object):
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
- @no_tracing
+ @_no_tracing
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
@@ -5156,7 +5168,7 @@ class TestResize(object):
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
- @no_tracing
+ @_no_tracing
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
@@ -5166,7 +5178,7 @@ class TestResize(object):
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
- @no_tracing
+ @_no_tracing
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
@@ -5193,7 +5205,7 @@ class TestResize(object):
del xref # avoid pyflakes unused variable warning.
-class TestRecord(object):
+class TestRecord:
def test_field_rename(self):
dt = np.dtype([('f', float), ('i', int)])
dt.names = ['p', 'q']
@@ -5355,7 +5367,7 @@ class TestRecord(object):
v[:] = (4,5)
assert_equal(a[0].item(), (4, 1, 5))
-class TestView(object):
+class TestView:
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
dtype=[('r', np.int8), ('g', np.int8),
@@ -5380,7 +5392,7 @@ def _std(a, **args):
return a.std(**args)
-class TestStats(object):
+class TestStats:
funcs = [_mean, _var, _std]
@@ -5587,7 +5599,7 @@ class TestStats(object):
res = dat.var(1)
assert_(res.info == dat.info)
-class TestVdot(object):
+class TestVdot:
def test_basic(self):
dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
dt_complex = np.typecodes['Complex']
@@ -5647,7 +5659,7 @@ class TestVdot(object):
np.vdot(a.flatten(), b.flatten()))
-class TestDot(object):
+class TestDot:
def setup(self):
np.random.seed(128)
self.A = np.random.rand(4, 2)
@@ -5749,7 +5761,7 @@ class TestDot(object):
assert_almost_equal(res, tgt, decimal=self.N)
def test_vecobject(self):
- class Vec(object):
+ class Vec:
def __init__(self, sequence=None):
if sequence is None:
sequence = []
@@ -5918,7 +5930,7 @@ class TestDot(object):
assert_dot_close(A_f_12, X_f_2, desired)
-class MatmulCommon(object):
+class MatmulCommon:
"""Common tests for '@' operator and numpy.matmul.
"""
@@ -6300,7 +6312,7 @@ if sys.version_info[:2] >= (3, 5):
def test_array_priority_override(self):
- class A(object):
+ class A:
__array_priority__ = 1000
def __matmul__(self, other):
@@ -6345,7 +6357,7 @@ if sys.version_info[:2] >= (3, 5):
assert f.shape == (4, 5)
-class TestInner(object):
+class TestInner:
def test_inner_type_mismatch(self):
c = 1.
@@ -6423,7 +6435,7 @@ class TestInner(object):
assert_equal(np.inner(b, a).transpose(2,3,0,1), desired)
-class TestAlen(object):
+class TestAlen:
def test_basic(self):
with pytest.warns(DeprecationWarning):
m = np.array([1, 2, 3])
@@ -6443,7 +6455,7 @@ class TestAlen(object):
assert_equal(np.alen(5), 1)
-class TestChoose(object):
+class TestChoose:
def setup(self):
self.x = 2*np.ones((3,), dtype=int)
self.y = 3*np.ones((3,), dtype=int)
@@ -6473,7 +6485,7 @@ class TestChoose(object):
assert(np.choose([0], ops).dtype == expected_dt)
-class TestRepeat(object):
+class TestRepeat:
def setup(self):
self.m = np.array([1, 2, 3, 4, 5, 6])
self.m_rect = self.m.reshape((2, 3))
@@ -6515,7 +6527,7 @@ NEIGH_MODE = {'zero': 0, 'one': 1, 'constant': 2, 'circular': 3, 'mirror': 4}
@pytest.mark.parametrize('dt', [float, Decimal], ids=['float', 'object'])
-class TestNeighborhoodIter(object):
+class TestNeighborhoodIter:
# Simple, 2d tests
def test_simple2d(self, dt):
# Test zero and one padding for simple data type
@@ -6594,7 +6606,7 @@ class TestNeighborhoodIter(object):
# Test stacking neighborhood iterators
-class TestStackedNeighborhoodIter(object):
+class TestStackedNeighborhoodIter:
# Simple, 1d test: stacking 2 constant-padded neigh iterators
def test_simple_const(self):
dt = np.float64
@@ -6744,7 +6756,7 @@ class TestStackedNeighborhoodIter(object):
x, [1, 1], NEIGH_MODE['zero'], [-1, 2], NEIGH_MODE['circular'])
assert_array_equal(l, r)
-class TestWarnings(object):
+class TestWarnings:
def test_complex_warning(self):
x = np.array([1, 2])
@@ -6756,7 +6768,7 @@ class TestWarnings(object):
assert_equal(x, [1, 2])
-class TestMinScalarType(object):
+class TestMinScalarType:
def test_usigned_shortshort(self):
dt = np.min_scalar_type(2**8-1)
@@ -6787,7 +6799,7 @@ class TestMinScalarType(object):
from numpy.core._internal import _dtype_from_pep3118
-class TestPEP3118Dtype(object):
+class TestPEP3118Dtype:
def _check(self, spec, wanted):
dt = np.dtype(wanted)
actual = _dtype_from_pep3118(spec)
@@ -6894,7 +6906,7 @@ class TestPEP3118Dtype(object):
self._check('i:f0:', [('f0', 'i')])
-class TestNewBufferProtocol(object):
+class TestNewBufferProtocol:
""" Test PEP3118 buffers """
def _check_roundtrip(self, obj):
@@ -7291,7 +7303,7 @@ class TestNewBufferProtocol(object):
assert_equal(arr['a'], 3)
-class TestArrayAttributeDeletion(object):
+class TestArrayAttributeDeletion:
def test_multiarray_writable_attributes_deletion(self):
        # ticket #2046, should not segfault, raise AttributeError
@@ -7326,7 +7338,7 @@ class TestArrayAttributeDeletion(object):
class TestArrayInterface():
- class Foo(object):
+ class Foo:
def __init__(self, value):
self.value = value
self.iface = {'typestr': 'f8'}
@@ -7371,7 +7383,7 @@ class TestArrayInterface():
assert_equal(pre_cnt, post_cnt)
def test_interface_no_shape():
- class ArrayLike(object):
+ class ArrayLike:
array = np.array(1)
__array_interface__ = array.__array_interface__
assert_equal(np.array(ArrayLike()), 1)
@@ -7393,7 +7405,7 @@ def test_array_interface_empty_shape():
interface1 = dict(arr.__array_interface__)
interface1['shape'] = ()
- class DummyArray1(object):
+ class DummyArray1:
__array_interface__ = interface1
# NOTE: Because Py2 str/Py3 bytes supports the buffer interface, setting
@@ -7403,7 +7415,7 @@ def test_array_interface_empty_shape():
interface2 = dict(interface1)
interface2['data'] = arr[0].tobytes()
- class DummyArray2(object):
+ class DummyArray2:
__array_interface__ = interface2
arr1 = np.asarray(DummyArray1())
@@ -7420,7 +7432,7 @@ def test_array_interface_offset():
interface['offset'] = 4
- class DummyArray(object):
+ class DummyArray:
__array_interface__ = interface
arr1 = np.asarray(DummyArray())
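# Illustrative sketch (not lines from this patch) of the protocol exercised by
# the DummyArray tests above: any object exposing __array_interface__ can be
# consumed by np.asarray, which reads 'shape', 'data' and 'offset' from it.
import numpy as np

base = np.arange(4.0)

class Wrapper:
    # reuse the interface of an existing array; 'base' must stay alive
    __array_interface__ = dict(base.__array_interface__)

np.testing.assert_array_equal(np.asarray(Wrapper()), base)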
@@ -7442,7 +7454,7 @@ def test_scalar_element_deletion():
assert_raises(ValueError, a[0].__delitem__, 'x')
-class TestMemEventHook(object):
+class TestMemEventHook:
def test_mem_seteventhook(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
@@ -7454,7 +7466,7 @@ class TestMemEventHook(object):
break_cycles()
_multiarray_tests.test_pydatamem_seteventhook_end()
-class TestMapIter(object):
+class TestMapIter:
def test_mapiter(self):
# The actual tests are within the C code in
# multiarray/_multiarray_tests.c.src
@@ -7476,7 +7488,7 @@ class TestMapIter(object):
assert_equal(b, [100.1, 51., 6., 3., 4., 5.])
-class TestAsCArray(object):
+class TestAsCArray:
def test_1darray(self):
array = np.arange(24, dtype=np.double)
from_c = _multiarray_tests.test_as_c_array(array, 3)
@@ -7493,7 +7505,7 @@ class TestAsCArray(object):
assert_equal(array[1, 2, 3], from_c)
-class TestConversion(object):
+class TestConversion:
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
@@ -7541,7 +7553,7 @@ class TestConversion(object):
assert_equal(bool(np.array([[42]])), True)
assert_raises(ValueError, bool, np.array([1, 2]))
- class NotConvertible(object):
+ class NotConvertible:
def __bool__(self):
raise NotImplementedError
__nonzero__ = __bool__ # python 2
@@ -7578,7 +7590,7 @@ class TestConversion(object):
assert_equal(3, int_func(np.array(HasTrunc())))
assert_equal(3, int_func(np.array([HasTrunc()])))
- class NotConvertible(object):
+ class NotConvertible:
def __int__(self):
raise NotImplementedError
assert_raises(NotImplementedError,
@@ -7587,7 +7599,7 @@ class TestConversion(object):
int_func, np.array([NotConvertible()]))
-class TestWhere(object):
+class TestWhere:
def test_basic(self):
dts = [bool, np.int16, np.int32, np.int64, np.double, np.complex128,
np.longdouble, np.clongdouble]
@@ -7754,7 +7766,7 @@ class TestWhere(object):
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
- class TestSizeOf(object):
+ class TestSizeOf:
def test_empty_array(self):
x = np.array([])
@@ -7787,7 +7799,7 @@ if not IS_PYPY:
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
- @no_tracing
+ @_no_tracing
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
@@ -7801,7 +7813,7 @@ if not IS_PYPY:
assert_raises(TypeError, d.__sizeof__, "a")
-class TestHashing(object):
+class TestHashing:
def test_arrays_not_hashable(self):
x = np.ones(3)
@@ -7812,7 +7824,7 @@ class TestHashing(object):
assert_(not isinstance(x, collections_abc.Hashable))
-class TestArrayPriority(object):
+class TestArrayPriority:
# This will go away when __array_priority__ is settled, meanwhile
# it serves to check unintended changes.
op = operator
@@ -7839,7 +7851,7 @@ class TestArrayPriority(object):
def __new__(cls, *args, **kwargs):
return np.array(*args, **kwargs).view(cls)
- class Other(object):
+ class Other:
__array_priority__ = 1000.
def _all(self, other):
@@ -7898,7 +7910,7 @@ class TestArrayPriority(object):
assert_(isinstance(f(b, a), self.Other), msg)
-class TestBytestringArrayNonzero(object):
+class TestBytestringArrayNonzero:
def test_empty_bstring_array_is_falsey(self):
assert_(not np.array([''], dtype=str))
@@ -7919,7 +7931,7 @@ class TestBytestringArrayNonzero(object):
assert_(a)
-class TestUnicodeArrayNonzero(object):
+class TestUnicodeArrayNonzero:
def test_empty_ustring_array_is_falsey(self):
assert_(not np.array([''], dtype=np.unicode_))
@@ -7940,7 +7952,7 @@ class TestUnicodeArrayNonzero(object):
assert_(a)
-class TestFormat(object):
+class TestFormat:
def test_0d(self):
a = np.array(np.pi)
@@ -7965,7 +7977,7 @@ class TestFormat(object):
from numpy.testing import IS_PYPY
-class TestCTypes(object):
+class TestCTypes:
def test_ctypes_is_available(self):
test_arr = np.array([[1, 2, 3], [4, 5, 6]])
@@ -8057,7 +8069,7 @@ class TestCTypes(object):
assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
-class TestWritebackIfCopy(object):
+class TestWritebackIfCopy:
# all these tests use the WRITEBACKIFCOPY mechanism
def test_argmax_with_out(self):
mat = np.eye(5)
@@ -8170,7 +8182,7 @@ class TestWritebackIfCopy(object):
assert_equal(arr, orig)
-class TestArange(object):
+class TestArange:
def test_infinite(self):
assert_raises_regex(
ValueError, "size exceeded",
@@ -8192,7 +8204,7 @@ class TestArange(object):
assert_raises(ZeroDivisionError, np.arange, 0.0, 0.0, 0.0)
-class TestArrayFinalize(object):
+class TestArrayFinalize:
""" Tests __array_finalize__ """
def test_receives_base(self):
@@ -8212,7 +8224,7 @@ class TestArrayFinalize(object):
raise Exception(self)
# a plain object can't be weakref'd
- class Dummy(object): pass
+ class Dummy: pass
# get a weak reference to an object within an array
obj_arr = np.array(Dummy())
@@ -8221,9 +8233,6 @@ class TestArrayFinalize(object):
# get an array that crashed in __array_finalize__
with assert_raises(Exception) as e:
obj_arr.view(RaisesInFinalize)
- if sys.version_info.major == 2:
- # prevent an extra reference being kept
- sys.exc_clear()
obj_subarray = e.exception.args[0]
del e
@@ -8252,7 +8261,7 @@ def test_equal_override():
# gh-9153: ndarray.__eq__ uses special logic for structured arrays, which
# did not respect overrides with __array_priority__ or __array_ufunc__.
# The PR fixed this for __array_priority__ and __array_ufunc__ = None.
- class MyAlwaysEqual(object):
+ class MyAlwaysEqual:
def __eq__(self, other):
return "eq"
@@ -8350,7 +8359,7 @@ def test_uintalignment_and_alignment():
dst = np.zeros((2,2), dtype='c8')
dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
-class TestAlignment(object):
+class TestAlignment:
# adapted from scipy._lib.tests.test__util.test__aligned_zeros
# Checks that unusual memory alignments don't trip up numpy.
# In particular, check RELAXED_STRIDES don't trip alignment assertions in
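# Illustrative sketch (not lines from this patch) of the kind of array these
# alignment checks target: reading float64 data one byte into a buffer gives
# an array whose ALIGNED flag is normally False, yet ufuncs must still work.
import numpy as np

raw = bytearray(8 * 4 + 1)
misaligned = np.frombuffer(raw, dtype=np.float64, count=4, offset=1)
print(misaligned.flags['ALIGNED'])   # usually False: the data pointer is odd
print(misaligned + 1.0)              # unaligned loops still produce results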
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 267076851..24272bb0d 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2186,7 +2186,7 @@ def test_iter_no_broadcast():
[['readonly'], ['readonly'], ['readonly', 'no_broadcast']])
-class TestIterNested(object):
+class TestIterNested:
def test_basic(self):
# Test nested iteration basic usage
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 8ad460a09..934f0a2fd 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -15,7 +15,7 @@ from numpy.testing import (
)
-class TestResize(object):
+class TestResize:
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
@@ -47,7 +47,7 @@ class TestResize(object):
assert_equal(A.dtype, Ar.dtype)
-class TestNonarrayArgs(object):
+class TestNonarrayArgs:
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
@@ -218,7 +218,7 @@ class TestNonarrayArgs(object):
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
-class TestIsscalar(object):
+class TestIsscalar:
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
@@ -234,7 +234,7 @@ class TestIsscalar(object):
assert_(np.isscalar(Number()))
-class TestBoolScalar(object):
+class TestBoolScalar:
def test_logical(self):
f = np.False_
t = np.True_
@@ -267,7 +267,7 @@ class TestBoolScalar(object):
assert_((f ^ f) is f)
-class TestBoolArray(object):
+class TestBoolArray:
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
@@ -354,7 +354,7 @@ class TestBoolArray(object):
assert_array_equal(self.im ^ False, self.im)
-class TestBoolCmp(object):
+class TestBoolCmp:
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
@@ -454,7 +454,7 @@ class TestBoolCmp(object):
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
-class TestSeterr(object):
+class TestSeterr:
def test_default(self):
err = np.geterr()
assert_equal(err,
@@ -535,7 +535,7 @@ class TestSeterr(object):
np.seterrobj(olderrobj)
-class TestFloatExceptions(object):
+class TestFloatExceptions:
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
@@ -631,7 +631,7 @@ class TestFloatExceptions(object):
assert_("underflow" in str(w[-1].message))
-class TestTypes(object):
+class TestTypes:
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
@@ -952,7 +952,7 @@ class NIterError(Exception):
pass
-class TestFromiter(object):
+class TestFromiter:
def makegen(self):
for x in range(24):
yield x**2
@@ -1003,7 +1003,7 @@ class TestFromiter(object):
self.load_data(count, eindex), dtype=int, count=count)
-class TestNonzero(object):
+class TestNonzero:
def test_nonzero_trivial(self):
assert_equal(np.count_nonzero(np.array([])), 0)
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
@@ -1286,7 +1286,7 @@ class TestNonzero(object):
assert_raises(ValueError, np.nonzero, a)
-class TestIndex(object):
+class TestIndex:
def test_boolean(self):
a = rand(3, 5, 8)
V = rand(5, 8)
@@ -1303,7 +1303,7 @@ class TestIndex(object):
assert_equal(c.dtype, np.dtype('int32'))
-class TestBinaryRepr(object):
+class TestBinaryRepr:
def test_zero(self):
assert_equal(np.binary_repr(0), '0')
@@ -1345,7 +1345,7 @@ class TestBinaryRepr(object):
'11' + '0'*62)
-class TestBaseRepr(object):
+class TestBaseRepr:
def test_base3(self):
assert_equal(np.base_repr(3**5, 3), '100000')
@@ -1367,7 +1367,7 @@ class TestBaseRepr(object):
np.base_repr(1, 37)
-class TestArrayComparisons(object):
+class TestArrayComparisons:
def test_array_equal(self):
res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
assert_(res)
@@ -1446,7 +1446,7 @@ def assert_array_strict_equal(x, y):
assert_(x.dtype.isnative == y.dtype.isnative)
-class TestClip(object):
+class TestClip:
def setup(self):
self.nr = 5
self.nc = 3
@@ -2020,7 +2020,7 @@ class TestClip(object):
assert_equal(actual, expected)
-class TestAllclose(object):
+class TestAllclose:
rtol = 1e-5
atol = 1e-8
@@ -2105,7 +2105,7 @@ class TestAllclose(object):
assert_(type(np.allclose(a, a)) is bool)
-class TestIsclose(object):
+class TestIsclose:
rtol = 1e-5
atol = 1e-8
@@ -2243,7 +2243,7 @@ class TestIsclose(object):
assert_(type(np.isclose(0, np.inf)) is np.bool_)
-class TestStdVar(object):
+class TestStdVar:
def setup(self):
self.A = np.array([1, -1, 1, -1])
self.real_var = 1
@@ -2282,7 +2282,7 @@ class TestStdVar(object):
assert_array_equal(r, out)
-class TestStdVarComplex(object):
+class TestStdVarComplex:
def test_basic(self):
A = np.array([1, 1.j, -1, -1.j])
real_var = 1
@@ -2294,7 +2294,7 @@ class TestStdVarComplex(object):
assert_equal(np.std(1j), 0)
-class TestCreationFuncs(object):
+class TestCreationFuncs:
# Test ones, zeros, empty and full.
def setup(self):
@@ -2365,7 +2365,7 @@ class TestCreationFuncs(object):
assert_(sys.getrefcount(dim) == beg)
-class TestLikeFuncs(object):
+class TestLikeFuncs:
'''Test ones_like, zeros_like, empty_like and full_like'''
def setup(self):
@@ -2515,7 +2515,7 @@ class TestLikeFuncs(object):
self.check_like_function(np.full_like, np.inf, True)
-class TestCorrelate(object):
+class TestCorrelate:
def _setup(self, dt):
self.x = np.array([1, 2, 3, 4, 5], dtype=dt)
self.xs = np.arange(1, 20)[::3]
@@ -2571,7 +2571,7 @@ class TestCorrelate(object):
with pytest.raises(ValueError):
np.correlate(np.ones(1000), np.array([]), mode='full')
-class TestConvolve(object):
+class TestConvolve:
def test_object(self):
d = [1.] * 100
k = [1.] * 3
@@ -2585,7 +2585,7 @@ class TestConvolve(object):
assert_array_equal(k, np.ones(3))
-class TestArgwhere(object):
+class TestArgwhere:
@pytest.mark.parametrize('nd', [0, 1, 2])
def test_nd(self, nd):
@@ -2622,7 +2622,7 @@ class TestArgwhere(object):
assert_equal(np.argwhere([4, 0, 2, 1, 3]), [[0], [2], [3], [4]])
-class TestStringFunction(object):
+class TestStringFunction:
def test_set_string_function(self):
a = np.array([1])
@@ -2637,7 +2637,7 @@ class TestStringFunction(object):
assert_equal(str(a), "[1]")
-class TestRoll(object):
+class TestRoll:
def test_roll1d(self):
x = np.arange(10)
xr = np.roll(x, 2)
@@ -2695,7 +2695,7 @@ class TestRoll(object):
assert_equal(np.roll(x, 1), np.array([]))
-class TestRollaxis(object):
+class TestRollaxis:
# expected shape indexed by (axis, start) for array of
# shape (1, 2, 3, 4)
@@ -2757,7 +2757,7 @@ class TestRollaxis(object):
assert_(not res.flags['OWNDATA'])
-class TestMoveaxis(object):
+class TestMoveaxis:
def test_move_to_end(self):
x = np.random.randn(5, 6, 7)
for source, expected in [(0, (6, 7, 5)),
@@ -2831,7 +2831,7 @@ class TestMoveaxis(object):
assert_(isinstance(result, np.ndarray))
-class TestCross(object):
+class TestCross:
def test_2x2(self):
u = [1, 2]
v = [3, 4]
@@ -2920,7 +2920,7 @@ def test_outer_out_param():
assert_equal(np.outer(arr2, arr3, out2), out2)
-class TestIndices(object):
+class TestIndices:
def test_simple(self):
[x, y] = np.indices((4, 3))
@@ -2961,7 +2961,7 @@ class TestIndices(object):
assert_(arr.dtype == dtype)
-class TestRequire(object):
+class TestRequire:
flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
'F', 'F_CONTIGUOUS', 'FORTRAN',
'A', 'ALIGNED',
@@ -3035,7 +3035,7 @@ class TestRequire(object):
self.set_and_check_flag(flag, None, a)
-class TestBroadcast(object):
+class TestBroadcast:
def test_broadcast_in_args(self):
# gh-5881
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
@@ -3086,7 +3086,7 @@ class TestBroadcast(object):
assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
-class TestKeepdims(object):
+class TestKeepdims:
class sub_array(np.ndarray):
def sum(self, axis=None, dtype=None, out=None):
@@ -3098,7 +3098,7 @@ class TestKeepdims(object):
assert_raises(TypeError, np.sum, x, keepdims=True)
-class TestTensordot(object):
+class TestTensordot:
def test_zero_dimension(self):
# Test resolution to issue #5663
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 439aa3ff9..7105154ed 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -98,7 +98,7 @@ def normalize_descr(descr):
# Creation tests
############################################################
-class CreateZeros(object):
+class CreateZeros:
"""Check the creation of heterogeneous arrays zero-valued"""
def test_zeros0D(self):
@@ -141,7 +141,7 @@ class TestCreateZerosNested(CreateZeros):
_descr = Ndescr
-class CreateValues(object):
+class CreateValues:
"""Check the creation of heterogeneous arrays with values"""
def test_tuple(self):
@@ -201,7 +201,7 @@ class TestCreateValuesNestedMultiple(CreateValues):
# Reading tests
############################################################
-class ReadValuesPlain(object):
+class ReadValuesPlain:
"""Check the reading of values in heterogeneous arrays (plain)"""
def test_access_fields(self):
@@ -233,7 +233,7 @@ class TestReadValuesPlainMultiple(ReadValuesPlain):
multiple_rows = 1
_buffer = PbufferT
-class ReadValuesNested(object):
+class ReadValuesNested:
"""Check the reading of values in heterogeneous arrays (nested)"""
def test_access_top_fields(self):
@@ -331,14 +331,14 @@ class TestReadValuesNestedMultiple(ReadValuesNested):
multiple_rows = True
_buffer = NbufferT
-class TestEmptyField(object):
+class TestEmptyField:
def test_assign(self):
a = np.arange(10, dtype=np.float32)
a.dtype = [("int", "<0i4"), ("float", "<2f4")]
assert_(a['int'].shape == (5, 0))
assert_(a['float'].shape == (5, 2))
-class TestCommonType(object):
+class TestCommonType:
def test_scalar_loses1(self):
res = np.find_common_type(['f4', 'f4', 'i2'], ['f8'])
assert_(res == 'f4')
@@ -359,7 +359,7 @@ class TestCommonType(object):
res = np.find_common_type(['u8', 'i8', 'i8'], ['f8'])
assert_(res == 'f8')
-class TestMultipleFields(object):
+class TestMultipleFields:
def setup(self):
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
@@ -374,7 +374,7 @@ class TestMultipleFields(object):
assert_(res == [(1, 3), (5, 7)])
-class TestIsSubDType(object):
+class TestIsSubDType:
# scalar types can be promoted into dtypes
wrappers = [np.dtype, lambda x: x]
@@ -405,18 +405,18 @@ class TestIsSubDType(object):
assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
-class TestSctypeDict(object):
+class TestSctypeDict:
def test_longdouble(self):
assert_(np.sctypeDict['f8'] is not np.longdouble)
assert_(np.sctypeDict['c16'] is not np.clongdouble)
-class TestBitName(object):
+class TestBitName:
def test_abstract(self):
assert_raises(ValueError, np.core.numerictypes.bitname, np.floating)
-class TestMaximumSctype(object):
+class TestMaximumSctype:
# note that parametrizing with sctype['int'] and similar would skip types
# with the same size (gh-11923)
@@ -442,7 +442,7 @@ class TestMaximumSctype(object):
assert_equal(np.maximum_sctype(t), t)
-class Test_sctype2char(object):
+class Test_sctype2char:
# This function is old enough that we're really just documenting the quirks
# at this point.
@@ -490,7 +490,7 @@ def test_issctype(rep, expected):
@pytest.mark.skipif(sys.flags.optimize > 1,
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
-class TestDocStrings(object):
+class TestDocStrings:
def test_platform_dependent_aliases(self):
if np.int64 is np.int_:
assert_('int64' in np.int_.__doc__)
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 8a1cccb74..7e73d8c03 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -34,7 +34,7 @@ def dispatched_two_arg(array1, array2):
return 'original'
-class TestGetImplementingArgs(object):
+class TestGetImplementingArgs:
def test_ndarray(self):
array = np.array(1)
@@ -75,7 +75,7 @@ class TestGetImplementingArgs(object):
def test_ndarray_and_duck_array(self):
- class Other(object):
+ class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
@@ -92,7 +92,7 @@ class TestGetImplementingArgs(object):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
- class Other(object):
+ class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
@@ -106,7 +106,7 @@ class TestGetImplementingArgs(object):
def test_many_duck_arrays(self):
- class A(object):
+ class A:
__array_function__ = _return_not_implemented
class B(A):
@@ -115,7 +115,7 @@ class TestGetImplementingArgs(object):
class C(A):
__array_function__ = _return_not_implemented
- class D(object):
+ class D:
__array_function__ = _return_not_implemented
a = A()
@@ -145,12 +145,12 @@ class TestGetImplementingArgs(object):
_get_implementing_args(relevant_args)
-class TestNDArrayArrayFunction(object):
+class TestNDArrayArrayFunction:
@requires_array_function
def test_method(self):
- class Other(object):
+ class Other:
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
@@ -207,7 +207,7 @@ class TestNDArrayArrayFunction(object):
@requires_array_function
-class TestArrayFunctionDispatch(object):
+class TestArrayFunctionDispatch:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
@@ -222,7 +222,7 @@ class TestArrayFunctionDispatch(object):
def test_interface(self):
- class MyArray(object):
+ class MyArray:
def __array_function__(self, func, types, args, kwargs):
return (self, func, types, args, kwargs)
@@ -237,7 +237,7 @@ class TestArrayFunctionDispatch(object):
def test_not_implemented(self):
- class MyArray(object):
+ class MyArray:
def __array_function__(self, func, types, args, kwargs):
return NotImplemented
@@ -247,7 +247,7 @@ class TestArrayFunctionDispatch(object):
@requires_array_function
-class TestVerifyMatchingSignatures(object):
+class TestVerifyMatchingSignatures:
def test_verify_matching_signatures(self):
@@ -281,7 +281,7 @@ def _new_duck_type_and_implements():
"""Create a duck array type and implements functions."""
HANDLED_FUNCTIONS = {}
- class MyArray(object):
+ class MyArray:
def __array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
@@ -300,7 +300,7 @@ def _new_duck_type_and_implements():
@requires_array_function
-class TestArrayFunctionImplementation(object):
+class TestArrayFunctionImplementation:
def test_one_arg(self):
MyArray, implements = _new_duck_type_and_implements()
@@ -353,7 +353,7 @@ class TestArrayFunctionImplementation(object):
func(MyArray())
-class TestNDArrayMethods(object):
+class TestNDArrayMethods:
def test_repr(self):
# gh-12162: should still be defined even if __array_function__ doesn't
@@ -368,7 +368,7 @@ class TestNDArrayMethods(object):
assert_equal(str(array), '1')
-class TestNumPyFunctions(object):
+class TestNumPyFunctions:
def test_set_module(self):
assert_equal(np.sum.__module__, 'numpy')
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index e2c8d12f0..4b4bd2729 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -18,7 +18,7 @@ from numpy.testing import (
from numpy.compat import pickle
-class TestFromrecords(object):
+class TestFromrecords:
def test_fromrecords(self):
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
names='col1,col2,col3')
@@ -324,7 +324,7 @@ class TestFromrecords(object):
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
-class TestPathUsage(object):
+class TestPathUsage:
# Test that pathlib.Path can be used
def test_tofile_fromfile(self):
with temppath(suffix='.bin') as path:
@@ -340,7 +340,7 @@ class TestPathUsage(object):
assert_array_equal(x, a)
-class TestRecord(object):
+class TestRecord:
def setup(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
dtype=[("col1", "<i4"),
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index d1116960b..c77c11d41 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -22,7 +22,7 @@ try:
except NameError:
RecursionError = RuntimeError # python < 3.5
-class TestRegression(object):
+class TestRegression:
def test_invalid_round(self):
# Ticket #3
v = 4.7599999999999998
@@ -426,7 +426,7 @@ class TestRegression(object):
def test_lexsort_invalid_sequence(self):
# Issue gh-4123
- class BuggySequence(object):
+ class BuggySequence:
def __len__(self):
return 4
@@ -1039,7 +1039,7 @@ class TestRegression(object):
def test_mem_custom_float_to_array(self):
# Ticket 702
- class MyFloat(object):
+ class MyFloat:
def __float__(self):
return 1.0
@@ -1048,7 +1048,7 @@ class TestRegression(object):
def test_object_array_refcount_self_assign(self):
# Ticket #711
- class VictimObject(object):
+ class VictimObject:
deleted = False
def __del__(self):
@@ -2231,7 +2231,7 @@ class TestRegression(object):
import operator as op
# dummy class where __array__ throws exception
- class Foo(object):
+ class Foo:
__array_priority__ = 1002
def __array__(self, *args, **kwargs):
@@ -2480,7 +2480,7 @@ class TestRegression(object):
assert_equal(pickle.loads(dumped), arr)
def test_bad_array_interface(self):
- class T(object):
+ class T:
__array_interface__ = {}
np.array([T()])
diff --git a/numpy/core/tests/test_scalar_ctors.py b/numpy/core/tests/test_scalar_ctors.py
index dbe57de02..ae29d0605 100644
--- a/numpy/core/tests/test_scalar_ctors.py
+++ b/numpy/core/tests/test_scalar_ctors.py
@@ -10,7 +10,7 @@ from numpy.testing import (
assert_equal, assert_almost_equal, assert_raises, assert_warns,
)
-class TestFromString(object):
+class TestFromString:
def test_floating(self):
# Ticket #640, floats from string
fsingle = np.single('1.234')
@@ -54,7 +54,7 @@ class TestFromString(object):
assert_equal(255, np.intp('0xFF', 16))
-class TestFromInt(object):
+class TestFromInt:
def test_intp(self):
# Ticket #99
assert_equal(1024, np.intp(1024))
diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py
index ab16b4e67..c9de3e402 100644
--- a/numpy/core/tests/test_scalar_methods.py
+++ b/numpy/core/tests/test_scalar_methods.py
@@ -14,7 +14,7 @@ from numpy.testing import (
dec
)
-class TestAsIntegerRatio(object):
+class TestAsIntegerRatio:
# derived in part from the cpython test "test_floatasratio"
@pytest.mark.parametrize("ftype", [
diff --git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py
index 3ded7eecd..85673e3ab 100644
--- a/numpy/core/tests/test_scalarbuffer.py
+++ b/numpy/core/tests/test_scalarbuffer.py
@@ -33,7 +33,7 @@ scalars_only, codes_only = zip(*scalars_and_codes)
@pytest.mark.skipif(sys.version_info.major < 3,
reason="Python 2 scalars lack a buffer interface")
-class TestScalarPEP3118(object):
+class TestScalarPEP3118:
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
def test_scalar_match_array(self, scalar):
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index a2e34fdee..af3669d73 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -6,7 +6,7 @@ import numpy as np
from numpy.testing import assert_
-class A(object):
+class A:
pass
class B(A, np.float64):
pass
@@ -21,7 +21,7 @@ class B0(np.float64, A):
class C0(B0):
pass
-class TestInherit(object):
+class TestInherit:
def test_init(self):
x = B(1.0)
assert_(str(x) == '1.0')
@@ -37,7 +37,7 @@ class TestInherit(object):
assert_(str(y) == '2.0')
-class TestCharacter(object):
+class TestCharacter:
def test_char_radd(self):
        # GH issue 9620: reached gentype_add and raised TypeError
np_s = np.string_('abc')
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 25a8b6526..789ef4acd 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -23,7 +23,7 @@ complex_floating_types = np.complexfloating.__subclasses__()
# This compares scalarmath against ufuncs.
-class TestTypes(object):
+class TestTypes:
def test_types(self):
for atype in types:
a = atype(1)
@@ -62,7 +62,7 @@ class TestTypes(object):
np.add(1, 1)
-class TestBaseMath(object):
+class TestBaseMath:
def test_blocked(self):
        # test alignment offsets for simd instructions
# alignments for vz + 2 * (vs - 1) + 1
@@ -108,7 +108,7 @@ class TestBaseMath(object):
np.add(d, np.ones_like(d))
-class TestPower(object):
+class TestPower:
def test_small_types(self):
for t in [np.int8, np.int16, np.float16]:
a = t(3)
@@ -200,7 +200,7 @@ def _signs(dt):
return (+1, -1)
-class TestModulus(object):
+class TestModulus:
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -302,7 +302,7 @@ class TestModulus(object):
a //= b
-class TestComplexDivision(object):
+class TestComplexDivision:
def test_zero_division(self):
with np.errstate(all="ignore"):
for t in [np.complex64, np.complex128]:
@@ -374,7 +374,7 @@ class TestComplexDivision(object):
assert_equal(result.imag, ex[1])
-class TestConversion(object):
+class TestConversion:
def test_int_from_long(self):
l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
@@ -510,7 +510,7 @@ class TestConversion(object):
assert_(np.equal(np.datetime64('NaT'), None))
-#class TestRepr(object):
+#class TestRepr:
# def test_repr(self):
# for t in types:
# val = t(1197346475.0137341)
@@ -519,7 +519,7 @@ class TestConversion(object):
# assert_equal( val, val2 )
-class TestRepr(object):
+class TestRepr:
def _test_type_repr(self, t):
finfo = np.finfo(t)
last_fraction_bit_idx = finfo.nexp + finfo.nmant
@@ -554,7 +554,7 @@ class TestRepr(object):
if not IS_PYPY:
# sys.getsizeof() is not valid on PyPy
- class TestSizeOf(object):
+ class TestSizeOf:
def test_equal_nbytes(self):
for type in types:
@@ -566,7 +566,7 @@ if not IS_PYPY:
assert_raises(TypeError, d.__sizeof__, "a")
-class TestMultiply(object):
+class TestMultiply:
def test_seq_repeat(self):
# Test that basic sequences get repeated when multiplied with
# numpy integers. And errors are raised when multiplied with others.
@@ -603,7 +603,7 @@ class TestMultiply(object):
# Test that an array-like which does not know how to be multiplied
# does not attempt sequence repeat (raise TypeError).
# See also gh-7428.
- class ArrayLike(object):
+ class ArrayLike:
def __init__(self, arr):
self.arr = arr
def __array__(self):
@@ -617,7 +617,7 @@ class TestMultiply(object):
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
-class TestNegative(object):
+class TestNegative:
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.neg, a)
@@ -631,7 +631,7 @@ class TestNegative(object):
assert_equal(operator.neg(a) + a, 0)
-class TestSubtract(object):
+class TestSubtract:
def test_exceptions(self):
a = np.ones((), dtype=np.bool_)[()]
assert_raises(TypeError, operator.sub, a, a)
@@ -645,7 +645,7 @@ class TestSubtract(object):
assert_equal(operator.sub(a, a), 0)
-class TestAbs(object):
+class TestAbs:
def _test_abs_func(self, absfunc):
for tp in floating_types + complex_floating_types:
x = tp(-1.5)
@@ -674,7 +674,7 @@ class TestAbs(object):
self._test_abs_func(np.abs)
-class TestBitShifts(object):
+class TestBitShifts:
@pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
@pytest.mark.parametrize('op',
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index b12fdb249..225b8295f 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -10,7 +10,7 @@ from tempfile import TemporaryFile
import numpy as np
from numpy.testing import assert_, assert_equal, suppress_warnings
-class TestRealScalars(object):
+class TestRealScalars:
def test_str(self):
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
styps = [np.float16, np.float32, np.float64, np.longdouble]
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index 723c41d6e..d2f5287a6 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -14,7 +14,7 @@ from numpy.testing import (
from numpy.compat import long
-class TestAtleast1d(object):
+class TestAtleast1d:
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -55,7 +55,7 @@ class TestAtleast1d(object):
assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
-class TestAtleast2d(object):
+class TestAtleast2d:
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -94,7 +94,7 @@ class TestAtleast2d(object):
assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
-class TestAtleast3d(object):
+class TestAtleast3d:
def test_0D_array(self):
a = array(1)
b = array(2)
@@ -126,7 +126,7 @@ class TestAtleast3d(object):
assert_array_equal(res, desired)
-class TestHstack(object):
+class TestHstack:
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
@@ -163,7 +163,7 @@ class TestHstack(object):
hstack(map(lambda x: x, np.ones((3, 2))))
-class TestVstack(object):
+class TestVstack:
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
@@ -203,7 +203,7 @@ class TestVstack(object):
vstack((np.arange(3) for _ in range(2)))
-class TestConcatenate(object):
+class TestConcatenate:
def test_returns_copy(self):
a = np.eye(3)
b = np.concatenate([a])
@@ -407,7 +407,7 @@ def test_stack():
assert_array_equal(result, np.array([0, 1, 2]))
-class TestBlock(object):
+class TestBlock:
@pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
def block(self, request):
# blocking small arrays and large arrays go through different paths.
@@ -705,7 +705,7 @@ class TestBlock(object):
def test_block_dispatcher():
- class ArrayLike(object):
+ class ArrayLike:
pass
a = ArrayLike()
b = ArrayLike()
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 959345785..679bea96a 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -21,7 +21,7 @@ UNARY_UFUNCS = [obj for obj in np.core.umath.__dict__.values()
UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
-class TestUfuncKwargs(object):
+class TestUfuncKwargs:
def test_kwarg_exact(self):
assert_raises(TypeError, np.add, 1, 2, castingx='safe')
assert_raises(TypeError, np.add, 1, 2, dtypex=int)
@@ -47,7 +47,7 @@ class TestUfuncKwargs(object):
assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
-class TestUfuncGenericLoops(object):
+class TestUfuncGenericLoops:
"""Test generic loops.
The loops to be tested are:
@@ -116,7 +116,7 @@ class TestUfuncGenericLoops(object):
assert_equal(ys.dtype, output_dtype)
# class to use in testing object method loops
- class foo(object):
+ class foo:
def conjugate(self):
return np.bool_(1)
@@ -177,7 +177,7 @@ class TestUfuncGenericLoops(object):
assert_array_equal(res_num.astype("O"), res_obj)
-class TestUfunc(object):
+class TestUfunc:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_(pickle.loads(pickle.dumps(np.sin,
@@ -1121,7 +1121,7 @@ class TestUfunc(object):
assert_equal(np.logical_and.reduce(a), None)
def test_object_comparison(self):
- class HasComparisons(object):
+ class HasComparisons:
def __eq__(self, other):
return '=='
@@ -1590,7 +1590,7 @@ class TestUfunc(object):
def test_custom_array_like(self):
- class MyThing(object):
+ class MyThing:
__array_priority__ = 1000
rmul_count = 0
@@ -1982,3 +1982,21 @@ def test_ufunc_noncontiguous(ufunc):
assert_allclose(res_c, res_n, atol=tol, rtol=tol)
else:
assert_equal(c_ar, n_ar)
+
+
+@pytest.mark.parametrize('ufunc', [np.sign, np.equal])
+def test_ufunc_warn_with_nan(ufunc):
+ # issue gh-15127
+ # test that calling certain ufuncs with a non-standard `nan` value does not
+ # emit a warning
+ # `b` holds a 64 bit signaling nan: the most significant bit of the
+ # significand is zero.
+ b = np.array([0x7ff0000000000001], 'i8').view('f8')
+ assert np.isnan(b)
+ if ufunc.nin == 1:
+ ufunc(b)
+ elif ufunc.nin == 2:
+ ufunc(b, b.copy())
+ else:
+ raise ValueError('ufunc with more than 2 inputs')
+
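# Illustrative sketch (not lines from this patch) of the bit pattern built in
# test_ufunc_warn_with_nan above: for IEEE-754 binary64 the exponent bits are
# all ones for any NaN, and on most platforms the top significand bit is set
# for a quiet NaN and clear for a signaling NaN.
import numpy as np

signaling = np.array([0x7ff0000000000001], 'i8').view('f8')  # top mantissa bit clear
quiet     = np.array([0x7ff8000000000000], 'i8').view('f8')  # top mantissa bit set
assert np.isnan(signaling[0]) and np.isnan(quiet[0])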
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index ad0bc9a54..e966eebf0 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -21,7 +21,7 @@ def on_powerpc():
platform.machine().startswith('ppc')
-class _FilterInvalids(object):
+class _FilterInvalids:
def setup(self):
self.olderr = np.seterr(invalid='ignore')
@@ -29,7 +29,7 @@ class _FilterInvalids(object):
np.seterr(**self.olderr)
-class TestConstants(object):
+class TestConstants:
def test_pi(self):
assert_allclose(ncu.pi, 3.141592653589793, 1e-15)
@@ -40,7 +40,7 @@ class TestConstants(object):
assert_allclose(ncu.euler_gamma, 0.5772156649015329, 1e-15)
-class TestOut(object):
+class TestOut:
def test_out_subok(self):
for subok in (True, False):
a = np.array(0.5)
@@ -166,7 +166,7 @@ class TestOut(object):
r1, r2 = np.frexp(d, out=o1, subok=subok)
-class TestComparisons(object):
+class TestComparisons:
def test_ignore_object_identity_in_equal(self):
# Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
@@ -174,7 +174,7 @@ class TestComparisons(object):
assert_raises(ValueError, np.equal, a, a)
# Check error raised when comparing identical non-comparable objects.
- class FunkyType(object):
+ class FunkyType:
def __eq__(self, other):
raise TypeError("I won't compare")
@@ -192,7 +192,7 @@ class TestComparisons(object):
assert_raises(ValueError, np.not_equal, a, a)
# Check error raised when comparing identical non-comparable objects.
- class FunkyType(object):
+ class FunkyType:
def __ne__(self, other):
raise TypeError("I won't compare")
@@ -204,7 +204,7 @@ class TestComparisons(object):
assert_equal(np.not_equal(a, a), [True])
-class TestAdd(object):
+class TestAdd:
def test_reduce_alignment(self):
# gh-9876
# make sure arrays with weird strides work with the optimizations in
@@ -215,7 +215,7 @@ class TestAdd(object):
assert_equal(a['b'].sum(), 0)
-class TestDivision(object):
+class TestDivision:
def test_division_int(self):
# int division should follow Python
x = np.array([5, 10, 90, 100, -5, -10, -90, -100, -120])
@@ -282,7 +282,7 @@ def _signs(dt):
return (+1, -1)
-class TestRemainder(object):
+class TestRemainder:
def test_remainder_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
@@ -373,7 +373,7 @@ class TestRemainder(object):
assert_(np.isnan(rem), 'dt: %s, rem: %s' % (dt, rem))
-class TestCbrt(object):
+class TestCbrt:
def test_cbrt_scalar(self):
assert_almost_equal((np.cbrt(np.float32(-2.5)**3)), -2.5)
@@ -386,7 +386,7 @@ class TestCbrt(object):
assert_equal(np.cbrt(-np.inf), -np.inf)
-class TestPower(object):
+class TestPower:
def test_power_float(self):
x = np.array([1., 2., 3.])
assert_equal(x**0, [1., 1., 1.])
@@ -525,7 +525,7 @@ class TestPower(object):
assert_raises(ValueError, np.power, one, minusone)
-class TestFloat_power(object):
+class TestFloat_power:
def test_type_conversion(self):
arg_type = '?bhilBHILefdgFDG'
res_type = 'ddddddddddddgDDG'
@@ -536,7 +536,7 @@ class TestFloat_power(object):
assert_(res.dtype.name == np.dtype(dtout).name, msg)
-class TestLog2(object):
+class TestLog2:
def test_log2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -567,7 +567,7 @@ class TestLog2(object):
assert_(w[2].category is RuntimeWarning)
-class TestExp2(object):
+class TestExp2:
def test_exp2_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -619,7 +619,7 @@ class TestLogAddExp2(_FilterInvalids):
assert_(np.isnan(np.logaddexp2(np.nan, np.nan)))
-class TestLog(object):
+class TestLog:
def test_log_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -630,7 +630,7 @@ class TestLog(object):
assert_almost_equal(np.log(xf), yf)
-class TestExp(object):
+class TestExp:
def test_exp_values(self):
x = [1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024]
y = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
@@ -640,7 +640,7 @@ class TestExp(object):
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
-class TestSpecialFloats(object):
+class TestSpecialFloats:
def test_exp_values(self):
x = [np.nan, np.nan, np.inf, 0.]
y = [np.nan, -np.nan, np.inf, -np.inf]
@@ -742,7 +742,7 @@ avx_ufuncs = {'sqrt' :[1, 0., 100.],
'ceil' :[0, -100., 100.],
'trunc' :[0, -100., 100.]}
-class TestAVXUfuncs(object):
+class TestAVXUfuncs:
def test_avx_based_ufunc(self):
strides = np.array([-4,-3,-2,-1,1,2,3,4])
np.random.seed(42)
@@ -774,7 +774,7 @@ class TestAVXUfuncs(object):
assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
-class TestAVXFloat32Transcendental(object):
+class TestAVXFloat32Transcendental:
def test_exp_float32(self):
np.random.seed(42)
x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
@@ -860,7 +860,7 @@ class TestLogAddExp(_FilterInvalids):
assert_equal(np.logaddexp.reduce([]), -np.inf)
-class TestLog1p(object):
+class TestLog1p:
def test_log1p(self):
assert_almost_equal(ncu.log1p(0.2), ncu.log(1.2))
assert_almost_equal(ncu.log1p(1e-6), ncu.log(1+1e-6))
@@ -874,7 +874,7 @@ class TestLog1p(object):
assert_equal(ncu.log1p(-np.inf), np.nan)
-class TestExpm1(object):
+class TestExpm1:
def test_expm1(self):
assert_almost_equal(ncu.expm1(0.2), ncu.exp(0.2)-1)
assert_almost_equal(ncu.expm1(1e-6), ncu.exp(1e-6)-1)
@@ -893,7 +893,7 @@ class TestExpm1(object):
assert_allclose(x, ncu.expm1(x))
-class TestHypot(object):
+class TestHypot:
def test_simple(self):
assert_almost_equal(ncu.hypot(1, 1), ncu.sqrt(2))
assert_almost_equal(ncu.hypot(0, 0), 0)
@@ -917,7 +917,7 @@ def assert_hypot_isinf(x, y):
"hypot(%s, %s) is %s, not inf" % (x, y, ncu.hypot(x, y)))
-class TestHypotSpecialValues(object):
+class TestHypotSpecialValues:
def test_nan_outputs(self):
assert_hypot_isnan(np.nan, np.nan)
assert_hypot_isnan(np.nan, 1)
@@ -954,7 +954,7 @@ def assert_arctan2_isnzero(x, y):
assert_((ncu.arctan2(x, y) == 0 and np.signbit(ncu.arctan2(x, y))), "arctan(%s, %s) is %s, not -0" % (x, y, ncu.arctan2(x, y)))
-class TestArctan2SpecialValues(object):
+class TestArctan2SpecialValues:
def test_one_one(self):
# atan2(1, 1) returns pi/4.
assert_almost_equal(ncu.arctan2(1, 1), 0.25 * np.pi)
@@ -1023,7 +1023,7 @@ class TestArctan2SpecialValues(object):
assert_arctan2_isnan(np.nan, np.nan)
-class TestLdexp(object):
+class TestLdexp:
def _check_ldexp(self, tp):
assert_almost_equal(ncu.ldexp(np.array(2., np.float32),
np.array(3, tp)), 16.)
@@ -1251,7 +1251,7 @@ class TestFmin(_FilterInvalids):
assert_equal(np.fmin(arg1, arg2), out)
-class TestBool(object):
+class TestBool:
def test_exceptions(self):
a = np.ones(1, dtype=np.bool_)
assert_raises(TypeError, np.negative, a)
@@ -1314,7 +1314,7 @@ class TestBool(object):
assert_equal(np.logical_xor.reduce(arr), arr.sum() % 2 == 1)
-class TestBitwiseUFuncs(object):
+class TestBitwiseUFuncs:
bitwise_types = [np.dtype(c) for c in '?' + 'bBhHiIlLqQ' + 'O']
@@ -1398,7 +1398,7 @@ class TestBitwiseUFuncs(object):
assert_(type(f.reduce(btype)) is bool, msg)
-class TestInt(object):
+class TestInt:
def test_logical_not(self):
x = np.ones(10, dtype=np.int16)
o = np.ones(10 * 2, dtype=bool)
@@ -1409,24 +1409,24 @@ class TestInt(object):
assert_array_equal(o, tgt)
-class TestFloatingPoint(object):
+class TestFloatingPoint:
def test_floating_point(self):
assert_equal(ncu.FLOATING_POINT_SUPPORT, 1)
-class TestDegrees(object):
+class TestDegrees:
def test_degrees(self):
assert_almost_equal(ncu.degrees(np.pi), 180.0)
assert_almost_equal(ncu.degrees(-0.5*np.pi), -90.0)
-class TestRadians(object):
+class TestRadians:
def test_radians(self):
assert_almost_equal(ncu.radians(180.0), np.pi)
assert_almost_equal(ncu.radians(-90.0), -0.5*np.pi)
-class TestHeavside(object):
+class TestHeavside:
def test_heaviside(self):
x = np.array([[-30.0, -0.1, 0.0, 0.2], [7.5, np.nan, np.inf, -np.inf]])
expectedhalf = np.array([[0.0, 0.0, 0.5, 1.0], [1.0, np.nan, 1.0, 0.0]])
@@ -1448,7 +1448,7 @@ class TestHeavside(object):
assert_equal(h, expected1.astype(np.float32))
-class TestSign(object):
+class TestSign:
def test_sign(self):
a = np.array([np.inf, -np.inf, np.nan, 0.0, 3.0, -3.0])
out = np.zeros(a.shape)
@@ -1479,7 +1479,7 @@ class TestSign(object):
assert_raises(TypeError, test_nan)
-class TestMinMax(object):
+class TestMinMax:
def test_minmax_blocked(self):
# simd tests on max/min, test all alignments, slow but important
# for 2 * vz + 2 * (vs - 1) + 1 (unrolled once)
@@ -1522,7 +1522,7 @@ class TestMinMax(object):
assert_equal(a, np.nan)
-class TestAbsoluteNegative(object):
+class TestAbsoluteNegative:
def test_abs_neg_blocked(self):
# simd tests on abs, test all alignments for vz + 2 * (vs - 1) + 1
for dt, sz in [(np.float32, 11), (np.float64, 5)]:
@@ -1564,7 +1564,7 @@ class TestAbsoluteNegative(object):
np.abs(np.ones_like(d), out=d)
-class TestPositive(object):
+class TestPositive:
def test_valid(self):
valid_dtypes = [int, float, complex, object]
for dtype in valid_dtypes:
@@ -1583,10 +1583,10 @@ class TestPositive(object):
np.positive(np.array(['bar'], dtype=object))
-class TestSpecialMethods(object):
+class TestSpecialMethods:
def test_wrap(self):
- class with_wrap(object):
+ class with_wrap:
def __array__(self):
return np.zeros(1)
@@ -1690,7 +1690,7 @@ class TestSpecialMethods(object):
def test_old_wrap(self):
- class with_wrap(object):
+ class with_wrap:
def __array__(self):
return np.zeros(1)
@@ -1705,7 +1705,7 @@ class TestSpecialMethods(object):
def test_priority(self):
- class A(object):
+ class A:
def __array__(self):
return np.zeros(1)
@@ -1748,7 +1748,7 @@ class TestSpecialMethods(object):
def test_failing_wrap(self):
- class A(object):
+ class A:
def __array__(self):
return np.zeros(2)
@@ -1780,7 +1780,7 @@ class TestSpecialMethods(object):
def test_none_wrap(self):
# Tests that issue #8507 is resolved. Previously, this would segfault
- class A(object):
+ class A:
def __array__(self):
return np.zeros(1)
@@ -1792,7 +1792,7 @@ class TestSpecialMethods(object):
def test_default_prepare(self):
- class with_wrap(object):
+ class with_wrap:
__array_priority__ = 10
def __array__(self):
@@ -1838,7 +1838,7 @@ class TestSpecialMethods(object):
def test_failing_prepare(self):
- class A(object):
+ class A:
def __array__(self):
return np.zeros(1)
@@ -1850,7 +1850,7 @@ class TestSpecialMethods(object):
def test_array_with_context(self):
- class A(object):
+ class A:
def __array__(self, dtype=None, context=None):
func, args, i = context
self.func = func
@@ -1858,11 +1858,11 @@ class TestSpecialMethods(object):
self.i = i
return np.zeros(1)
- class B(object):
+ class B:
def __array__(self, dtype=None):
return np.zeros(1, dtype)
- class C(object):
+ class C:
def __array__(self):
return np.zeros(1)
@@ -1877,7 +1877,7 @@ class TestSpecialMethods(object):
def test_ufunc_override(self):
# check override works even with instance with high priority.
- class A(object):
+ class A:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return self, func, method, inputs, kwargs
@@ -1914,7 +1914,7 @@ class TestSpecialMethods(object):
three_mul_ufunc = np.frompyfunc(tres_mul, 3, 1)
four_mul_ufunc = np.frompyfunc(quatro_mul, 4, 1)
- class A(object):
+ class A:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "A"
@@ -1922,11 +1922,11 @@ class TestSpecialMethods(object):
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "ASub"
- class B(object):
+ class B:
def __array_ufunc__(self, func, method, *inputs, **kwargs):
return "B"
- class C(object):
+ class C:
def __init__(self):
self.count = 0
@@ -2038,7 +2038,7 @@ class TestSpecialMethods(object):
def test_ufunc_override_methods(self):
- class A(object):
+ class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
@@ -2207,11 +2207,11 @@ class TestSpecialMethods(object):
def test_ufunc_override_out(self):
- class A(object):
+ class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
- class B(object):
+ class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return kwargs
@@ -2263,7 +2263,7 @@ class TestSpecialMethods(object):
def test_ufunc_override_exception(self):
- class A(object):
+ class A:
def __array_ufunc__(self, *a, **kwargs):
raise ValueError("oops")
@@ -2274,7 +2274,7 @@ class TestSpecialMethods(object):
def test_ufunc_override_not_implemented(self):
- class A(object):
+ class A:
def __array_ufunc__(self, *args, **kwargs):
return NotImplemented
@@ -2291,7 +2291,7 @@ class TestSpecialMethods(object):
def test_ufunc_override_disabled(self):
- class OptOut(object):
+ class OptOut:
__array_ufunc__ = None
opt_out = OptOut()
@@ -2308,7 +2308,7 @@ class TestSpecialMethods(object):
# opt-outs still hold even when other arguments have pathological
# __array_ufunc__ implementations
- class GreedyArray(object):
+ class GreedyArray:
def __array_ufunc__(self, *args, **kwargs):
return self
@@ -2322,7 +2322,7 @@ class TestSpecialMethods(object):
def test_gufunc_override(self):
# gufunc are just ufunc instances, but follow a different path,
# so check __array_ufunc__ overrides them properly.
- class A(object):
+ class A:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return self, ufunc, method, inputs, kwargs
@@ -2353,7 +2353,7 @@ class TestSpecialMethods(object):
# NOTE: this class is given as an example in doc/subclassing.py;
# if you make any changes here, do update it there too.
class A(np.ndarray):
- def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
args = []
in_no = []
for i, input_ in enumerate(inputs):
@@ -2363,7 +2363,7 @@ class TestSpecialMethods(object):
else:
args.append(input_)
- outputs = kwargs.pop('out', None)
+ outputs = out
out_no = []
if outputs:
out_args = []
@@ -2404,7 +2404,7 @@ class TestSpecialMethods(object):
return results[0] if len(results) == 1 else results
- class B(object):
+ class B:
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
if any(isinstance(input_, A) for input_ in inputs):
return "A!"
@@ -2523,14 +2523,14 @@ class TestSpecialMethods(object):
assert_(a.info, {'inputs': [0, 2]})
-class TestChoose(object):
+class TestChoose:
def test_mixed(self):
c = np.array([True, True])
a = np.array([True, True])
assert_equal(np.choose(c, (a, 1)), np.array([1, 1]))
-class TestRationalFunctions(object):
+class TestRationalFunctions:
def test_lcm(self):
self._test_lcm_inner(np.int16)
self._test_lcm_inner(np.uint16)
@@ -2629,7 +2629,7 @@ class TestRationalFunctions(object):
assert_equal(np.gcd(2**100, 3**100), 1)
-class TestRoundingFunctions(object):
+class TestRoundingFunctions:
def test_object_direct(self):
""" test direct implementation of these magic methods """
@@ -2665,7 +2665,7 @@ class TestRoundingFunctions(object):
assert_equal(np.trunc(f), -1)
-class TestComplexFunctions(object):
+class TestComplexFunctions:
funcs = [np.arcsin, np.arccos, np.arctan, np.arcsinh, np.arccosh,
np.arctanh, np.sin, np.cos, np.tan, np.exp,
np.exp2, np.log, np.sqrt, np.log10, np.log2,
@@ -2848,7 +2848,7 @@ class TestComplexFunctions(object):
check(func, pts, 1+1j)
-class TestAttributes(object):
+class TestAttributes:
def test_attributes(self):
add = ncu.add
assert_equal(add.__name__, 'add')
@@ -2867,7 +2867,7 @@ class TestAttributes(object):
"frexp(x[, out1, out2], / [, out=(None, None)], *, where=True"))
-class TestSubclass(object):
+class TestSubclass:
def test_subclass_op(self):
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py
index fec180786..677d9af60 100644
--- a/numpy/core/tests/test_umath_accuracy.py
+++ b/numpy/core/tests/test_umath_accuracy.py
@@ -28,7 +28,7 @@ files = ['umath-validation-set-exp',
'umath-validation-set-sin',
'umath-validation-set-cos']
-class TestAccuracy(object):
+class TestAccuracy:
@pytest.mark.xfail(reason="Fails for MacPython/numpy-wheels builds")
def test_validate_transcendentals(self):
with np.errstate(all='ignore'):
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index 8c0918a88..5e5ced85c 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -29,7 +29,7 @@ platform_skip = pytest.mark.skipif(xfail_complex_tests,
-class TestCexp(object):
+class TestCexp:
def test_simple(self):
check = check_complex_value
f = np.exp
@@ -129,7 +129,7 @@ class TestCexp(object):
check(f, np.nan, 0, np.nan, 0)
-class TestClog(object):
+class TestClog:
def test_simple(self):
x = np.array([1+0j, 1+2j])
y_r = np.log(np.abs(x)) + 1j * np.angle(x)
@@ -274,7 +274,7 @@ class TestClog(object):
assert_almost_equal(np.log(xa[i].conj()), ya[i].conj())
-class TestCsqrt(object):
+class TestCsqrt:
def test_simple(self):
# sqrt(1)
@@ -354,7 +354,7 @@ class TestCsqrt(object):
# XXX: check for conj(csqrt(z)) == csqrt(conj(z)) (need to fix branch
# cuts first)
-class TestCpow(object):
+class TestCpow:
def setup(self):
self.olderr = np.seterr(invalid='ignore')
@@ -394,7 +394,7 @@ class TestCpow(object):
for i in lx:
assert_almost_equal(n_r[i], p_r[i], err_msg='Loop %d\n' % i)
-class TestCabs(object):
+class TestCabs:
def setup(self):
self.olderr = np.seterr(invalid='ignore')
@@ -456,7 +456,7 @@ class TestCabs(object):
ref = g(x[i], y[i])
check_real_value(f, x[i], y[i], ref)
-class TestCarg(object):
+class TestCarg:
def test_simple(self):
check_real_value(ncu._arg, 1, 0, 0, False)
check_real_value(ncu._arg, 0, 1, 0.5*np.pi, False)
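The dominant change in the test files above is dropping the explicit object base from class definitions. On Python 3 every class is new-style, so the two spellings define identical classes; a minimal illustration (not part of the patch):

    class Old(object):   # Python 2/3 straddling spelling
        pass

    class New:           # sufficient on Python 3
        pass

    # Both are new-style classes with object at the end of the MRO.
    assert Old.__mro__[-1] is object
    assert New.__mro__[-1] is object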
diff --git a/numpy/core/tests/test_unicode.py b/numpy/core/tests/test_unicode.py
index 3034b491d..b8ec56116 100644
--- a/numpy/core/tests/test_unicode.py
+++ b/numpy/core/tests/test_unicode.py
@@ -65,7 +65,7 @@ def test_string_cast():
# Creation tests
############################################################
-class CreateZeros(object):
+class CreateZeros:
"""Check the creation of zero-valued arrays"""
def content_check(self, ua, ua_scalar, nbytes):
@@ -117,7 +117,7 @@ class TestCreateZeros_1009(CreateZeros):
ulen = 1009
-class CreateValues(object):
+class CreateValues:
"""Check the creation of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
@@ -202,7 +202,7 @@ class TestCreateValues_1009_UCS4(CreateValues):
# Assignment tests
############################################################
-class AssignValues(object):
+class AssignValues:
"""Check the assignment of unicode arrays with values"""
def content_check(self, ua, ua_scalar, nbytes):
@@ -292,7 +292,7 @@ class TestAssignValues_1009_UCS4(AssignValues):
# Byteorder tests
############################################################
-class ByteorderValues(object):
+class ByteorderValues:
"""Check the byteorder of unicode arrays in round-trip conversions"""
def test_values0D(self):
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index ef29189f7..5e3cd0e74 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -14,7 +14,6 @@ from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
-from numpy.distutils.compat import get_exception
from numpy.distutils.exec_command import (
filepath_from_subprocess_output, forward_bytes_to_stdout
)
@@ -530,11 +529,6 @@ def CCompiler_customize(self, dist, need_cxx=0):
'g++' in self.compiler[0] or
'clang' in self.compiler[0]):
self._auto_depends = True
- if 'gcc' in self.compiler[0] and not need_cxx:
- # add std=c99 flag for gcc
- # TODO: does this need to be more specific?
- self.compiler.append('-std=c99')
- self.compiler_so.append('-std=c99')
elif os.name == 'posix':
import tempfile
import shutil
@@ -754,15 +748,15 @@ def new_compiler (plat=None,
module_name = "numpy.distutils." + module_name
try:
__import__ (module_name)
- except ImportError:
- msg = str(get_exception())
+ except ImportError as e:
+ msg = str(e)
log.info('%s in numpy.distutils; trying from distutils',
str(msg))
module_name = module_name[6:]
try:
__import__(module_name)
- except ImportError:
- msg = str(get_exception())
+ except ImportError as e:
+ msg = str(e)
raise DistutilsModuleError("can't compile C/C++ code: unable to load module '%s'" % \
module_name)
try:
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index bd6728281..2c833aad7 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -22,7 +22,6 @@ from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_inline,
check_restrict,
check_compiler_gcc4)
-from numpy.distutils.compat import get_exception
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
@@ -50,8 +49,7 @@ class config(old_config):
if not self.compiler.initialized:
try:
self.compiler.initialize()
- except IOError:
- e = get_exception()
+ except IOError as e:
msg = textwrap.dedent("""\
Could not initialize compiler instance: do you have Visual Studio
installed? If you are trying to build with MinGW, please use "python setup.py
@@ -94,8 +92,8 @@ class config(old_config):
self.compiler = self.fcompiler
try:
ret = mth(*((self,)+args))
- except (DistutilsExecError, CompileError):
- str(get_exception())
+ except (DistutilsExecError, CompileError) as e:
+ str(e)
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
@@ -493,7 +491,7 @@ class config(old_config):
self._clean()
return exitcode, output
-class GrabStdout(object):
+class GrabStdout:
def __init__(self):
self.sys_stdout = sys.stdout
diff --git a/numpy/distutils/compat.py b/numpy/distutils/compat.py
deleted file mode 100644
index afe0beedb..000000000
--- a/numpy/distutils/compat.py
+++ /dev/null
@@ -1,8 +0,0 @@
-"""Small modules to cope with python 2 vs 3 incompatibilities inside
-numpy.distutils
-
-"""
-import sys
-
-def get_exception():
- return sys.exc_info()[1]
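With numpy.distutils.compat deleted, every caller switches from the get_exception() helper to binding the exception directly in the except clause. A rough before/after sketch of the pattern (the imported module name is made up for illustration):

    import sys

    # Old pattern: fetch the active exception via sys.exc_info(), which is
    # what get_exception() returned; the helper sidestepped the
    # 'except E as e' syntax for the sake of very old Python 2 releases.
    try:
        import some_missing_module
    except ImportError:
        msg = str(sys.exc_info()[1])

    # New pattern: plain Python 3 syntax, used throughout this patch.
    try:
        import some_missing_module
    except ImportError as e:
        msg = str(e)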
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
index 8c84ddaae..ec5a84a68 100644
--- a/numpy/distutils/conv_template.py
+++ b/numpy/distutils/conv_template.py
@@ -85,8 +85,6 @@ import os
import sys
import re
-from numpy.distutils.compat import get_exception
-
# names for replacement that are already global.
global_names = {}
@@ -238,8 +236,7 @@ def parse_string(astr, env, level, line) :
code.append(replace_re.sub(replace, pref))
try :
envlist = parse_loop_header(head)
- except ValueError:
- e = get_exception()
+ except ValueError as e:
msg = "line %d: %s" % (newline, e)
raise ValueError(msg)
for newenv in envlist :
@@ -287,8 +284,7 @@ def process_file(source):
sourcefile = os.path.normcase(source).replace("\\", "\\\\")
try:
code = process_str(''.join(lines))
- except ValueError:
- e = get_exception()
+ except ValueError as e:
raise ValueError('In "%s" loop at %s' % (sourcefile, e))
return '#line 1 "%s"\n%s' % (sourcefile, code)
@@ -325,8 +321,7 @@ def main():
allstr = fid.read()
try:
writestr = process_str(allstr)
- except ValueError:
- e = get_exception()
+ except ValueError as e:
raise ValueError("In %s loop at %s" % (file, e))
outfile.write(writestr)
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index 73daf0d5d..efea90113 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -25,13 +25,10 @@ else:
import warnings
import platform
-from numpy.distutils.compat import get_exception
-
def getoutput(cmd, successful_status=(0,), stacklevel=1):
try:
status, output = getstatusoutput(cmd)
- except EnvironmentError:
- e = get_exception()
+ except EnvironmentError as e:
warnings.warn(str(e), UserWarning, stacklevel=stacklevel)
return False, ""
if os.WIFEXITED(status) and os.WEXITSTATUS(status) in successful_status:
@@ -65,7 +62,7 @@ def key_value_from_command(cmd, sep, successful_status=(0,),
d[l[0]] = l[1]
return d
-class CPUInfoBase(object):
+class CPUInfoBase:
"""Holds CPU information and provides methods for requiring
the availability of various CPU features.
"""
@@ -113,8 +110,7 @@ class LinuxCPUInfo(CPUInfoBase):
info[0]['uname_m'] = output.strip()
try:
fo = open('/proc/cpuinfo')
- except EnvironmentError:
- e = get_exception()
+ except EnvironmentError as e:
warnings.warn(str(e), UserWarning, stacklevel=2)
else:
for line in fo:
@@ -521,8 +517,8 @@ class Win32CPUInfo(CPUInfoBase):
info[-1]["Family"]=int(srch.group("FML"))
info[-1]["Model"]=int(srch.group("MDL"))
info[-1]["Stepping"]=int(srch.group("STP"))
- except Exception:
- print(sys.exc_info()[1], '(ignoring)')
+ except Exception as e:
+ print(e, '(ignoring)')
self.__class__.info = info
def _not_impl(self): pass
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 6d99d3a61..a88b0d713 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -34,7 +34,6 @@ from numpy.distutils import log
from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
make_temp_file, get_shared_lib_extension
from numpy.distutils.exec_command import find_executable
-from numpy.distutils.compat import get_exception
from numpy.distutils import _shell_utils
from .environment import EnvironmentConfig
@@ -612,8 +611,8 @@ class FCompiler(CCompiler):
src)
try:
self.spawn(command, display=display)
- except DistutilsExecError:
- msg = str(get_exception())
+ except DistutilsExecError as e:
+ msg = str(e)
raise CompileError(msg)
def module_options(self, module_dirs, module_build_dir):
@@ -680,8 +679,8 @@ class FCompiler(CCompiler):
command = linker + ld_args
try:
self.spawn(command)
- except DistutilsExecError:
- msg = str(get_exception())
+ except DistutilsExecError as e:
+ msg = str(e)
raise LinkError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
@@ -929,8 +928,7 @@ def show_fcompilers(dist=None):
c = new_fcompiler(compiler=compiler, verbose=dist.verbose)
c.customize(dist)
v = c.get_version()
- except (DistutilsModuleError, CompilerNotFound):
- e = get_exception()
+ except (DistutilsModuleError, CompilerNotFound) as e:
log.debug("show_fcompilers: %s not found" % (compiler,))
log.debug(repr(e))
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
index 2088f0c9b..6ce590c7c 100644
--- a/numpy/distutils/fcompiler/compaq.py
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -4,7 +4,6 @@ import os
import sys
from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils.compat import get_exception
from distutils.errors import DistutilsPlatformError
compilers = ['CompaqFCompiler']
@@ -80,19 +79,16 @@ class CompaqVisualFCompiler(FCompiler):
ar_exe = m.lib
except DistutilsPlatformError:
pass
- except AttributeError:
- msg = get_exception()
+ except AttributeError as msg:
if '_MSVCCompiler__root' in str(msg):
print('Ignoring "%s" (I think it is msvccompiler.py bug)' % (msg))
else:
raise
- except IOError:
- e = get_exception()
+ except IOError as e:
if not "vcvarsall.bat" in str(e):
print("Unexpected IOError in", __file__)
raise e
- except ValueError:
- e = get_exception()
+ except ValueError as e:
if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise e
diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py
index 5d5b750ff..ae5fc404a 100644
--- a/numpy/distutils/fcompiler/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -4,7 +4,7 @@ from distutils.dist import Distribution
__metaclass__ = type
-class EnvironmentConfig(object):
+class EnvironmentConfig:
def __init__(self, distutils_section='ALL', **kw):
self._distutils_section = distutils_section
self._conf_keys = kw
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 0a68fee72..4fc9f33ff 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -10,7 +10,6 @@ import subprocess
from subprocess import Popen, PIPE, STDOUT
from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.fcompiler import FCompiler
-from numpy.distutils.compat import get_exception
from numpy.distutils.system_info import system_info
compilers = ['GnuFCompiler', 'Gnu95FCompiler']
@@ -558,5 +557,5 @@ if __name__ == '__main__':
print(customized_fcompiler('gnu').get_version())
try:
print(customized_fcompiler('g95').get_version())
- except Exception:
- print(get_exception())
+ except Exception as e:
+ print(e)
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index e75e620b0..eec8d56a3 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -32,7 +32,6 @@ def clean_up_temporary_directory():
atexit.register(clean_up_temporary_directory)
-from numpy.distutils.compat import get_exception
from numpy.compat import basestring
from numpy.compat import npy_load_module
@@ -49,7 +48,7 @@ __all__ = ['Configuration', 'get_numpy_include_dirs', 'default_config_dict',
'quote_args', 'get_build_architecture', 'get_info', 'get_pkg_info',
'get_num_build_jobs']
-class InstallableLib(object):
+class InstallableLib:
"""
Container to hold information on an installable library.
@@ -726,7 +725,7 @@ def get_frame(level=0):
######################
-class Configuration(object):
+class Configuration:
_list_keys = ['packages', 'ext_modules', 'data_files', 'include_dirs',
'libraries', 'headers', 'scripts', 'py_modules',
@@ -1970,9 +1969,8 @@ class Configuration(object):
try:
version_module = npy_load_module('_'.join(n.split('.')),
fn, info)
- except ImportError:
- msg = get_exception()
- self.warn(str(msg))
+ except ImportError as e:
+ self.warn(str(e))
version_module = None
if version_module is None:
continue
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index 377a24e41..47965b4ae 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -76,7 +76,7 @@ def parse_flags(line):
def _escape_backslash(val):
return val.replace('\\', '\\\\')
-class LibraryInfo(object):
+class LibraryInfo:
"""
Object containing build information about a library.
@@ -148,7 +148,7 @@ class LibraryInfo(object):
return "\n".join(m)
-class VariableSet(object):
+class VariableSet:
"""
Container object for the variables defined in a config file.
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index a2ab04b88..508aeefc5 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -161,7 +161,6 @@ from numpy.distutils.exec_command import (
from numpy.distutils.misc_util import (is_sequence, is_string,
get_shared_lib_extension)
from numpy.distutils.command.config import config as cmd_config
-from numpy.distutils.compat import get_exception
from numpy.distutils import customized_ccompiler as _customized_ccompiler
from numpy.distutils import _shell_utils
import distutils.ccompiler
@@ -580,7 +579,7 @@ class UmfpackNotFoundError(NotFoundError):
the UMFPACK environment variable."""
-class system_info(object):
+class system_info:
""" get_info() is the only public method. Don't use others.
"""
@@ -2539,18 +2538,18 @@ class numerix_info(system_info):
try:
import numpy # noqa: F401
which = "numpy", "defaulted"
- except ImportError:
- msg1 = str(get_exception())
+ except ImportError as e:
+ msg1 = str(e)
try:
import Numeric # noqa: F401
which = "numeric", "defaulted"
- except ImportError:
- msg2 = str(get_exception())
+ except ImportError as e:
+ msg2 = str(e)
try:
import numarray # noqa: F401
which = "numarray", "defaulted"
- except ImportError:
- msg3 = str(get_exception())
+ except ImportError as e:
+ msg3 = str(e)
log.info(msg1)
log.info(msg2)
log.info(msg3)
diff --git a/numpy/distutils/tests/test_ccompiler.py b/numpy/distutils/tests/test_ccompiler.py
deleted file mode 100644
index 72aa8227c..000000000
--- a/numpy/distutils/tests/test_ccompiler.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from distutils.ccompiler import new_compiler
-
-from numpy.distutils.numpy_distribution import NumpyDistribution
-
-def test_ccompiler():
- '''
- scikit-image/scikit-image issue 4369
- We unconditionally add ``-std-c99`` to the gcc compiler in order
- to support c99 with very old gcc compilers. However the same call
- is used to get the flags for the c++ compiler, just with a kwarg.
- Make sure in this case, where it would not be legal, the option is **not** added
- '''
- dist = NumpyDistribution()
- compiler = new_compiler()
- compiler.customize(dist)
- if hasattr(compiler, 'compiler') and 'gcc' in compiler.compiler[0]:
- assert 'c99' in ' '.join(compiler.compiler)
-
- compiler = new_compiler()
- compiler.customize(dist, need_cxx=True)
- if hasattr(compiler, 'compiler') and 'gcc' in compiler.compiler[0]:
- assert 'c99' not in ' '.join(compiler.compiler)
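Both the automatic -std=c99 injection in ccompiler.py and its regression test are removed here. A downstream project that still needs C99 with an old gcc would now have to request the flag itself; a hypothetical setup.py fragment (names are placeholders, not from this patch):

    from setuptools import setup, Extension

    setup(
        name='example',
        ext_modules=[
            Extension(
                'example._cmod',
                sources=['example/_cmod.c'],
                extra_compile_args=['-std=c99'],  # opt in explicitly
            ),
        ],
    )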
diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py
index 8c3a4516a..2ac0a6308 100644
--- a/numpy/distutils/tests/test_exec_command.py
+++ b/numpy/distutils/tests/test_exec_command.py
@@ -13,7 +13,7 @@ if sys.version_info[0] >= 3:
else:
from StringIO import StringIO
-class redirect_stdout(object):
+class redirect_stdout:
"""Context manager to redirect stdout for exec_command test."""
def __init__(self, stdout=None):
self._stdout = stdout or sys.stdout
@@ -28,7 +28,7 @@ class redirect_stdout(object):
# note: closing sys.stdout won't close it.
self._stdout.close()
-class redirect_stderr(object):
+class redirect_stderr:
"""Context manager to redirect stderr for exec_command test."""
def __init__(self, stderr=None):
self._stderr = stderr or sys.stderr
@@ -43,7 +43,7 @@ class redirect_stderr(object):
# note: closing sys.stderr won't close it.
self._stderr.close()
-class emulate_nonposix(object):
+class emulate_nonposix:
"""Context manager to emulate os.name != 'posix' """
def __init__(self, osname='non-posix'):
self._new_name = osname
@@ -96,7 +96,7 @@ def test_exec_command_stderr():
exec_command.exec_command("cd '.'")
-class TestExecCommand(object):
+class TestExecCommand:
def setup(self):
self.pyexe = get_pythonexe()
diff --git a/numpy/distutils/tests/test_fcompiler_gnu.py b/numpy/distutils/tests/test_fcompiler_gnu.py
index b7bba2f95..0817ae58c 100644
--- a/numpy/distutils/tests/test_fcompiler_gnu.py
+++ b/numpy/distutils/tests/test_fcompiler_gnu.py
@@ -28,7 +28,7 @@ gfortran_version_strings = [
('GNU Fortran (crosstool-NG 8a21ab48) 7.2.0', '7.2.0')
]
-class TestG77Versions(object):
+class TestG77Versions:
def test_g77_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu')
for vs, version in g77_version_strings:
@@ -41,7 +41,7 @@ class TestG77Versions(object):
v = fc.version_match(vs)
assert_(v is None, (vs, v))
-class TestGFortranVersions(object):
+class TestGFortranVersions:
def test_gfortran_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95')
for vs, version in gfortran_version_strings:
diff --git a/numpy/distutils/tests/test_fcompiler_intel.py b/numpy/distutils/tests/test_fcompiler_intel.py
index 3bb81e027..45c9cdac1 100644
--- a/numpy/distutils/tests/test_fcompiler_intel.py
+++ b/numpy/distutils/tests/test_fcompiler_intel.py
@@ -14,7 +14,7 @@ intel_64bit_version_strings = [
"running on Intel(R) 64, Version 11.1", '11.1')
]
-class TestIntelFCompilerVersions(object):
+class TestIntelFCompilerVersions:
def test_32bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intel')
for vs, version in intel_32bit_version_strings:
@@ -22,7 +22,7 @@ class TestIntelFCompilerVersions(object):
assert_(v == version)
-class TestIntelEM64TFCompilerVersions(object):
+class TestIntelEM64TFCompilerVersions:
def test_64bit_version(self):
fc = numpy.distutils.fcompiler.new_fcompiler(compiler='intelem')
for vs, version in intel_64bit_version_strings:
diff --git a/numpy/distutils/tests/test_fcompiler_nagfor.py b/numpy/distutils/tests/test_fcompiler_nagfor.py
index 785aeda14..2e04f5266 100644
--- a/numpy/distutils/tests/test_fcompiler_nagfor.py
+++ b/numpy/distutils/tests/test_fcompiler_nagfor.py
@@ -14,7 +14,7 @@ nag_version_strings = [('nagfor', 'NAG Fortran Compiler Release '
'431,435,437,446,459-460,463,472,494,496,503,508,'
'511,517,529,555,557,565)', '5.1')]
-class TestNagFCompilerVersions(object):
+class TestNagFCompilerVersions:
def test_version_match(self):
for comp, vs, version in nag_version_strings:
fc = numpy.distutils.fcompiler.new_fcompiler(compiler=comp)
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
index a584e0869..605c80483 100644
--- a/numpy/distutils/tests/test_misc_util.py
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -9,7 +9,7 @@ from numpy.testing import (
ajoin = lambda *paths: join(*((sep,)+paths))
-class TestAppendpath(object):
+class TestAppendpath:
def test_1(self):
assert_equal(appendpath('prefix', 'name'), join('prefix', 'name'))
@@ -33,7 +33,7 @@ class TestAppendpath(object):
assert_equal(appendpath('/prefix/sub/sub2', '/prefix/sub/sup/name'),
ajoin('prefix', 'sub', 'sub2', 'sup', 'name'))
-class TestMinrelpath(object):
+class TestMinrelpath:
def test_1(self):
n = lambda path: path.replace('/', sep)
@@ -47,7 +47,7 @@ class TestMinrelpath(object):
assert_equal(minrelpath(n('.././..')), n('../..'))
assert_equal(minrelpath(n('aa/bb/.././../dd')), n('dd'))
-class TestGpaths(object):
+class TestGpaths:
def test_gpaths(self):
local_path = minrelpath(join(dirname(__file__), '..'))
@@ -56,7 +56,7 @@ class TestGpaths(object):
f = gpaths('system_info.py', local_path)
assert_(join(local_path, 'system_info.py') == f[0], repr(f))
-class TestSharedExtension(object):
+class TestSharedExtension:
def test_get_shared_lib_extension(self):
import sys
diff --git a/numpy/distutils/tests/test_npy_pkg_config.py b/numpy/distutils/tests/test_npy_pkg_config.py
index d202fce85..b287ebe2e 100644
--- a/numpy/distutils/tests/test_npy_pkg_config.py
+++ b/numpy/distutils/tests/test_npy_pkg_config.py
@@ -34,7 +34,7 @@ libs = -L${libdir}
simple_variable_d = {'cflags': '-I/foo/bar/include', 'libflags': '-L/foo/bar/lib',
'version': '0.1', 'name': 'foo'}
-class TestLibraryInfo(object):
+class TestLibraryInfo:
def test_simple(self):
with temppath('foo.ini') as path:
with open(path, 'w') as f:
@@ -61,7 +61,7 @@ class TestLibraryInfo(object):
out.vars['prefix'] = '/Users/david'
assert_(out.cflags() == '-I/Users/david/include')
-class TestParseFlags(object):
+class TestParseFlags:
def test_simple_cflags(self):
d = parse_flags("-I/usr/include")
assert_(d['include_dirs'] == ['/usr/include'])
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index 04f5f8320..c40cdb7db 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -128,7 +128,7 @@ class DuplicateOptionInfo(_system_info):
section = 'duplicate_options'
-class TestSystemInfoReading(object):
+class TestSystemInfoReading:
def setup(self):
""" Create the libraries """
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
index 23db2a814..9a4d3ba52 100644
--- a/numpy/distutils/unixccompiler.py
+++ b/numpy/distutils/unixccompiler.py
@@ -7,7 +7,6 @@ import os
from distutils.errors import DistutilsExecError, CompileError
from distutils.unixccompiler import *
from numpy.distutils.ccompiler import replace_method
-from numpy.distutils.compat import get_exception
from numpy.distutils.misc_util import _commandline_dep_string
if sys.version_info[0] < 3:
@@ -54,8 +53,8 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] + deps +
extra_postargs, display = display)
- except DistutilsExecError:
- msg = str(get_exception())
+ except DistutilsExecError as e:
+ msg = str(e)
raise CompileError(msg)
# add commandline flags to dependency file
@@ -126,8 +125,8 @@ def UnixCCompiler_create_static_lib(self, objects, output_libname,
try:
self.spawn(self.ranlib + [output_filename],
display = display)
- except DistutilsExecError:
- msg = str(get_exception())
+ except DistutilsExecError as e:
+ msg = str(e)
raise LibError(msg)
else:
log.debug("skipping %s (up-to-date)", output_filename)
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index 2431516a8..6d2e0010f 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -182,7 +182,7 @@ Glossary
instance
A class definition gives the blueprint for constructing an object::
- >>> class House(object):
+ >>> class House:
... wall_colour = 'white'
Yet, we have to *build* a house before it exists::
@@ -336,7 +336,7 @@ Glossary
Often seen in method signatures, ``self`` refers to the instance
of the associated class. For example:
- >>> class Paintbrush(object):
+ >>> class Paintbrush:
... color = 'blue'
...
... def paint(self):
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index 7ef426f5b..5a54ddd90 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -114,7 +114,7 @@ For example, consider the following Python code:
.. testcode::
- class C(object):
+ class C:
def __new__(cls, *args):
print('Cls in __new__:', cls)
print('Args in __new__:', args)
@@ -454,7 +454,7 @@ following.
    import numpy as np
class A(np.ndarray):
- def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
args = []
in_no = []
for i, input_ in enumerate(inputs):
@@ -464,7 +464,7 @@ following.
else:
args.append(input_)
- outputs = kwargs.pop('out', None)
+ outputs = out
out_no = []
if outputs:
out_args = []
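Both the test class earlier in this patch and the doc/subclassing.py example now take out as an explicit keyword instead of popping it from kwargs. A stripped-down sketch of the same idea, assuming only that NumPy passes out to __array_ufunc__ as a keyword tuple (the class name is illustrative):

    import numpy as np

    class PlainWrapper(np.ndarray):
        def __array_ufunc__(self, ufunc, method, *inputs, out=None, **kwargs):
            # Unwrap subclass instances so the ufunc runs on base ndarrays.
            inputs = tuple(np.asarray(x) if isinstance(x, PlainWrapper) else x
                           for x in inputs)
            if out is not None:
                kwargs['out'] = tuple(np.asarray(x) if isinstance(x, PlainWrapper)
                                      else x for x in out)
            return getattr(ufunc, method)(*inputs, **kwargs)

    a = np.arange(3).view(PlainWrapper)
    np.add(a, 1)   # dispatches through the __array_ufunc__ above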
diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py
index d985e6e36..31802621e 100644
--- a/numpy/f2py/auxfuncs.py
+++ b/numpy/f2py/auxfuncs.py
@@ -550,7 +550,7 @@ class F2PYError(Exception):
pass
-class throw_error(object):
+class throw_error:
def __init__(self, mess):
self.mess = mess
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index 973741ca7..8ec5b510f 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -312,13 +312,11 @@ fortran_getattr(PyFortranObject *fp, char *name) {
return NULL;
return cobj;
}
- if (1) {
- PyObject *str, *ret;
- str = PyUnicode_FromString(name);
- ret = PyObject_GenericGetAttr((PyObject *)fp, str);
- Py_DECREF(str);
- return ret;
- }
+ PyObject *str, *ret;
+ str = PyUnicode_FromString(name);
+ ret = PyObject_GenericGetAttr((PyObject *)fp, str);
+ Py_DECREF(str);
+ return ret;
}
static int
@@ -590,7 +588,7 @@ count_negative_dimensions(const int rank,
}
#ifdef DEBUG_COPY_ND_ARRAY
-void dump_dims(int rank, npy_intp* dims) {
+void dump_dims(int rank, npy_intp const* dims) {
int i;
printf("[");
for(i=0;i<rank;++i) {
@@ -1017,8 +1015,6 @@ int copy_ND_array(const PyArrayObject *arr, PyArrayObject *out)
/* Compatibility functions for Python >= 3.0 */
/*********************************************/
-#if PY_VERSION_HEX >= 0x03000000
-
PyObject *
F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))
{
@@ -1045,29 +1041,6 @@ F2PyCapsule_Check(PyObject *ptr)
return PyCapsule_CheckExact(ptr);
}
-#else
-
-PyObject *
-F2PyCapsule_FromVoidPtr(void *ptr, void (*dtor)(void *))
-{
- return PyCObject_FromVoidPtr(ptr, dtor);
-}
-
-void *
-F2PyCapsule_AsVoidPtr(PyObject *ptr)
-{
- return PyCObject_AsVoidPtr(ptr);
-}
-
-int
-F2PyCapsule_Check(PyObject *ptr)
-{
- return PyCObject_Check(ptr);
-}
-
-#endif
-
-
#ifdef __cplusplus
}
#endif
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index c5757dba1..b719f2495 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -55,7 +55,7 @@ def flags2names(flags):
return info
-class Intent(object):
+class Intent:
def __init__(self, intent_list=[]):
self.intent_list = intent_list[:]
@@ -129,7 +129,7 @@ if ((intp().dtype.itemsize != 4 or clongdouble().dtype.alignment <= 8) and
_cast_dict['CDOUBLE'] = _cast_dict['DOUBLE'] + ['CFLOAT', 'CDOUBLE']
-class Type(object):
+class Type:
_type_cache = {}
def __new__(cls, name):
@@ -190,7 +190,7 @@ class Type(object):
return types
-class Array(object):
+class Array:
def __init__(self, typ, dims, intent, obj):
self.type = typ
@@ -291,7 +291,7 @@ class Array(object):
return obj_attr[0] == self.arr_attr[0]
-class TestIntent(object):
+class TestIntent:
def test_in_out(self):
assert_equal(str(intent.in_.out), 'intent(in,out)')
@@ -302,7 +302,7 @@ class TestIntent(object):
assert_(not intent.in_.is_intent('c'))
-class TestSharedMemory(object):
+class TestSharedMemory:
num2seq = [1, 2]
num23seq = [[1, 2, 3], [4, 5, 6]]
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index e4e61f450..7629df605 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -116,7 +116,7 @@ cf2py intent(out) a
r = t(self.module.func0._cpointer)
assert_(r == 11, repr(r))
- class A(object):
+ class A:
def __call__(self):
return 7
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index c4fcea8d1..c430a6608 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -313,7 +313,7 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
#
-class F2PyTest(object):
+class F2PyTest:
code = None
sources = None
options = []
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index dd24139f2..2b457271b 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -8,7 +8,7 @@ from numpy.testing import assert_array_almost_equal, assert_equal
from numpy import fft, pi
-class TestFFTShift(object):
+class TestFFTShift:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
@@ -134,7 +134,7 @@ class TestFFTShift(object):
original_ifftshift(inp, axes_keyword))
-class TestFFTFreq(object):
+class TestFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4, -4, -3, -2, -1]
@@ -145,7 +145,7 @@ class TestFFTFreq(object):
assert_array_almost_equal(10*pi*fft.fftfreq(10, pi), x)
-class TestRFFTFreq(object):
+class TestRFFTFreq:
def test_definition(self):
x = [0, 1, 2, 3, 4]
@@ -156,7 +156,7 @@ class TestRFFTFreq(object):
assert_array_almost_equal(10*pi*fft.rfftfreq(10, pi), x)
-class TestIRFFTN(object):
+class TestIRFFTN:
def test_not_last_axis_success(self):
ar, ai = np.random.random((2, 16, 8, 32))
diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py
index 0aa8d0912..da31ec09e 100644
--- a/numpy/fft/tests/test_pocketfft.py
+++ b/numpy/fft/tests/test_pocketfft.py
@@ -19,13 +19,13 @@ def fft1(x):
return np.sum(x*np.exp(phase), axis=1)
-class TestFFTShift(object):
+class TestFFTShift:
def test_fft_n(self):
assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
-class TestFFT1D(object):
+class TestFFT1D:
def test_identity(self):
maxlen = 512
@@ -220,7 +220,7 @@ def test_fft_with_order(dtype, order, fft):
raise ValueError()
-class TestFFTThreadSafe(object):
+class TestFFTThreadSafe:
threads = 16
input_shape = (800, 200)
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index 8d1b8339a..aa793958e 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -139,7 +139,7 @@ def _python2_gzipopen(fn, mode, encoding, newline):
# deferring the import of lzma, bz2 and gzip until needed
# TODO: .zip support, .tar support?
-class _FileOpeners(object):
+class _FileOpeners:
"""
Container for different methods to open (un-)compressed files.
@@ -268,7 +268,7 @@ def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
@set_module('numpy')
-class DataSource(object):
+class DataSource:
"""
DataSource(destpath='.')
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index f612cbe94..dd6e9ec66 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -130,7 +130,7 @@ def flatten_dtype(ndtype, flatten_base=False):
return types
-class LineSplitter(object):
+class LineSplitter:
"""
Object to split a string at a given delimiter or at given places.
@@ -231,7 +231,7 @@ class LineSplitter(object):
return self._handyman(_decode_line(line, self.encoding))
-class NameValidator(object):
+class NameValidator:
"""
Object to validate a list of strings to use as field names.
@@ -454,7 +454,7 @@ class ConversionWarning(UserWarning):
pass
-class StringConverter(object):
+class StringConverter:
"""
Factory class for function transforming a string into another object
(int, float).
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index 0727c7a3e..15b0f8aa9 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -15,7 +15,7 @@ from numpy.compat import long
__all__ = ['Arrayterator']
-class Arrayterator(object):
+class Arrayterator:
"""
Buffered iterator for big arrays.
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 3b0a6783b..ef8a26fe3 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -770,14 +770,14 @@ def copy(a, order='K'):
# Basic operations
-def _gradient_dispatcher(f, *varargs, **kwargs):
+def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None):
yield f
for v in varargs:
yield v
@array_function_dispatch(_gradient_dispatcher)
-def gradient(f, *varargs, **kwargs):
+def gradient(f, *varargs, axis=None, edge_order=1):
"""
Return the gradient of an N-dimensional array.
@@ -954,11 +954,10 @@ def gradient(f, *varargs, **kwargs):
f = np.asanyarray(f)
N = f.ndim # number of dimensions
- axes = kwargs.pop('axis', None)
- if axes is None:
+ if axis is None:
axes = tuple(range(N))
else:
- axes = _nx.normalize_axis_tuple(axes, N)
+ axes = _nx.normalize_axis_tuple(axis, N)
len_axes = len(axes)
n = len(varargs)
@@ -993,10 +992,6 @@ def gradient(f, *varargs, **kwargs):
else:
raise TypeError("invalid number of arguments")
- edge_order = kwargs.pop('edge_order', 1)
- if kwargs:
- raise TypeError('"{}" are not valid keyword arguments.'.format(
- '", "'.join(kwargs.keys())))
if edge_order > 2:
raise ValueError("'edge_order' greater than 2 not supported")
@@ -1870,7 +1865,7 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
@set_module('numpy')
-class vectorize(object):
+class vectorize:
"""
vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
signature=None)
@@ -4063,13 +4058,13 @@ def trapz(y, x=None, dx=1.0, axis=-1):
return ret
-def _meshgrid_dispatcher(*xi, **kwargs):
+def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None):
return xi
# Based on scitools meshgrid
@array_function_dispatch(_meshgrid_dispatcher)
-def meshgrid(*xi, **kwargs):
+def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
"""
Return coordinate matrices from coordinate vectors.
@@ -4175,14 +4170,6 @@ def meshgrid(*xi, **kwargs):
"""
ndim = len(xi)
- copy_ = kwargs.pop('copy', True)
- sparse = kwargs.pop('sparse', False)
- indexing = kwargs.pop('indexing', 'xy')
-
- if kwargs:
- raise TypeError("meshgrid() got an unexpected keyword argument '%s'"
- % (list(kwargs)[0],))
-
if indexing not in ['xy', 'ij']:
raise ValueError(
"Valid values for `indexing` are 'xy' and 'ij'.")
@@ -4200,7 +4187,7 @@ def meshgrid(*xi, **kwargs):
# Return the full N-D matrix (not only the 1-D vector)
output = np.broadcast_arrays(*output, subok=True)
- if copy_:
+ if copy:
output = [x.copy() for x in output]
return output
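gradient and meshgrid above (and broadcast_arrays further down) replace hand-rolled kwargs.pop() validation with keyword-only parameters, letting the interpreter itself reject unknown keywords. A minimal sketch of the pattern with made-up function names:

    # Old style: accept anything, validate by hand.
    def resample_old(signal, *factors, **kwargs):
        order = kwargs.pop('order', 1)
        if kwargs:
            raise TypeError('unexpected keyword arguments: %r' % list(kwargs))
        return signal, factors, order

    # New style: the signature itself does the validation.
    def resample_new(signal, *factors, order=1):
        return signal, factors, order

    resample_new([1, 2, 3], 2, order=2)      # fine
    # resample_new([1, 2, 3], 2, orderr=2)   # would raise TypeError from Python itself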
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index f6a6f922c..0560bd36d 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -105,7 +105,7 @@ def ix_(*args):
out.append(new)
return tuple(out)
-class nd_grid(object):
+class nd_grid:
"""
Construct a multi-dimensional "meshgrid".
@@ -297,7 +297,7 @@ class OGridClass(nd_grid):
ogrid = OGridClass()
-class AxisConcatenator(object):
+class AxisConcatenator:
"""
Translates slice objects to concatenation along an axis.
@@ -550,7 +550,7 @@ c_ = CClass()
@set_module('numpy')
-class ndenumerate(object):
+class ndenumerate:
"""
Multidimensional index iterator.
@@ -601,7 +601,7 @@ class ndenumerate(object):
@set_module('numpy')
-class ndindex(object):
+class ndindex:
"""
An N-dimensional iterator object to index arrays.
@@ -679,7 +679,7 @@ class ndindex(object):
#
#
-class IndexExpression(object):
+class IndexExpression:
"""
A nicer way to build up index tuples for arrays.
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index dd17adb41..d4811b94d 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -58,7 +58,7 @@ def _unary_method(ufunc, name):
return func
-class NDArrayOperatorsMixin(object):
+class NDArrayOperatorsMixin:
"""Mixin defining all operator special methods using __array_ufunc__.
This class implements the special methods for almost all of Python's
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index c85db2922..c47e388c0 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -53,7 +53,7 @@ array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
-class BagObj(object):
+class BagObj:
"""
BagObj(obj)
@@ -67,7 +67,7 @@ class BagObj(object):
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
- >>> class BagDemo(object):
+ >>> class BagDemo:
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
@@ -1334,7 +1334,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
fmt = asstr(fmt)
delimiter = asstr(delimiter)
- class WriteWrap(object):
+ class WriteWrap:
"""Convert to unicode in py2 or to bytes on bytestream inputs.
"""
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index c2b615e0e..5a0fa5431 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -1005,7 +1005,7 @@ def _raise_power(astr, wrap=70):
@set_module('numpy')
-class poly1d(object):
+class poly1d:
"""
A one-dimensional polynomial class.
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index b65706dd5..502235bdf 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -11,7 +11,7 @@ from numpy.core.overrides import array_function_dispatch
__all__ = ['broadcast_to', 'broadcast_arrays']
-class DummyArray(object):
+class DummyArray:
"""Dummy object that just exists to hang __array_interface__ dictionaries
and possibly keep alive a reference to a base array.
"""
@@ -197,12 +197,12 @@ def _broadcast_shape(*args):
return b.shape
-def _broadcast_arrays_dispatcher(*args, **kwargs):
+def _broadcast_arrays_dispatcher(*args, subok=None):
return args
@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
-def broadcast_arrays(*args, **kwargs):
+def broadcast_arrays(*args, subok=False):
"""
Broadcast any number of arrays against each other.
@@ -253,10 +253,6 @@ def broadcast_arrays(*args, **kwargs):
# return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
# order='C').itviews
- subok = kwargs.pop('subok', False)
- if kwargs:
- raise TypeError('broadcast_arrays() got an unexpected keyword '
- 'argument {!r}'.format(list(kwargs.keys())[0]))
args = [np.array(_m, copy=False, subok=subok) for _m in args]
shape = _broadcast_shape(*args)
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index e7c14b807..fdd22347d 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -94,7 +94,7 @@ def invalid_httpfile():
return http_fakefile
-class TestDataSourceOpen(object):
+class TestDataSourceOpen:
def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
@@ -181,7 +181,7 @@ class TestDataSourceOpen(object):
assert_equal(magic_line, result)
-class TestDataSourceExists(object):
+class TestDataSourceExists:
def setup(self):
self.tmpdir = mkdtemp()
self.ds = datasource.DataSource(self.tmpdir)
@@ -211,7 +211,7 @@ class TestDataSourceExists(object):
assert_equal(self.ds.exists(tmpfile), False)
-class TestDataSourceAbspath(object):
+class TestDataSourceAbspath:
def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.ds = datasource.DataSource(self.tmpdir)
@@ -276,7 +276,7 @@ class TestDataSourceAbspath(object):
os.sep = orig_os_sep
-class TestRepositoryAbspath(object):
+class TestRepositoryAbspath:
def setup(self):
self.tmpdir = os.path.abspath(mkdtemp())
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
@@ -309,7 +309,7 @@ class TestRepositoryAbspath(object):
os.sep = orig_os_sep
-class TestRepositoryExists(object):
+class TestRepositoryExists:
def setup(self):
self.tmpdir = mkdtemp()
self.repos = datasource.Repository(valid_baseurl(), self.tmpdir)
@@ -342,7 +342,7 @@ class TestRepositoryExists(object):
assert_(self.repos.exists(tmpfile))
-class TestOpenFunc(object):
+class TestOpenFunc:
def setup(self):
self.tmpdir = mkdtemp()
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index 24c2533e8..1d69d869e 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -12,7 +12,7 @@ from numpy.lib._iotools import (
from numpy.compat import unicode
-class TestLineSplitter(object):
+class TestLineSplitter:
"Tests the LineSplitter class."
def test_no_delimiter(self):
@@ -81,7 +81,7 @@ class TestLineSplitter(object):
# -----------------------------------------------------------------------------
-class TestNameValidator(object):
+class TestNameValidator:
def test_case_sensitivity(self):
"Test case sensitivity"
@@ -139,7 +139,7 @@ def _bytes_to_date(s):
return date(*time.strptime(s, "%Y-%m-%d")[:3])
-class TestStringConverter(object):
+class TestStringConverter:
"Test StringConverter"
def test_creation(self):
@@ -264,7 +264,7 @@ class TestStringConverter(object):
assert_(converter(val) == 9223372043271415339)
-class TestMiscFunctions(object):
+class TestMiscFunctions:
def test_has_nested_dtype(self):
"Test has_nested_dtype"
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index 6ae3ef5c0..cd75b4ac4 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -29,7 +29,7 @@ _all_modes = {
}
-class TestAsPairs(object):
+class TestAsPairs:
def test_single_value(self):
"""Test casting for a single value."""
expected = np.array([[3, 3]] * 10)
@@ -112,7 +112,7 @@ class TestAsPairs(object):
_as_pairs(np.ones((2, 3)), 3)
-class TestConditionalShortcuts(object):
+class TestConditionalShortcuts:
@pytest.mark.parametrize("mode", _all_modes.keys())
def test_zero_padding_shortcuts(self, mode):
test = np.arange(120).reshape(4, 5, 6)
@@ -134,7 +134,7 @@ class TestConditionalShortcuts(object):
np.pad(test, pad_amt, mode=mode, stat_length=30))
-class TestStatistic(object):
+class TestStatistic:
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
@@ -496,7 +496,7 @@ class TestStatistic(object):
np.pad([1., 2.], 1, mode, stat_length=(1, 0))
-class TestConstant(object):
+class TestConstant:
def test_check_constant(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))
@@ -675,7 +675,7 @@ class TestConstant(object):
assert result.shape == (3, 4, 4)
-class TestLinearRamp(object):
+class TestLinearRamp:
def test_check_simple(self):
a = np.arange(100).astype('f')
a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
@@ -760,7 +760,7 @@ class TestLinearRamp(object):
assert_equal(result, expected)
-class TestReflect(object):
+class TestReflect:
def test_check_simple(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'reflect')
@@ -870,7 +870,7 @@ class TestReflect(object):
assert_array_equal(a, b)
-class TestEmptyArray(object):
+class TestEmptyArray:
"""Check how padding behaves on arrays with an empty dimension."""
@pytest.mark.parametrize(
@@ -894,7 +894,7 @@ class TestEmptyArray(object):
assert result.shape == (8, 0, 4)
-class TestSymmetric(object):
+class TestSymmetric:
def test_check_simple(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'symmetric')
@@ -1028,7 +1028,7 @@ class TestSymmetric(object):
assert_array_equal(a, b)
-class TestWrap(object):
+class TestWrap:
def test_check_simple(self):
a = np.arange(100)
a = np.pad(a, (25, 20), 'wrap')
@@ -1142,7 +1142,7 @@ class TestWrap(object):
assert_array_equal(np.r_[a, a, a, a][:-3], b)
-class TestEdge(object):
+class TestEdge:
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
@@ -1181,7 +1181,7 @@ class TestEdge(object):
assert_array_equal(padded, expected)
-class TestEmpty(object):
+class TestEmpty:
def test_simple(self):
arr = np.arange(24).reshape(4, 6)
result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")
@@ -1229,7 +1229,7 @@ def test_object_input(mode):
assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
-class TestPadWidth(object):
+class TestPadWidth:
@pytest.mark.parametrize("pad_width", [
(4, 5, 6, 7),
((1,), (2,), (3,)),
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 992a37cef..851fd31ea 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -12,7 +12,7 @@ import pytest
-class TestSetOps(object):
+class TestSetOps:
def test_intersect1d(self):
# unique inputs
@@ -34,7 +34,7 @@ class TestSetOps(object):
def test_intersect1d_array_like(self):
# See gh-11772
- class Test(object):
+ class Test:
def __array__(self):
return np.arange(3)
@@ -413,7 +413,7 @@ class TestSetOps(object):
assert_array_equal(c1, c2)
-class TestUnique(object):
+class TestUnique:
def test_unique_1d(self):
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index e7a71c722..26e79bc06 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -16,7 +16,7 @@ def filter_deprecation(func):
return newfunc
-class TestFinancial(object):
+class TestFinancial:
@filter_deprecation
def test_npv_irr_congruence(self):
# IRR is defined as the rate required for the present value of a
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 42a8c591c..dfce2d55d 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -42,7 +42,7 @@ def _make_complex(real, imag):
return ret
-class TestRot90(object):
+class TestRot90:
def test_basic(self):
assert_raises(ValueError, rot90, np.ones(4))
assert_raises(ValueError, rot90, np.ones((2,2,2)), axes=(0,1,2))
@@ -110,7 +110,7 @@ class TestRot90(object):
rot90(a_rot90_20, k=k-1, axes=(2, 0)))
-class TestFlip(object):
+class TestFlip:
def test_axes(self):
assert_raises(np.AxisError, np.flip, np.ones(4), axis=1)
@@ -213,7 +213,7 @@ class TestFlip(object):
assert_equal(np.flip(a, axis=(1, 2)), c)
-class TestAny(object):
+class TestAny:
def test_basic(self):
y1 = [0, 0, 1, 0]
@@ -230,7 +230,7 @@ class TestAny(object):
assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1])
-class TestAll(object):
+class TestAll:
def test_basic(self):
y1 = [0, 1, 1, 0]
@@ -248,7 +248,7 @@ class TestAll(object):
assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1])
-class TestCopy(object):
+class TestCopy:
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
@@ -276,7 +276,7 @@ class TestCopy(object):
assert_(a_fort_copy.flags.f_contiguous)
-class TestAverage(object):
+class TestAverage:
def test_basic(self):
y1 = np.array([1, 2, 3])
@@ -377,7 +377,7 @@ class TestAverage(object):
w /= w.sum()
assert_almost_equal(a.mean(0), average(a, weights=w))
-class TestSelect(object):
+class TestSelect:
choices = [np.array([1, 2, 3]),
np.array([4, 5, 6]),
np.array([7, 8, 9])]
@@ -439,7 +439,7 @@ class TestSelect(object):
select(conditions, choices)
-class TestInsert(object):
+class TestInsert:
def test_basic(self):
a = [1, 2, 3]
@@ -540,7 +540,7 @@ class TestInsert(object):
assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype))
-class TestAmax(object):
+class TestAmax:
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -552,7 +552,7 @@ class TestAmax(object):
assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0])
-class TestAmin(object):
+class TestAmin:
def test_basic(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
@@ -564,7 +564,7 @@ class TestAmin(object):
assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0])
-class TestPtp(object):
+class TestPtp:
def test_basic(self):
a = np.array([3, 4, 5, 10, -3, -5, 6.0])
@@ -579,7 +579,7 @@ class TestPtp(object):
assert_equal(b.ptp(axis=(0,1), keepdims=True), [[8.0]])
-class TestCumsum(object):
+class TestCumsum:
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -602,7 +602,7 @@ class TestCumsum(object):
assert_array_equal(np.cumsum(a2, axis=1), tgt)
-class TestProd(object):
+class TestProd:
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -622,7 +622,7 @@ class TestProd(object):
np.array([24, 1890, 600], ctype))
-class TestCumprod(object):
+class TestCumprod:
def test_basic(self):
ba = [1, 2, 10, 11, 6, 5, 4]
@@ -649,7 +649,7 @@ class TestCumprod(object):
[10, 30, 120, 600]], ctype))
-class TestDiff(object):
+class TestDiff:
def test_basic(self):
x = [1, 4, 6, 7, 12]
@@ -789,7 +789,7 @@ class TestDiff(object):
assert_raises(np.AxisError, diff, x, append=0, axis=3)
-class TestDelete(object):
+class TestDelete:
def setup(self):
self.a = np.arange(5)
@@ -864,7 +864,7 @@ class TestDelete(object):
assert_equal(m.flags.f_contiguous, k.flags.f_contiguous)
-class TestGradient(object):
+class TestGradient:
def test_basic(self):
v = [[1, 1], [3, 4]]
@@ -1116,7 +1116,7 @@ class TestGradient(object):
assert_array_equal(dfdx, [0.5, 0.5])
-class TestAngle(object):
+class TestAngle:
def test_basic(self):
x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
@@ -1142,7 +1142,7 @@ class TestAngle(object):
assert_equal(actual, expected)
-class TestTrimZeros(object):
+class TestTrimZeros:
"""
Only testing for integer splits.
@@ -1165,7 +1165,7 @@ class TestTrimZeros(object):
assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))
-class TestExtins(object):
+class TestExtins:
def test_basic(self):
a = np.array([1, 3, 2, 1, 2, 3, 3])
@@ -1204,7 +1204,7 @@ class TestExtins(object):
assert_array_equal(a, ac)
-class TestVectorize(object):
+class TestVectorize:
def test_simple(self):
def addsubtract(a, b):
@@ -1536,8 +1536,8 @@ class TestVectorize(object):
f(x)
-class TestLeaks(object):
- class A(object):
+class TestLeaks:
+ class A:
iters = 20
def bound(self, *args):
@@ -1579,7 +1579,7 @@ class TestLeaks(object):
finally:
gc.enable()
-class TestDigitize(object):
+class TestDigitize:
def test_forward(self):
x = np.arange(-6, 5)
@@ -1664,7 +1664,7 @@ class TestDigitize(object):
assert_equal(np.digitize(x, [x + 1, x - 1]), 1)
-class TestUnwrap(object):
+class TestUnwrap:
def test_simple(self):
        # check that unwrap removes jumps greater than 2*pi
@@ -1673,7 +1673,7 @@ class TestUnwrap(object):
assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))
-class TestFilterwindows(object):
+class TestFilterwindows:
def test_hanning(self):
# check symmetry
@@ -1704,7 +1704,7 @@ class TestFilterwindows(object):
assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
-class TestTrapz(object):
+class TestTrapz:
def test_simple(self):
x = np.arange(-10, 10, .1)
@@ -1766,7 +1766,7 @@ class TestTrapz(object):
assert_almost_equal(trapz(y, xm), r)
-class TestSinc(object):
+class TestSinc:
def test_simple(self):
assert_(sinc(0) == 1)
@@ -1783,7 +1783,7 @@ class TestSinc(object):
assert_array_equal(y1, y3)
-class TestUnique(object):
+class TestUnique:
def test_simple(self):
x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
@@ -1795,7 +1795,7 @@ class TestUnique(object):
assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))
-class TestCheckFinite(object):
+class TestCheckFinite:
def test_simple(self):
a = [1, 2, 3]
@@ -1812,7 +1812,7 @@ class TestCheckFinite(object):
assert_(a.dtype == np.float64)
-class TestCorrCoef(object):
+class TestCorrCoef:
A = np.array(
[[0.15391142, 0.18045767, 0.14197213],
[0.70461506, 0.96474128, 0.27906989],
@@ -1897,7 +1897,7 @@ class TestCorrCoef(object):
assert_(np.all(np.abs(c) <= 1.0))
-class TestCov(object):
+class TestCov:
x1 = np.array([[0, 2], [1, 1], [2, 0]]).T
res1 = np.array([[1., -1.], [-1., 1.]])
x2 = np.array([0.0, 1.0, 2.0], ndmin=2)
@@ -1997,7 +1997,7 @@ class TestCov(object):
self.res1)
-class Test_I0(object):
+class Test_I0:
def test_simple(self):
assert_almost_equal(
@@ -2043,7 +2043,7 @@ class Test_I0(object):
assert_array_equal(exp, res)
-class TestKaiser(object):
+class TestKaiser:
def test_simple(self):
assert_(np.isfinite(kaiser(1, 1.0)))
@@ -2062,7 +2062,7 @@ class TestKaiser(object):
kaiser(3, 4)
-class TestMsort(object):
+class TestMsort:
def test_simple(self):
A = np.array([[0.44567325, 0.79115165, 0.54900530],
@@ -2075,7 +2075,7 @@ class TestMsort(object):
[0.64864341, 0.79115165, 0.96098397]]))
-class TestMeshgrid(object):
+class TestMeshgrid:
def test_simple(self):
[X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
@@ -2164,7 +2164,7 @@ class TestMeshgrid(object):
assert_equal(x[1, :], X)
-class TestPiecewise(object):
+class TestPiecewise:
def test_simple(self):
# Condition is single bool list
@@ -2256,7 +2256,7 @@ class TestPiecewise(object):
[3., 3., 1.]]))
-class TestBincount(object):
+class TestBincount:
def test_simple(self):
y = np.bincount(np.arange(4))
@@ -2343,7 +2343,7 @@ class TestBincount(object):
assert_equal(sys.getrefcount(np.dtype(np.double)), double_refcount)
-class TestInterp(object):
+class TestInterp:
def test_exceptions(self):
assert_raises(ValueError, interp, 0, [], [])
@@ -2542,7 +2542,7 @@ def compare_results(res, desired):
assert_array_equal(res[i], desired[i])
-class TestPercentile(object):
+class TestPercentile:
def test_basic(self):
x = np.arange(8) * 0.5
@@ -2963,7 +2963,7 @@ class TestPercentile(object):
a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
-class TestQuantile(object):
+class TestQuantile:
# most of this is already tested by TestPercentile
def test_basic(self):
@@ -3005,7 +3005,7 @@ class TestQuantile(object):
assert_array_equal(p, p0)
-class TestMedian(object):
+class TestMedian:
def test_basic(self):
a0 = np.array(1)
@@ -3244,7 +3244,7 @@ class TestMedian(object):
(1, 1, 7, 1))
-class TestAdd_newdoc_ufunc(object):
+class TestAdd_newdoc_ufunc:
def test_ufunc_arg(self):
assert_raises(TypeError, add_newdoc_ufunc, 2, "blah")
@@ -3254,7 +3254,7 @@ class TestAdd_newdoc_ufunc(object):
assert_raises(TypeError, add_newdoc_ufunc, np.add, 3)
-class TestAdd_newdoc(object):
+class TestAdd_newdoc:
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
@@ -3265,7 +3265,7 @@ class TestAdd_newdoc(object):
assert_(len(np.core.ufunc.identity.__doc__) > 300)
assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
-class TestSortComplex(object):
+class TestSortComplex:
@pytest.mark.parametrize("type_in, type_out", [
('l', 'D'),
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index a78c3f4ec..c21103891 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -9,7 +9,7 @@ from numpy.testing import (
import pytest
-class TestHistogram(object):
+class TestHistogram:
def setup(self):
pass
@@ -422,7 +422,7 @@ class TestHistogram(object):
assert_array_equal(edges, e)
-class TestHistogramOptimBinNums(object):
+class TestHistogramOptimBinNums:
"""
Provide test coverage when using provided estimators for optimal number of
bins
@@ -610,7 +610,7 @@ class TestHistogramOptimBinNums(object):
estimator, weights=[1, 2, 3])
-class TestHistogramdd(object):
+class TestHistogramdd:
def test_simple(self):
x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index bfc37ef9c..905165a99 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -12,7 +12,7 @@ from numpy.lib.index_tricks import (
)
-class TestRavelUnravelIndex(object):
+class TestRavelUnravelIndex:
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
@@ -192,7 +192,7 @@ class TestRavelUnravelIndex(object):
with assert_raises(ValueError):
np.unravel_index([1], (2, 1, 0))
-class TestGrid(object):
+class TestGrid:
def test_basic(self):
a = mgrid[-1:1:10j]
b = mgrid[-1:1:0.1]
@@ -250,7 +250,7 @@ class TestGrid(object):
assert_equal(grid_small.size, expected[1])
-class TestConcatenator(object):
+class TestConcatenator:
def test_1d(self):
assert_array_equal(r_[1, 2, 3, 4, 5, 6], np.array([1, 2, 3, 4, 5, 6]))
b = np.ones(5)
@@ -288,14 +288,14 @@ class TestConcatenator(object):
assert_equal(r_[np.array(0), [1, 2, 3]], [0, 1, 2, 3])
-class TestNdenumerate(object):
+class TestNdenumerate:
def test_basic(self):
a = np.array([[1, 2], [3, 4]])
assert_equal(list(ndenumerate(a)),
[((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])
-class TestIndexExpression(object):
+class TestIndexExpression:
def test_regression_1(self):
# ticket #1196
a = np.arange(2)
@@ -309,7 +309,7 @@ class TestIndexExpression(object):
assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])
-class TestIx_(object):
+class TestIx_:
def test_regression_1(self):
# Test empty untyped inputs create outputs of indexing type, gh-5804
a, = np.ix_(range(0))
@@ -356,7 +356,7 @@ def test_c_():
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
-class TestFillDiagonal(object):
+class TestFillDiagonal:
def test_basic(self):
a = np.zeros((3, 3), int)
fill_diagonal(a, 5)
@@ -455,7 +455,7 @@ def test_diag_indices():
)
-class TestDiagIndicesFrom(object):
+class TestDiagIndicesFrom:
def test_diag_indices_from(self):
x = np.random.random((4, 4))
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 12ce65984..ba27eea6c 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -69,7 +69,7 @@ def strptime(s, fmt=None):
return datetime(*time.strptime(s, fmt)[:3])
-class RoundtripTest(object):
+class RoundtripTest:
def roundtrip(self, save_func, *args, **kwargs):
"""
save_func : callable
@@ -316,7 +316,7 @@ class TestSavezLoad(RoundtripTest):
assert_(fp.closed)
-class TestSaveTxt(object):
+class TestSaveTxt:
def test_array(self):
a = np.array([[1, 2], [3, 4]], float)
fmt = "%.18e"
@@ -582,7 +582,7 @@ class TestSaveTxt(object):
with tempdir() as tmpdir:
np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
-class LoadTxtBase(object):
+class LoadTxtBase:
def check_compressed(self, fopen, suffixes):
# Test that we can load data from a compressed file
wanted = np.arange(6).reshape((2, 3))
@@ -824,7 +824,7 @@ class TestLoadTxt(LoadTxtBase):
assert_array_equal(x, a[:, 1])
# Testing with some crazy custom integer type
- class CrazyInt(object):
+ class CrazyInt:
def __index__(self):
return 1
@@ -1156,7 +1156,7 @@ class TestLoadTxt(LoadTxtBase):
a = np.array([[1, 2, 3, 5], [4, 5, 7, 8], [2, 1, 4, 5]], int)
assert_array_equal(x, a)
-class Testfromregex(object):
+class Testfromregex:
def test_record(self):
c = TextIO()
c.write('1.312 foo\n1.534 bar\n4.444 qux')
@@ -2347,7 +2347,7 @@ M 33 21.99
@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
-class TestPathUsage(object):
+class TestPathUsage:
# Test that pathlib.Path can be used
def test_loadtxt(self):
with temppath(suffix='.txt') as path:
@@ -2480,7 +2480,7 @@ def test_gzip_load():
# These next two classes encode the minimal API needed to save()/load() arrays.
# The `test_ducktyping` ensures they work correctly
-class JustWriter(object):
+class JustWriter:
def __init__(self, base):
self.base = base
@@ -2490,7 +2490,7 @@ class JustWriter(object):
def flush(self):
return self.base.flush()
-class JustReader(object):
+class JustReader:
def __init__(self, base):
self.base = base
diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py
index 948268e44..7c22dae94 100644
--- a/numpy/lib/tests/test_mixins.py
+++ b/numpy/lib/tests/test_mixins.py
@@ -96,7 +96,7 @@ _ALL_BINARY_OPERATORS = [
]
-class TestNDArrayOperatorsMixin(object):
+class TestNDArrayOperatorsMixin:
def test_array_like_add(self):
@@ -126,7 +126,7 @@ class TestNDArrayOperatorsMixin(object):
def test_opt_out(self):
- class OptOut(object):
+ class OptOut:
"""Object that opts out of __array_ufunc__."""
__array_ufunc__ = None
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index b0a7a04ee..db563e30c 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -35,7 +35,7 @@ _ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
-class TestNanFunctions_MinMax(object):
+class TestNanFunctions_MinMax:
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
@@ -169,7 +169,7 @@ class TestNanFunctions_MinMax(object):
assert_(issubclass(w[0].category, RuntimeWarning))
-class TestNanFunctions_ArgminArgmax(object):
+class TestNanFunctions_ArgminArgmax:
nanfuncs = [np.nanargmin, np.nanargmax]
@@ -231,7 +231,7 @@ class TestNanFunctions_ArgminArgmax(object):
assert_(res.shape == ())
-class TestNanFunctions_IntTypes(object):
+class TestNanFunctions_IntTypes:
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
@@ -306,7 +306,7 @@ class TestNanFunctions_IntTypes(object):
assert_equal(np.nanstd(mat, ddof=1), tgt)
-class SharedNanFunctionsTestsMixin(object):
+class SharedNanFunctionsTestsMixin:
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
@@ -588,7 +588,7 @@ class TestNanFunctions_MeanVarStd(SharedNanFunctionsTestsMixin):
assert_(len(w) == 0)
-class TestNanFunctions_Median(object):
+class TestNanFunctions_Median:
def test_mutation(self):
# Check that passed array is not modified.
@@ -752,7 +752,7 @@ class TestNanFunctions_Median(object):
([np.nan] * i) + [-inf] * j)
-class TestNanFunctions_Percentile(object):
+class TestNanFunctions_Percentile:
def test_mutation(self):
# Check that passed array is not modified.
@@ -891,7 +891,7 @@ class TestNanFunctions_Percentile(object):
assert_equal(np.nanpercentile(megamat, perc, axis=(1, 2)).shape, (2, 3, 6))
-class TestNanFunctions_Quantile(object):
+class TestNanFunctions_Quantile:
# most of this is already tested by TestPercentile
def test_regression(self):
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 0d827eadf..cd0b90dc4 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -5,7 +5,7 @@ from numpy.testing import (
)
-class TestPolynomial(object):
+class TestPolynomial:
def test_poly1d_str_and_repr(self):
p = np.poly1d([1., 2, 3])
assert_equal(repr(p), 'poly1d([1., 2., 3.])')
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index cb4efa6c3..2f3c14df3 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -17,7 +17,7 @@ zip_descr = np.lib.recfunctions._zip_descr
zip_dtype = np.lib.recfunctions._zip_dtype
-class TestRecFunctions(object):
+class TestRecFunctions:
# Misc tests
def setup(self):
@@ -346,7 +346,7 @@ class TestRecFunctions(object):
assert_equal(b[()], 3)
-class TestRecursiveFillFields(object):
+class TestRecursiveFillFields:
# Test recursive_fill_fields.
def test_simple_flexible(self):
# Test recursive_fill_fields on flexible-array
@@ -369,7 +369,7 @@ class TestRecursiveFillFields(object):
assert_equal(test, control)
-class TestMergeArrays(object):
+class TestMergeArrays:
# Test merge_arrays
def setup(self):
@@ -502,7 +502,7 @@ class TestMergeArrays(object):
assert_equal(test, control)
-class TestAppendFields(object):
+class TestAppendFields:
# Test append_fields
def setup(self):
@@ -556,7 +556,7 @@ class TestAppendFields(object):
assert_equal(test, control)
-class TestStackArrays(object):
+class TestStackArrays:
# Test stack_arrays
def setup(self):
x = np.array([1, 2, ])
@@ -727,7 +727,7 @@ class TestStackArrays(object):
assert_equal(res.mask, expected.mask)
-class TestJoinBy(object):
+class TestJoinBy:
def setup(self):
self.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
np.arange(100, 110))),
@@ -892,7 +892,7 @@ class TestJoinBy(object):
assert_equal(res.dtype, expected_dtype)
-class TestJoinBy2(object):
+class TestJoinBy2:
@classmethod
def setup(cls):
cls.a = np.array(list(zip(np.arange(10), np.arange(50, 60),
@@ -957,7 +957,7 @@ class TestJoinBy2(object):
assert_equal(test.dtype, control.dtype)
assert_equal(test, control)
-class TestAppendFieldsObj(object):
+class TestAppendFieldsObj:
"""
Test append_fields with arrays containing objects
"""
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index 6de89103a..019b7595e 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -9,7 +9,7 @@ from numpy.testing import (
from numpy.compat import unicode
-class TestRegression(object):
+class TestRegression:
def test_poly1d(self):
# Ticket #28
assert_equal(np.poly1d([1]) - np.poly1d([1, 0]),
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 86e3744b8..fb7ba7874 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -27,7 +27,7 @@ def _add_keepdims(func):
return wrapped
-class TestTakeAlongAxis(object):
+class TestTakeAlongAxis:
def test_argequivalent(self):
""" Test it translates from arg<func> to <func> """
from numpy.random import rand
@@ -79,7 +79,7 @@ class TestTakeAlongAxis(object):
assert_equal(actual.shape, (3, 2, 5))
-class TestPutAlongAxis(object):
+class TestPutAlongAxis:
def test_replace_max(self):
a_base = np.array([[10, 30, 20], [60, 40, 50]])
@@ -104,7 +104,7 @@ class TestPutAlongAxis(object):
assert_equal(take_along_axis(a, ai, axis=1), 20)
-class TestApplyAlongAxis(object):
+class TestApplyAlongAxis:
def test_simple(self):
a = np.ones((20, 10), 'd')
assert_array_equal(
@@ -270,14 +270,14 @@ class TestApplyAlongAxis(object):
assert_equal(type(actual[i]), type(expected[i]))
-class TestApplyOverAxes(object):
+class TestApplyOverAxes:
def test_simple(self):
a = np.arange(24).reshape(2, 3, 4)
aoa_a = apply_over_axes(np.sum, a, [0, 2])
assert_array_equal(aoa_a, np.array([[[60], [92], [124]]]))
-class TestExpandDims(object):
+class TestExpandDims:
def test_functionality(self):
s = (2, 3, 4, 5)
a = np.empty(s)
@@ -317,7 +317,7 @@ class TestExpandDims(object):
assert_equal(expanded.mask.shape, (2, 1, 5))
-class TestArraySplit(object):
+class TestArraySplit:
def test_integer_0_split(self):
a = np.arange(10)
assert_raises(ValueError, array_split, a, 0)
@@ -451,7 +451,7 @@ class TestArraySplit(object):
compare_results(res, desired)
-class TestSplit(object):
+class TestSplit:
# The split function is essentially the same as array_split,
# except that it tests if splitting will result in an
# equal split. Only test for this case.
@@ -467,7 +467,7 @@ class TestSplit(object):
assert_raises(ValueError, split, a, 3)
-class TestColumnStack(object):
+class TestColumnStack:
def test_non_iterable(self):
assert_raises(TypeError, column_stack, 1)
@@ -496,7 +496,7 @@ class TestColumnStack(object):
column_stack((np.arange(3) for _ in range(2)))
-class TestDstack(object):
+class TestDstack:
def test_non_iterable(self):
assert_raises(TypeError, dstack, 1)
@@ -535,7 +535,7 @@ class TestDstack(object):
# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
-class TestHsplit(object):
+class TestHsplit:
"""Only testing for integer splits.
"""
@@ -564,7 +564,7 @@ class TestHsplit(object):
compare_results(res, desired)
-class TestVsplit(object):
+class TestVsplit:
"""Only testing for integer splits.
"""
@@ -591,7 +591,7 @@ class TestVsplit(object):
compare_results(res, desired)
-class TestDsplit(object):
+class TestDsplit:
# Only testing for integer splits.
def test_non_iterable(self):
assert_raises(ValueError, dsplit, 1, 1)
@@ -624,7 +624,7 @@ class TestDsplit(object):
compare_results(res, desired)
-class TestSqueeze(object):
+class TestSqueeze:
def test_basic(self):
from numpy.random import rand
@@ -643,7 +643,7 @@ class TestSqueeze(object):
assert_equal(type(res), np.ndarray)
-class TestKron(object):
+class TestKron:
def test_return_type(self):
class myarray(np.ndarray):
__array_priority__ = 0.0
@@ -656,7 +656,7 @@ class TestKron(object):
assert_equal(type(kron(ma, a)), myarray)
-class TestTile(object):
+class TestTile:
def test_basic(self):
a = np.array([0, 1, 2])
b = [[1, 2], [3, 4]]
@@ -696,7 +696,7 @@ class TestTile(object):
assert_equal(large, klarge)
-class TestMayShareMemory(object):
+class TestMayShareMemory:
def test_basic(self):
d = np.ones((50, 60))
d2 = np.ones((30, 60, 6))
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index 6131ba5e1..9d95eb9d0 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -63,8 +63,7 @@ def test_broadcast_kwargs():
x = np.arange(10)
y = np.arange(10)
- with assert_raises_regex(TypeError,
- r'broadcast_arrays\(\) got an unexpected keyword*'):
+ with assert_raises_regex(TypeError, 'got an unexpected keyword'):
broadcast_arrays(x, y, dtype='float64')
@@ -354,14 +353,12 @@ def as_strided_writeable():
class VerySimpleSubClass(np.ndarray):
def __new__(cls, *args, **kwargs):
- kwargs['subok'] = True
- return np.array(*args, **kwargs).view(cls)
+ return np.array(*args, subok=True, **kwargs).view(cls)
class SimpleSubClass(VerySimpleSubClass):
def __new__(cls, *args, **kwargs):
- kwargs['subok'] = True
- self = np.array(*args, **kwargs).view(cls)
+ self = np.array(*args, subok=True, **kwargs).view(cls)
self.info = 'simple'
return self
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index 1377794f6..cce683bfe 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -24,7 +24,7 @@ def get_mat(n):
return data
-class TestEye(object):
+class TestEye:
def test_basic(self):
assert_equal(eye(4),
array([[1, 0, 0, 0],
@@ -106,7 +106,7 @@ class TestEye(object):
assert mat_f.flags.f_contiguous
-class TestDiag(object):
+class TestDiag:
def test_vector(self):
vals = (100 * arange(5)).astype('l')
b = zeros((5, 5))
@@ -153,7 +153,7 @@ class TestDiag(object):
assert_raises(ValueError, diag, [[[1]]])
-class TestFliplr(object):
+class TestFliplr:
def test_basic(self):
assert_raises(ValueError, fliplr, ones(4))
a = get_mat(4)
@@ -166,7 +166,7 @@ class TestFliplr(object):
assert_equal(fliplr(a), b)
-class TestFlipud(object):
+class TestFlipud:
def test_basic(self):
a = get_mat(4)
b = a[::-1, :]
@@ -178,7 +178,7 @@ class TestFlipud(object):
assert_equal(flipud(a), b)
-class TestHistogram2d(object):
+class TestHistogram2d:
def test_simple(self):
x = array(
[0.41702200, 0.72032449, 1.1437481e-4, 0.302332573, 0.146755891])
@@ -296,7 +296,7 @@ class TestHistogram2d(object):
assert_(r, ((ShouldDispatch,), (xy, xy), dict(weights=s_d)))
-class TestTri(object):
+class TestTri:
def test_dtype(self):
out = array([[1, 0, 0],
[1, 1, 0],
@@ -434,7 +434,7 @@ def test_tril_indices():
[-10, -10, -10, -10, -10]]))
-class TestTriuIndices(object):
+class TestTriuIndices:
def test_triu_indices(self):
iu1 = triu_indices(4)
iu2 = triu_indices(4, k=2)
@@ -484,21 +484,21 @@ class TestTriuIndices(object):
[16, 17, 18, -1, -1]]))
-class TestTrilIndicesFrom(object):
+class TestTrilIndicesFrom:
def test_exceptions(self):
assert_raises(ValueError, tril_indices_from, np.ones((2,)))
assert_raises(ValueError, tril_indices_from, np.ones((2, 2, 2)))
# assert_raises(ValueError, tril_indices_from, np.ones((2, 3)))
-class TestTriuIndicesFrom(object):
+class TestTriuIndicesFrom:
def test_exceptions(self):
assert_raises(ValueError, triu_indices_from, np.ones((2,)))
assert_raises(ValueError, triu_indices_from, np.ones((2, 2, 2)))
# assert_raises(ValueError, triu_indices_from, np.ones((2, 3)))
-class TestVander(object):
+class TestVander:
def test_basic(self):
c = np.array([0, 1, -2, 3])
v = vander(c)
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index a8af147f1..47685550a 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -13,7 +13,7 @@ def assert_all(x):
assert_(np.all(x), x)
-class TestCommonType(object):
+class TestCommonType:
def test_basic(self):
ai32 = np.array([[1, 2], [3, 4]], dtype=np.int32)
af16 = np.array([[1, 2], [3, 4]], dtype=np.float16)
@@ -29,7 +29,7 @@ class TestCommonType(object):
assert_(common_type(acd) == np.cdouble)
-class TestMintypecode(object):
+class TestMintypecode:
def test_default_1(self):
for itype in '1bcsuwil':
@@ -79,7 +79,7 @@ class TestMintypecode(object):
assert_equal(mintypecode('idD'), 'D')
-class TestIsscalar(object):
+class TestIsscalar:
def test_basic(self):
assert_(np.isscalar(3))
@@ -90,7 +90,7 @@ class TestIsscalar(object):
assert_(np.isscalar(4.0))
-class TestReal(object):
+class TestReal:
def test_real(self):
y = np.random.rand(10,)
@@ -121,7 +121,7 @@ class TestReal(object):
assert_(not isinstance(out, np.ndarray))
-class TestImag(object):
+class TestImag:
def test_real(self):
y = np.random.rand(10,)
@@ -152,7 +152,7 @@ class TestImag(object):
assert_(not isinstance(out, np.ndarray))
-class TestIscomplex(object):
+class TestIscomplex:
def test_fail(self):
z = np.array([-1, 0, 1])
@@ -165,7 +165,7 @@ class TestIscomplex(object):
assert_array_equal(res, [1, 0, 0])
-class TestIsreal(object):
+class TestIsreal:
def test_pass(self):
z = np.array([-1, 0, 1j])
@@ -178,7 +178,7 @@ class TestIsreal(object):
assert_array_equal(res, [0, 1, 1])
-class TestIscomplexobj(object):
+class TestIscomplexobj:
def test_basic(self):
z = np.array([-1, 0, 1])
@@ -207,7 +207,7 @@ class TestIscomplexobj(object):
# (pandas.core.dtypes)
class PdComplex(np.complex128):
pass
- class PdDtype(object):
+ class PdDtype:
name = 'category'
names = None
type = PdComplex
@@ -231,7 +231,7 @@ class TestIscomplexobj(object):
assert_(iscomplexobj(a))
-class TestIsrealobj(object):
+class TestIsrealobj:
def test_basic(self):
z = np.array([-1, 0, 1])
assert_(isrealobj(z))
@@ -239,7 +239,7 @@ class TestIsrealobj(object):
assert_(not isrealobj(z))
-class TestIsnan(object):
+class TestIsnan:
def test_goodvalues(self):
z = np.array((-1., 0., 1.))
@@ -269,7 +269,7 @@ class TestIsnan(object):
assert_all(np.isnan(np.array(0+0j)/0.) == 1)
-class TestIsfinite(object):
+class TestIsfinite:
# Fixme, wrong place, isfinite now ufunc
def test_goodvalues(self):
@@ -300,7 +300,7 @@ class TestIsfinite(object):
assert_all(np.isfinite(np.array(1+1j)/0.) == 0)
-class TestIsinf(object):
+class TestIsinf:
# Fixme, wrong place, isinf now ufunc
def test_goodvalues(self):
@@ -329,7 +329,7 @@ class TestIsinf(object):
assert_all(np.isinf(np.array((0.,))/0.) == 0)
-class TestIsposinf(object):
+class TestIsposinf:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -339,7 +339,7 @@ class TestIsposinf(object):
assert_(vals[2] == 1)
-class TestIsneginf(object):
+class TestIsneginf:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -349,7 +349,7 @@ class TestIsneginf(object):
assert_(vals[2] == 0)
-class TestNanToNum(object):
+class TestNanToNum:
def test_generic(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -454,7 +454,7 @@ class TestNanToNum(object):
assert_equal(type(vals), np.ndarray)
-class TestRealIfClose(object):
+class TestRealIfClose:
def test_basic(self):
a = np.random.rand(10)
@@ -467,7 +467,7 @@ class TestRealIfClose(object):
assert_all(isrealobj(b))
-class TestArrayConversion(object):
+class TestArrayConversion:
def test_asfarray(self):
a = asfarray(np.array([1, 2, 3]))
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 527e093e6..c280b6969 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -6,7 +6,7 @@ from numpy.testing import (
)
-class TestUfunclike(object):
+class TestUfunclike:
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index 20b394912..57c840342 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -100,7 +100,7 @@ def test_safe_eval_nameconstant():
utils.safe_eval('None')
-class TestByteBounds(object):
+class TestByteBounds:
def test_byte_bounds(self):
# pointer difference matches size * itemsize
diff --git a/numpy/lib/user_array.py b/numpy/lib/user_array.py
index 9172d4249..9c266fd6b 100644
--- a/numpy/lib/user_array.py
+++ b/numpy/lib/user_array.py
@@ -14,7 +14,7 @@ from numpy.core import (
from numpy.compat import long
-class container(object):
+class container:
"""
container(data, dtype=None, copy=True)
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 8b335a9d8..d41a6e541 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -53,7 +53,7 @@ def _set_function_name(func, name):
return func
-class _Deprecate(object):
+class _Deprecate:
"""
Decorator class to deprecate old functions.
diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py
index 427ccdd14..2ddd083ea 100644
--- a/numpy/linalg/lapack_lite/clapack_scrub.py
+++ b/numpy/linalg/lapack_lite/clapack_scrub.py
@@ -104,7 +104,7 @@ def cleanSource(source):
source = re.sub(r'\n\n\n\n+', r'\n\n\n', source)
return source
-class LineQueue(object):
+class LineQueue:
def __init__(self):
object.__init__(self)
self._queue = []
diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py
index 34bcdbd4e..388c88daa 100644
--- a/numpy/linalg/lapack_lite/fortran.py
+++ b/numpy/linalg/lapack_lite/fortran.py
@@ -24,7 +24,7 @@ def lineType(line):
else:
return STATEMENT
-class LineIterator(object):
+class LineIterator:
"""LineIterator(iterable)
Return rstrip()'d lines from iterable, while keeping a count of the
@@ -47,7 +47,7 @@ class LineIterator(object):
next = __next__
-class PushbackIterator(object):
+class PushbackIterator:
"""PushbackIterator(iterable)
Return an iterator for which items can be pushed back into.
diff --git a/numpy/linalg/lapack_lite/make_lite.py b/numpy/linalg/lapack_lite/make_lite.py
index bbed177ee..4b1a0ad82 100755
--- a/numpy/linalg/lapack_lite/make_lite.py
+++ b/numpy/linalg/lapack_lite/make_lite.py
@@ -63,7 +63,7 @@ them.
#endif
'''
-class FortranRoutine(object):
+class FortranRoutine:
"""Wrapper for a Fortran routine in a file.
"""
type = 'generic'
@@ -95,7 +95,7 @@ class UnknownFortranRoutine(FortranRoutine):
def dependencies(self):
return []
-class FortranLibrary(object):
+class FortranLibrary:
"""Container for a bunch of Fortran routines.
"""
def __init__(self, src_dirs):
diff --git a/numpy/linalg/tests/test_build.py b/numpy/linalg/tests/test_build.py
index 9517b3701..cbf3089bc 100644
--- a/numpy/linalg/tests/test_build.py
+++ b/numpy/linalg/tests/test_build.py
@@ -7,7 +7,7 @@ from numpy.linalg import lapack_lite
from numpy.testing import assert_
-class FindDependenciesLdd(object):
+class FindDependenciesLdd:
def __init__(self):
self.cmd = ['ldd']
@@ -39,7 +39,7 @@ class FindDependenciesLdd(object):
return founds
-class TestF77Mismatch(object):
+class TestF77Mismatch:
@pytest.mark.skipif(not(sys.platform[:5] == 'linux'),
reason="no fortran compiler on non-Linux platform")
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 92ee6d2f3..59c71d196 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -66,7 +66,7 @@ all_tags = {
}
-class LinalgCase(object):
+class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
@@ -331,7 +331,7 @@ CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
-class LinalgTestCase(object):
+class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
@@ -632,7 +632,7 @@ class TestEig(EigCases):
assert_(isinstance(a, np.ndarray))
-class SVDBaseTests(object):
+class SVDBaseTests:
hermitian = False
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
@@ -974,7 +974,7 @@ class TestLstsq(LstsqCases):
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
-class TestMatrixPower(object):
+class TestMatrixPower:
rshft_0 = np.eye(4)
rshft_1 = rshft_0[[3, 0, 1, 2]]
@@ -1074,7 +1074,7 @@ class TestEigvalshCases(HermitianTestCase, HermitianGeneralizedTestCase):
assert_allclose(ev2, evalues, rtol=get_rtol(ev.dtype))
-class TestEigvalsh(object):
+class TestEigvalsh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1150,7 +1150,7 @@ class TestEighCases(HermitianTestCase, HermitianGeneralizedTestCase):
rtol=get_rtol(ev.dtype), err_msg=repr(a))
-class TestEigh(object):
+class TestEigh:
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -1209,7 +1209,7 @@ class TestEigh(object):
assert_(isinstance(a, np.ndarray))
-class _TestNormBase(object):
+class _TestNormBase:
dt = None
dec = None
@@ -1495,7 +1495,7 @@ class _TestNorm(_TestNorm2D, _TestNormGeneral):
pass
-class TestNorm_NonSystematic(object):
+class TestNorm_NonSystematic:
def test_longdouble_norm(self):
# Non-regression test: p-norm of longdouble would previously raise
@@ -1550,7 +1550,7 @@ class TestNormInt64(_TestNorm, _TestNormInt64Base):
pass
-class TestMatrixRank(object):
+class TestMatrixRank:
def test_matrix_rank(self):
# Full rank matrix
@@ -1599,7 +1599,7 @@ def test_reduced_rank():
assert_equal(matrix_rank(X), 8)
-class TestQR(object):
+class TestQR:
# Define the array class here, so run this on matrices elsewhere.
array = np.array
@@ -1699,7 +1699,7 @@ class TestQR(object):
self.check_qr(m2.T)
-class TestCholesky(object):
+class TestCholesky:
# TODO: are there no other tests for cholesky?
def test_basic_property(self):
@@ -1861,7 +1861,7 @@ def test_sdot_bug_8577():
subprocess.check_call([sys.executable, "-c", code])
-class TestMultiDot(object):
+class TestMultiDot:
def test_basic_function_with_three_arguments(self):
# multi_dot with three arguments uses a fast hand coded algorithm to
@@ -1955,7 +1955,7 @@ class TestMultiDot(object):
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
-class TestTensorinv(object):
+class TestTensorinv:
@pytest.mark.parametrize("arr, ind", [
(np.ones((4, 6, 8, 2)), 2),
diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py
index 4671dfee7..1ae207b49 100644
--- a/numpy/linalg/tests/test_regression.py
+++ b/numpy/linalg/tests/test_regression.py
@@ -10,7 +10,7 @@ from numpy.testing import (
)
-class TestRegression(object):
+class TestRegression:
def test_eig_build(self):
# Ticket #652
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 3fa0d63b3..d51d8e6ec 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -798,7 +798,7 @@ ufunc_domain = {}
ufunc_fills = {}
-class _DomainCheckInterval(object):
+class _DomainCheckInterval:
"""
Define a valid interval, so that :
@@ -823,7 +823,7 @@ class _DomainCheckInterval(object):
umath.less(x, self.a))
-class _DomainTan(object):
+class _DomainTan:
"""
Define a valid interval for the `tan` function, so that:
@@ -841,7 +841,7 @@ class _DomainTan(object):
return umath.less(umath.absolute(umath.cos(x)), self.eps)
-class _DomainSafeDivide(object):
+class _DomainSafeDivide:
"""
Define a domain for safe division.
@@ -862,7 +862,7 @@ class _DomainSafeDivide(object):
return umath.absolute(a) * self.tolerance >= umath.absolute(b)
-class _DomainGreater(object):
+class _DomainGreater:
"""
DomainGreater(v)(x) is True where x <= v.
@@ -878,7 +878,7 @@ class _DomainGreater(object):
return umath.less_equal(x, self.critical_value)
-class _DomainGreaterEqual(object):
+class _DomainGreaterEqual:
"""
DomainGreaterEqual(v)(x) is True where x < v.
@@ -894,7 +894,7 @@ class _DomainGreaterEqual(object):
return umath.less(x, self.critical_value)
-class _MaskedUFunc(object):
+class _MaskedUFunc:
def __init__(self, ufunc):
self.f = ufunc
self.__doc__ = ufunc.__doc__
@@ -2384,7 +2384,7 @@ def masked_invalid(a, copy=True):
###############################################################################
-class _MaskedPrintOption(object):
+class _MaskedPrintOption:
"""
Handle the string used to represent missing data in a masked array.
@@ -2602,7 +2602,7 @@ def _arraymethod(funcname, onmask=True):
return wrapped_method
-class MaskedIterator(object):
+class MaskedIterator:
"""
Flat iterator object to iterate over masked arrays.
@@ -6644,7 +6644,7 @@ ptp.__doc__ = MaskedArray.ptp.__doc__
##############################################################################
-class _frommethod(object):
+class _frommethod:
"""
Define functions from existing MaskedArray methods.
@@ -7978,7 +7978,7 @@ def fromflex(fxarray):
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
-class _convert2ma(object):
+class _convert2ma:
"""
Convert functions from numpy to numpy.ma.
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 221e648c5..31648fb2e 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -212,7 +212,7 @@ def masked_all_like(arr):
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
-class _fromnxfunction(object):
+class _fromnxfunction:
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 83520d6b9..4ff7866ab 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -85,7 +85,7 @@ def _get_fieldmask(self):
return fdmask
-class MaskedRecords(MaskedArray, object):
+class MaskedRecords(MaskedArray):
"""
Attributes
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 31f973e68..458b78580 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -63,7 +63,7 @@ num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD']
num_ids = [dt_.char for dt_ in num_dts]
-class TestMaskedArray(object):
+class TestMaskedArray:
# Base test class for MaskedArrays.
def setup(self):
@@ -944,7 +944,7 @@ class TestMaskedArray(object):
assert_(mx2[0] == 0.)
-class TestMaskedArrayArithmetic(object):
+class TestMaskedArrayArithmetic:
# Base test class for MaskedArrays.
def setup(self):
@@ -1713,7 +1713,7 @@ class TestMaskedArrayArithmetic(object):
assert_equal(a.mask, [0, 0, 0, 0, 1])
-class TestMaskedArrayAttributes(object):
+class TestMaskedArrayAttributes:
def test_keepmask(self):
# Tests the keep mask flag
@@ -1889,7 +1889,7 @@ class TestMaskedArrayAttributes(object):
assert_equal(m._mask, np.ma.nomask)
-class TestFillingValues(object):
+class TestFillingValues:
def test_check_on_scalar(self):
# Test _check_fill_value set to valid and invalid values
@@ -2227,7 +2227,7 @@ class TestFillingValues(object):
assert_equal(a["f1"].fill_value, default_fill_value("eggs"))
-class TestUfuncs(object):
+class TestUfuncs:
# Test class for the application of ufuncs on MaskedArrays.
def setup(self):
@@ -2307,7 +2307,7 @@ class TestUfuncs(object):
assert_raises(TypeError, operator.mul, a, "abc")
assert_raises(TypeError, operator.truediv, a, "abc")
- class MyClass(object):
+ class MyClass:
__array_priority__ = a.__array_priority__ + 1
def __mul__(self, other):
@@ -2321,7 +2321,7 @@ class TestUfuncs(object):
assert_(a * me == "My rmul")
# and that __array_priority__ is respected
- class MyClass2(object):
+ class MyClass2:
__array_priority__ = 100
def __mul__(self, other):
@@ -2371,7 +2371,7 @@ class TestUfuncs(object):
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
-class TestMaskedArrayInPlaceArithmetics(object):
+class TestMaskedArrayInPlaceArithmetics:
# Test MaskedArray Arithmetics
def setup(self):
@@ -2873,7 +2873,7 @@ class TestMaskedArrayInPlaceArithmetics(object):
assert_equal(len(w), 0, "Failed on type=%s." % t)
-class TestMaskedArrayMethods(object):
+class TestMaskedArrayMethods:
# Test class for miscellaneous MaskedArrays methods.
def setup(self):
# Base data definition.
@@ -3580,7 +3580,7 @@ class TestMaskedArrayMethods(object):
assert_equal(xd.data, x.diagonal().data)
-class TestMaskedArrayMathMethods(object):
+class TestMaskedArrayMathMethods:
def setup(self):
# Base data definition.
@@ -3858,7 +3858,7 @@ class TestMaskedArrayMathMethods(object):
assert_equal(a.max(1), [3, 6])
-class TestMaskedArrayMathMethodsComplex(object):
+class TestMaskedArrayMathMethodsComplex:
# Test class for miscellaneous MaskedArrays methods.
def setup(self):
# Base data definition.
@@ -3911,7 +3911,7 @@ class TestMaskedArrayMathMethodsComplex(object):
mX[:, k].compressed().std())
-class TestMaskedArrayFunctions(object):
+class TestMaskedArrayFunctions:
# Test class for miscellaneous functions.
def setup(self):
@@ -4550,7 +4550,7 @@ class TestMaskedArrayFunctions(object):
assert_equal(test, masked_equal([-1, -1, -1, -1, -1], -1))
-class TestMaskedFields(object):
+class TestMaskedFields:
def setup(self):
ilist = [1, 2, 3, 4, 5]
@@ -4712,7 +4712,7 @@ class TestMaskedFields(object):
assert_equal(len(rec), len(self.data['ddtype']))
-class TestMaskedObjectArray(object):
+class TestMaskedObjectArray:
def test_getitem(self):
arr = np.ma.array([None, None])
@@ -4760,7 +4760,7 @@ class TestMaskedObjectArray(object):
assert_(arr[0] is np.ma.masked)
-class TestMaskedView(object):
+class TestMaskedView:
def setup(self):
iterator = list(zip(np.arange(10), np.random.rand(10)))
@@ -4838,7 +4838,7 @@ class TestMaskedView(object):
assert_(not isinstance(test, MaskedArray))
-class TestOptionalArgs(object):
+class TestOptionalArgs:
def test_ndarrayfuncs(self):
# test axis arg behaves the same as ndarray (including multiple axes)
@@ -4925,7 +4925,7 @@ class TestOptionalArgs(object):
assert_raises(np.AxisError, count, np.ma.array(1), axis=1)
-class TestMaskedConstant(object):
+class TestMaskedConstant:
def _do_add_test(self, add):
# sanity check
assert_(add(np.ma.masked, 1) is np.ma.masked)
@@ -5042,7 +5042,7 @@ class TestMaskedConstant(object):
assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64)
-class TestMaskedWhereAliases(object):
+class TestMaskedWhereAliases:
# TODO: Test masked_object, masked_equal, ...
diff --git a/numpy/ma/tests/test_deprecations.py b/numpy/ma/tests/test_deprecations.py
index 7f44a2176..14f697375 100644
--- a/numpy/ma/tests/test_deprecations.py
+++ b/numpy/ma/tests/test_deprecations.py
@@ -6,7 +6,7 @@ from numpy.testing import assert_warns
from numpy.ma.testutils import assert_equal
from numpy.ma.core import MaskedArrayFutureWarning
-class TestArgsort(object):
+class TestArgsort:
""" gh-8701 """
def _test_base(self, argsort, cls):
arr_0d = np.array(1).view(cls)
@@ -35,7 +35,7 @@ class TestArgsort(object):
return self._test_base(np.ma.MaskedArray.argsort, np.ma.MaskedArray)
-class TestMinimumMaximum(object):
+class TestMinimumMaximum:
def test_minimum(self):
assert_warns(DeprecationWarning, np.ma.minimum, np.ma.array([1, 2]))
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index c36bcbbbb..1c8610625 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -32,7 +32,7 @@ from numpy.ma.extras import (
)
-class TestGeneric(object):
+class TestGeneric:
#
def test_masked_all(self):
# Tests masked_all
@@ -140,7 +140,7 @@ class TestGeneric(object):
assert_equal(test, [])
-class TestAverage(object):
+class TestAverage:
# Several tests of average. Why so many? Good point...
def test_testAverage1(self):
# Test of average.
@@ -271,7 +271,7 @@ class TestAverage(object):
assert_almost_equal(wav1.imag, expected1.imag)
-class TestConcatenator(object):
+class TestConcatenator:
# Tests for mr_, the equivalent of r_ for masked arrays.
def test_1d(self):
@@ -315,7 +315,7 @@ class TestConcatenator(object):
assert_equal(actual.data[:2], [1, 2])
-class TestNotMasked(object):
+class TestNotMasked:
# Tests notmasked_edges and notmasked_contiguous.
def test_edges(self):
@@ -385,7 +385,7 @@ class TestNotMasked(object):
])
-class TestCompressFunctions(object):
+class TestCompressFunctions:
def test_compress_nd(self):
# Tests compress_nd
@@ -650,7 +650,7 @@ class TestCompressFunctions(object):
assert_equal(a, res)
-class TestApplyAlongAxis(object):
+class TestApplyAlongAxis:
# Tests 2D functions
def test_3d(self):
a = arange(12.).reshape(2, 2, 3)
@@ -672,7 +672,7 @@ class TestApplyAlongAxis(object):
assert_equal(xa, [[2, 5], [8, 11]])
-class TestApplyOverAxes(object):
+class TestApplyOverAxes:
# Tests apply_over_axes
def test_basic(self):
a = arange(24).reshape(2, 3, 4)
@@ -685,7 +685,7 @@ class TestApplyOverAxes(object):
assert_equal(test, ctrl)
-class TestMedian(object):
+class TestMedian:
def test_pytype(self):
r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
assert_equal(r, np.inf)
@@ -1064,7 +1064,7 @@ class TestMedian(object):
assert_(type(np.ma.median(o.astype(object))), float)
-class TestCov(object):
+class TestCov:
def setup(self):
self.data = array(np.random.rand(12))
@@ -1131,7 +1131,7 @@ class TestCov(object):
x.shape[0] / frac))
-class TestCorrcoef(object):
+class TestCorrcoef:
def setup(self):
self.data = array(np.random.rand(12))
@@ -1238,7 +1238,7 @@ class TestCorrcoef(object):
control[:-1, :-1])
-class TestPolynomial(object):
+class TestPolynomial:
#
def test_polyfit(self):
# Tests polyfit
@@ -1296,7 +1296,7 @@ class TestPolynomial(object):
assert_almost_equal(a, a_)
-class TestArraySetOps(object):
+class TestArraySetOps:
def test_unique_onlist(self):
# Test unique on list
@@ -1528,7 +1528,7 @@ class TestArraySetOps(object):
assert_array_equal(setdiff1d(a, b), np.array(['c']))
-class TestShapeBase(object):
+class TestShapeBase:
def test_atleast_2d(self):
# Test atleast_2d
@@ -1584,7 +1584,7 @@ class TestShapeBase(object):
assert_equal(b.mask.shape, b.data.shape)
-class TestStack(object):
+class TestStack:
def test_stack_1d(self):
a = masked_array([0, 1, 2], mask=[0, 1, 0])
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 14d39d949..c2f859273 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -24,7 +24,7 @@ from numpy.ma.testutils import (
from numpy.compat import pickle
-class TestMRecords(object):
+class TestMRecords:
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
@@ -346,7 +346,7 @@ class TestMRecords(object):
dtype=mult.dtype))
-class TestView(object):
+class TestView:
def setup(self):
(a, b) = (np.arange(10), np.random.rand(10))
@@ -384,7 +384,7 @@ class TestView(object):
##############################################################################
-class TestMRecordsImport(object):
+class TestMRecordsImport:
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index 5d5046c09..96c7e3609 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -31,7 +31,7 @@ def eq(v, w, msg=''):
return result
-class TestMa(object):
+class TestMa:
def setup(self):
x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
@@ -698,7 +698,7 @@ class TestMa(object):
assert_equal(b[1].shape, ())
-class TestUfuncs(object):
+class TestUfuncs:
def setup(self):
self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)
@@ -763,7 +763,7 @@ class TestUfuncs(object):
assert_(eq(nonzero(x), [0]))
-class TestArrayMethods(object):
+class TestArrayMethods:
def setup(self):
x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index 32e8e30c1..9f3368489 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -4,7 +4,7 @@ from numpy.testing import (
)
-class TestRegression(object):
+class TestRegression:
def test_masked_array_create(self):
# Ticket #17
x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index 781079371..02aeebd17 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -78,7 +78,7 @@ msubarray = MSubArray
# and overrides __array_wrap__, updating the info dict, to check that this
# doesn't get destroyed by MaskedArray._update_from. But this one also needs
# its own iterator...
-class CSAIterator(object):
+class CSAIterator:
"""
Flat iterator object that uses its own setter/getter
(works around ndarray.flat not propagating subclass setters/getters
@@ -152,7 +152,7 @@ class ComplicatedSubArray(SubArray):
return obj
-class TestSubclassing(object):
+class TestSubclassing:
# Test suite for masked subclasses of ndarray.
def setup(self):
diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py
index fc63c18b5..83bd7852e 100644
--- a/numpy/ma/timer_comparison.py
+++ b/numpy/ma/timer_comparison.py
@@ -13,7 +13,7 @@ np.seterr(all='ignore')
pi = np.pi
-class ModuleTester(object):
+class ModuleTester:
def __init__(self, module):
self.module = module
self.allequal = module.allequal
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index 68151833b..a8070898f 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -14,7 +14,7 @@ from numpy.testing import (
from numpy.linalg import matrix_power
from numpy.matrixlib import mat
-class TestCtor(object):
+class TestCtor:
def test_basic(self):
A = np.array([[1, 2], [3, 4]])
mA = matrix(A)
@@ -61,7 +61,7 @@ class TestCtor(object):
assert_(np.all(b2 == mixresult))
-class TestProperties(object):
+class TestProperties:
def test_sum(self):
"""Test whether matrix.sum(axis=1) preserves orientation.
Fails in NumPy <= 0.9.6.2127.
@@ -194,7 +194,7 @@ class TestProperties(object):
B = matrix([[True], [True], [False]])
assert_array_equal(A, B)
-class TestCasting(object):
+class TestCasting:
def test_basic(self):
A = np.arange(100).reshape(10, 10)
mA = matrix(A)
@@ -213,7 +213,7 @@ class TestCasting(object):
assert_(np.all(mA != mB))
-class TestAlgebra(object):
+class TestAlgebra:
def test_basic(self):
import numpy.linalg as linalg
@@ -272,7 +272,7 @@ class TestAlgebra(object):
A*object()
-class TestMatrixReturn(object):
+class TestMatrixReturn:
def test_instance_methods(self):
a = matrix([1.0], dtype='f8')
methodargs = {
@@ -314,7 +314,7 @@ class TestMatrixReturn(object):
assert_(type(d) is np.ndarray)
-class TestIndexing(object):
+class TestIndexing:
def test_basic(self):
x = asmatrix(np.zeros((3, 2), float))
y = np.zeros((3, 1), float)
@@ -323,7 +323,7 @@ class TestIndexing(object):
assert_equal(x, [[0, 1], [0, 0], [0, 0]])
-class TestNewScalarIndexing(object):
+class TestNewScalarIndexing:
a = matrix([[1, 2], [3, 4]])
def test_dimesions(self):
@@ -390,7 +390,7 @@ class TestNewScalarIndexing(object):
assert_array_equal(x[[2, 1, 0],:], x[::-1,:])
-class TestPower(object):
+class TestPower:
def test_returntype(self):
a = np.array([[0, 1], [0, 0]])
assert_(type(matrix_power(a, 2)) is np.ndarray)
@@ -401,7 +401,7 @@ class TestPower(object):
assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])
-class TestShape(object):
+class TestShape:
a = np.array([[1], [2]])
m = matrix([[1], [2]])
diff --git a/numpy/matrixlib/tests/test_interaction.py b/numpy/matrixlib/tests/test_interaction.py
index c0b1a9fc1..608416ed7 100644
--- a/numpy/matrixlib/tests/test_interaction.py
+++ b/numpy/matrixlib/tests/test_interaction.py
@@ -288,7 +288,7 @@ def test_kron_matrix():
assert_equal(type(np.kron(m, a)), np.matrix)
-class TestConcatenatorMatrix(object):
+class TestConcatenatorMatrix:
# 2018-04-29: moved here from core.tests.test_index_tricks.
def test_matrix(self):
a = [1, 2]
diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py
index ab70be6ff..45424ecf0 100644
--- a/numpy/matrixlib/tests/test_masked_matrix.py
+++ b/numpy/matrixlib/tests/test_masked_matrix.py
@@ -27,7 +27,7 @@ class MMatrix(MaskedArray, np.matrix,):
return _view
-class TestMaskedMatrix(object):
+class TestMaskedMatrix:
def test_matrix_indexing(self):
# Tests conversions and indexing
x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
@@ -169,7 +169,7 @@ class TestMaskedMatrix(object):
assert_(not isinstance(test, MaskedArray))
-class TestSubclassing(object):
+class TestSubclassing:
# Test suite for masked subclasses of ndarray.
def setup(self):
@@ -210,7 +210,7 @@ class TestSubclassing(object):
assert_(isinstance(divide(mx, x), MMatrix))
assert_equal(divide(mx, mx), divide(xmx, xmx))
-class TestConcatenator(object):
+class TestConcatenator:
# Tests for mr_, the equivalent of r_ for masked arrays.
def test_matrix_builder(self):
diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py
index d34c6de0d..638d0d153 100644
--- a/numpy/matrixlib/tests/test_multiarray.py
+++ b/numpy/matrixlib/tests/test_multiarray.py
@@ -1,7 +1,7 @@
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_equal
-class TestView(object):
+class TestView:
def test_type(self):
x = np.array([1, 2, 3])
assert_(isinstance(x.view(np.matrix), np.matrix))
diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py
index 7e4b4d304..a772bb388 100644
--- a/numpy/matrixlib/tests/test_numeric.py
+++ b/numpy/matrixlib/tests/test_numeric.py
@@ -1,7 +1,7 @@
import numpy as np
from numpy.testing import assert_equal
-class TestDot(object):
+class TestDot:
def test_matscalar(self):
b1 = np.matrix(np.ones((3, 3), dtype=complex))
assert_equal(b1*1.0, b1)
diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py
index 187286dd7..a54d44020 100644
--- a/numpy/matrixlib/tests/test_regression.py
+++ b/numpy/matrixlib/tests/test_regression.py
@@ -2,7 +2,7 @@ import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
-class TestRegression(object):
+class TestRegression:
def test_kron_matrix(self):
# Ticket #71
x = np.matrix('[1 0; 1 0]')
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index 937e2cddd..b65e88a83 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -78,7 +78,7 @@ class PolyDomainError(PolyError):
# Base class for all polynomial types
#
-class PolyBase(object):
+class PolyBase:
"""
Base class for all polynomial types.
diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py
index ce442563c..ec0a7839a 100644
--- a/numpy/polynomial/tests/test_chebyshev.py
+++ b/numpy/polynomial/tests/test_chebyshev.py
@@ -28,7 +28,7 @@ T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
-class TestPrivate(object):
+class TestPrivate:
def test__cseries_to_zseries(self):
for i in range(5):
@@ -45,7 +45,7 @@ class TestPrivate(object):
assert_equal(res, tgt)
-class TestConstants(object):
+class TestConstants:
def test_chebdomain(self):
assert_equal(cheb.chebdomain, [-1, 1])
@@ -60,7 +60,7 @@ class TestConstants(object):
assert_equal(cheb.chebx, [0, 1])
-class TestArithmetic(object):
+class TestArithmetic:
def test_chebadd(self):
for i in range(5):
@@ -121,7 +121,7 @@ class TestArithmetic(object):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 2., 1.5])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -215,7 +215,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_chebint(self):
# check exceptions
@@ -317,7 +317,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_chebder(self):
# check exceptions
@@ -357,7 +357,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -405,7 +405,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_chebfit(self):
def f(x):
@@ -482,7 +482,7 @@ class TestFitting(object):
assert_almost_equal(coef1, coef2)
-class TestInterpolate(object):
+class TestInterpolate:
def f(self, x):
return x * (x - 1) * (x - 2)
@@ -507,7 +507,7 @@ class TestInterpolate(object):
assert_almost_equal(cheb.chebval(x, c), powx(x, p), decimal=12)
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, cheb.chebcompanion, [])
@@ -522,7 +522,7 @@ class TestCompanion(object):
assert_(cheb.chebcompanion([1, 2])[0, 0] == -.5)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = cheb.chebgauss(100)
@@ -541,7 +541,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_chebfromroots(self):
res = cheb.chebfromroots([])
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index a9da64e22..68656bc98 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -572,7 +572,7 @@ def test_ufunc_override(Poly):
-class TestLatexRepr(object):
+class TestLatexRepr:
"""Test the latex repr used by ipython """
def as_latex(self, obj):
@@ -626,7 +626,7 @@ class TestLatexRepr(object):
#
-class TestInterpolate(object):
+class TestInterpolate:
def f(self, x):
return x * (x - 1) * (x - 2)
diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py
index 50175cdb3..4b67c1b18 100644
--- a/numpy/polynomial/tests/test_hermite.py
+++ b/numpy/polynomial/tests/test_hermite.py
@@ -28,7 +28,7 @@ def trim(x):
return herm.hermtrim(x, tol=1e-6)
-class TestConstants(object):
+class TestConstants:
def test_hermdomain(self):
assert_equal(herm.hermdomain, [-1, 1])
@@ -43,7 +43,7 @@ class TestConstants(object):
assert_equal(herm.hermx, [0, .5])
-class TestArithmetic(object):
+class TestArithmetic:
x = np.linspace(-3, 3, 100)
def test_hermadd(self):
@@ -109,7 +109,7 @@ class TestArithmetic(object):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2.5, 1., .75])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -203,7 +203,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_hermint(self):
# check exceptions
@@ -305,7 +305,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_hermder(self):
# check exceptions
@@ -345,7 +345,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -393,7 +393,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_hermfit(self):
def f(x):
@@ -470,7 +470,7 @@ class TestFitting(object):
assert_almost_equal(coef1, coef2)
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, herm.hermcompanion, [])
@@ -485,7 +485,7 @@ class TestCompanion(object):
assert_(herm.hermcompanion([1, 2])[0, 0] == -.25)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = herm.hermgauss(100)
@@ -504,7 +504,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_hermfromroots(self):
res = herm.hermfromroots([])
diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py
index ec134d407..3052500cc 100644
--- a/numpy/polynomial/tests/test_hermite_e.py
+++ b/numpy/polynomial/tests/test_hermite_e.py
@@ -28,7 +28,7 @@ def trim(x):
return herme.hermetrim(x, tol=1e-6)
-class TestConstants(object):
+class TestConstants:
def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
@@ -43,7 +43,7 @@ class TestConstants(object):
assert_equal(herme.hermex, [0, 1])
-class TestArithmetic(object):
+class TestArithmetic:
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
@@ -109,7 +109,7 @@ class TestArithmetic(object):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -203,7 +203,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_hermeint(self):
# check exceptions
@@ -305,7 +305,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_hermeder(self):
# check exceptions
@@ -346,7 +346,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -394,7 +394,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_hermefit(self):
def f(x):
@@ -471,7 +471,7 @@ class TestFitting(object):
assert_almost_equal(coef1, coef2)
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, herme.hermecompanion, [])
@@ -486,7 +486,7 @@ class TestCompanion(object):
assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = herme.hermegauss(100)
@@ -505,7 +505,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_hermefromroots(self):
res = herme.hermefromroots([])
diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py
index 1f51d7ad7..ec103c258 100644
--- a/numpy/polynomial/tests/test_laguerre.py
+++ b/numpy/polynomial/tests/test_laguerre.py
@@ -25,7 +25,7 @@ def trim(x):
return lag.lagtrim(x, tol=1e-6)
-class TestConstants(object):
+class TestConstants:
def test_lagdomain(self):
assert_equal(lag.lagdomain, [0, 1])
@@ -40,7 +40,7 @@ class TestConstants(object):
assert_equal(lag.lagx, [1, -1])
-class TestArithmetic(object):
+class TestArithmetic:
x = np.linspace(-3, 3, 100)
def test_lagadd(self):
@@ -106,7 +106,7 @@ class TestArithmetic(object):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([9., -14., 6.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -200,7 +200,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_lagint(self):
# check exceptions
@@ -302,7 +302,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_lagder(self):
# check exceptions
@@ -342,7 +342,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -390,7 +390,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_lagfit(self):
def f(x):
@@ -452,7 +452,7 @@ class TestFitting(object):
assert_almost_equal(lag.lagfit(x, x, [0, 1]), [1, -1])
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, lag.lagcompanion, [])
@@ -467,7 +467,7 @@ class TestCompanion(object):
assert_(lag.lagcompanion([1, 2])[0, 0] == 1.5)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = lag.laggauss(100)
@@ -486,7 +486,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_lagfromroots(self):
res = lag.lagfromroots([])
diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py
index f48f4c645..8846ca6f2 100644
--- a/numpy/polynomial/tests/test_legendre.py
+++ b/numpy/polynomial/tests/test_legendre.py
@@ -28,7 +28,7 @@ def trim(x):
return leg.legtrim(x, tol=1e-6)
-class TestConstants(object):
+class TestConstants:
def test_legdomain(self):
assert_equal(leg.legdomain, [-1, 1])
@@ -43,7 +43,7 @@ class TestConstants(object):
assert_equal(leg.legx, [0, 1])
-class TestArithmetic(object):
+class TestArithmetic:
x = np.linspace(-1, 1, 100)
def test_legadd(self):
@@ -110,7 +110,7 @@ class TestArithmetic(object):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([2., 2., 2.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -204,7 +204,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_legint(self):
# check exceptions
@@ -306,7 +306,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_legder(self):
# check exceptions
@@ -346,7 +346,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -394,7 +394,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestFitting(object):
+class TestFitting:
def test_legfit(self):
def f(x):
@@ -471,7 +471,7 @@ class TestFitting(object):
assert_almost_equal(coef1, coef2)
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, leg.legcompanion, [])
@@ -486,7 +486,7 @@ class TestCompanion(object):
assert_(leg.legcompanion([1, 2])[0, 0] == -.5)
-class TestGauss(object):
+class TestGauss:
def test_100(self):
x, w = leg.leggauss(100)
@@ -505,7 +505,7 @@ class TestGauss(object):
assert_almost_equal(w.sum(), tgt)
-class TestMisc(object):
+class TestMisc:
def test_legfromroots(self):
res = leg.legfromroots([])
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index a3d9817c0..c90075dfe 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -27,7 +27,7 @@ T9 = [0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
-class TestConstants(object):
+class TestConstants:
def test_polydomain(self):
assert_equal(poly.polydomain, [-1, 1])
@@ -42,7 +42,7 @@ class TestConstants(object):
assert_equal(poly.polyx, [0, 1])
-class TestArithmetic(object):
+class TestArithmetic:
def test_polyadd(self):
for i in range(5):
@@ -112,7 +112,7 @@ class TestArithmetic(object):
assert_equal(trim(res), trim(tgt), err_msg=msg)
-class TestEvaluation(object):
+class TestEvaluation:
# coefficients of 1 + 2*x + 3*x**2
c1d = np.array([1., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
@@ -285,7 +285,7 @@ class TestEvaluation(object):
assert_(res.shape == (2, 3)*3)
-class TestIntegral(object):
+class TestIntegral:
def test_polyint(self):
# check exceptions
@@ -384,7 +384,7 @@ class TestIntegral(object):
assert_almost_equal(res, tgt)
-class TestDerivative(object):
+class TestDerivative:
def test_polyder(self):
# check exceptions
@@ -424,7 +424,7 @@ class TestDerivative(object):
assert_almost_equal(res, tgt)
-class TestVander(object):
+class TestVander:
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
@@ -472,7 +472,7 @@ class TestVander(object):
assert_(van.shape == (1, 5, 24))
-class TestCompanion(object):
+class TestCompanion:
def test_raises(self):
assert_raises(ValueError, poly.polycompanion, [])
@@ -487,7 +487,7 @@ class TestCompanion(object):
assert_(poly.polycompanion([1, 2])[0, 0] == -.5)
-class TestMisc(object):
+class TestMisc:
def test_polyfromroots(self):
res = poly.polyfromroots([])
diff --git a/numpy/polynomial/tests/test_polyutils.py b/numpy/polynomial/tests/test_polyutils.py
index 09a53d752..1b27f53b5 100644
--- a/numpy/polynomial/tests/test_polyutils.py
+++ b/numpy/polynomial/tests/test_polyutils.py
@@ -8,7 +8,7 @@ from numpy.testing import (
)
-class TestMisc(object):
+class TestMisc:
def test_trimseq(self):
for i in range(5):
@@ -41,7 +41,7 @@ class TestMisc(object):
assert_equal(pu.trimcoef(coef, 2), [0])
-class TestDomain(object):
+class TestDomain:
def test_getdomain(self):
# test for real values
diff --git a/numpy/polynomial/tests/test_printing.py b/numpy/polynomial/tests/test_printing.py
index 1d0885de0..049d3af2f 100644
--- a/numpy/polynomial/tests/test_printing.py
+++ b/numpy/polynomial/tests/test_printing.py
@@ -2,7 +2,7 @@ import numpy.polynomial as poly
from numpy.testing import assert_equal
-class TestStr(object):
+class TestStr:
def test_polynomial_str(self):
res = str(poly.Polynomial([0, 1]))
tgt = 'poly([0. 1.])'
@@ -34,7 +34,7 @@ class TestStr(object):
assert_equal(res, tgt)
-class TestRepr(object):
+class TestRepr:
def test_polynomial_str(self):
res = repr(poly.Polynomial([0, 1]))
tgt = 'Polynomial([0., 1.], domain=[-1, 1], window=[-1, 1])'
diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py
index 9f77f0ad2..4fa69a402 100644
--- a/numpy/random/tests/test_direct.py
+++ b/numpy/random/tests/test_direct.py
@@ -145,7 +145,7 @@ def test_seedsequence():
assert len(dummy.spawn(10)) == 10
-class Base(object):
+class Base:
dtype = np.uint64
data2 = data1 = {}
@@ -410,7 +410,7 @@ class TestSFC64(Base):
cls.invalid_init_values = [(-1,)]
-class TestDefaultRNG(object):
+class TestDefaultRNG:
def test_seed(self):
for args in [(), (None,), (1234,), ([1234, 5678],)]:
rg = default_rng(*args)
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index 5b3d5f08a..6f4407373 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -19,7 +19,7 @@ def endpoint(request):
return request.param
-class TestSeed(object):
+class TestSeed:
def test_scalar(self):
s = Generator(MT19937(0))
assert_equal(s.integers(1000), 479)
@@ -55,7 +55,7 @@ class TestSeed(object):
assert_raises(ValueError, Generator, MT19937)
-class TestBinomial(object):
+class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
@@ -70,7 +70,7 @@ class TestBinomial(object):
assert_raises(ValueError, random.binomial, 1, np.nan)
-class TestMultinomial(object):
+class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -116,7 +116,7 @@ class TestMultinomial(object):
assert_array_equal(non_contig, contig)
-class TestMultivariateHypergeometric(object):
+class TestMultivariateHypergeometric:
def setup(self):
self.seed = 8675309
@@ -250,7 +250,7 @@ class TestMultivariateHypergeometric(object):
assert_array_equal(sample, expected)
-class TestSetState(object):
+class TestSetState:
def setup(self):
self.seed = 1234567890
self.rg = Generator(MT19937(self.seed))
@@ -284,7 +284,7 @@ class TestSetState(object):
self.rg.negative_binomial(0.5, 0.5)
-class TestIntegers(object):
+class TestIntegers:
rfunc = random.integers
# valid integer/boolean types
@@ -637,7 +637,7 @@ class TestIntegers(object):
assert chi2 < chi2max
-class TestRandomDist(object):
+class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
@@ -1565,7 +1565,7 @@ class TestRandomDist(object):
assert_array_equal(actual, desired)
-class TestBroadcast(object):
+class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
@@ -2117,7 +2117,7 @@ class TestBroadcast(object):
assert_array_equal(actual, desired)
-class TestThread(object):
+class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
@@ -2164,7 +2164,7 @@ class TestThread(object):
# See Issue #4263
-class TestSingleEltArrayInput(object):
+class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
index 7ca8b9f3c..fb0aac335 100644
--- a/numpy/random/tests/test_generator_mt19937_regressions.py
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -7,7 +7,7 @@ from numpy.random import Generator, MT19937
mt19937 = Generator(MT19937())
-class TestRegression(object):
+class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
@@ -136,7 +136,7 @@ class TestRegression(object):
assert_array_equal(perm, np.array([2, 0, 1]))
assert_array_equal(orig, np.arange(3).view(N))
- class M(object):
+ class M:
a = np.arange(5)
def __array__(self):
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 81d74650e..a9aa15083 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -10,7 +10,7 @@ from numpy import random
import sys
-class TestSeed(object):
+class TestSeed:
def test_scalar(self):
s = np.random.RandomState(0)
assert_equal(s.randint(1000), 684)
@@ -49,7 +49,7 @@ class TestSeed(object):
[4, 5, 6]])
-class TestBinomial(object):
+class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
@@ -64,7 +64,7 @@ class TestBinomial(object):
assert_raises(ValueError, random.binomial, 1, np.nan)
-class TestMultinomial(object):
+class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -92,7 +92,7 @@ class TestMultinomial(object):
float(1))
-class TestSetState(object):
+class TestSetState:
def setup(self):
self.seed = 1234567890
self.prng = random.RandomState(self.seed)
@@ -140,7 +140,7 @@ class TestSetState(object):
self.prng.negative_binomial(0.5, 0.5)
-class TestRandint(object):
+class TestRandint:
rfunc = np.random.randint
@@ -278,7 +278,7 @@ class TestRandint(object):
assert_equal(type(sample), dt)
-class TestRandomDist(object):
+class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
@@ -973,7 +973,7 @@ class TestRandomDist(object):
assert_array_equal(actual, desired)
-class TestBroadcast(object):
+class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
@@ -1543,7 +1543,7 @@ class TestBroadcast(object):
assert_raises(ValueError, logseries, bad_p_two * 3)
-class TestThread(object):
+class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
@@ -1587,7 +1587,7 @@ class TestThread(object):
# See Issue #4263
-class TestSingleEltArrayInput(object):
+class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index c12b685ad..ebe8558ba 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -61,7 +61,7 @@ def assert_mt19937_state_equal(a, b):
assert_equal(a['gauss'], b['gauss'])
-class TestSeed(object):
+class TestSeed:
def test_scalar(self):
s = random.RandomState(0)
assert_equal(s.randint(1000), 684)
@@ -108,7 +108,7 @@ class TestSeed(object):
assert_raises(ValueError, random.RandomState, MT19937)
-class TestBinomial(object):
+class TestBinomial:
def test_n_zero(self):
# Tests the corner case of n == 0 for the binomial distribution.
# binomial(0, p) should be zero for any p in [0, 1].
@@ -123,7 +123,7 @@ class TestBinomial(object):
assert_raises(ValueError, random.binomial, 1, np.nan)
-class TestMultinomial(object):
+class TestMultinomial:
def test_basic(self):
random.multinomial(100, [0.2, 0.8])
@@ -168,7 +168,7 @@ class TestMultinomial(object):
assert_array_equal(non_contig, contig)
-class TestSetState(object):
+class TestSetState:
def setup(self):
self.seed = 1234567890
self.random_state = random.RandomState(self.seed)
@@ -255,7 +255,7 @@ class TestSetState(object):
assert repr(self.random_state).startswith('RandomState(MT19937)')
-class TestRandint(object):
+class TestRandint:
rfunc = random.randint
@@ -392,7 +392,7 @@ class TestRandint(object):
assert_equal(type(sample), dt)
-class TestRandomDist(object):
+class TestRandomDist:
# Make sure the random distribution returns the correct value for a
# given seed
@@ -1245,7 +1245,7 @@ class TestRandomDist(object):
assert_array_equal(actual, desired)
-class TestBroadcast(object):
+class TestBroadcast:
# tests that functions that broadcast behave
# correctly when presented with non-scalar arguments
def setup(self):
@@ -1832,7 +1832,7 @@ class TestBroadcast(object):
assert_raises(ValueError, logseries, bad_p_two * 3)
-class TestThread(object):
+class TestThread:
# make sure each state produces the same sequence even in threads
def setup(self):
self.seeds = range(4)
@@ -1879,7 +1879,7 @@ class TestThread(object):
# See Issue #4263
-class TestSingleEltArrayInput(object):
+class TestSingleEltArrayInput:
def setup(self):
self.argOne = np.array([2])
self.argTwo = np.array([3])
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
index bdc2214b6..3be9edf02 100644
--- a/numpy/random/tests/test_randomstate_regression.py
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -11,7 +11,7 @@ import numpy as np
from numpy import random
-class TestRegression(object):
+class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
@@ -147,7 +147,7 @@ class TestRegression(object):
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
- class M(object):
+ class M:
a = np.arange(5)
def __array__(self):
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index e70505cec..7d77a31d8 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -7,7 +7,7 @@ from numpy.compat import long
import numpy as np
-class TestRegression(object):
+class TestRegression:
def test_VonMises_range(self):
# Make sure generated random variables are in [-pi, pi].
@@ -143,7 +143,7 @@ class TestRegression(object):
assert_array_equal(perm, np.array([0, 2, 1]))
assert_array_equal(orig, np.arange(3).view(N))
- class M(object):
+ class M:
a = np.arange(5)
def __array__(self):
diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py
index cdaac5ebb..ebfc6825e 100644
--- a/numpy/random/tests/test_smoke.py
+++ b/numpy/random/tests/test_smoke.py
@@ -91,7 +91,7 @@ def warmup(rg, n=None):
rg.random(n, dtype=np.float32)
-class RNG(object):
+class RNG:
@classmethod
def setup_class(cls):
# Overridden in test classes. Place holder to silence IDE noise
diff --git a/numpy/testing/_private/noseclasses.py b/numpy/testing/_private/noseclasses.py
index 7cad24620..493bacfdd 100644
--- a/numpy/testing/_private/noseclasses.py
+++ b/numpy/testing/_private/noseclasses.py
@@ -266,7 +266,7 @@ class NumpyDoctest(npd.Doctest):
return npd.Doctest.wantFile(self, file)
-class Unplugger(object):
+class Unplugger:
""" Nose plugin to remove named plugin late in loading
By default it removes the "doctest" plugin.
diff --git a/numpy/testing/_private/nosetester.py b/numpy/testing/_private/nosetester.py
index 6226eeb3c..4ca5267ce 100644
--- a/numpy/testing/_private/nosetester.py
+++ b/numpy/testing/_private/nosetester.py
@@ -110,7 +110,7 @@ def run_module_suite(file_to_run=None, argv=None):
nose.run(argv=argv, addplugins=[KnownFailurePlugin()])
-class NoseTester(object):
+class NoseTester:
"""
Nose test runner.
@@ -454,11 +454,6 @@ class NoseTester(object):
# This is very specific, so using the fragile module filter
# is fine
import threading
- sup.filter(DeprecationWarning,
- r"sys\.exc_clear\(\) not supported in 3\.x",
- module=threading)
- sup.filter(DeprecationWarning, message=r"in 3\.x, __setslice__")
- sup.filter(DeprecationWarning, message=r"in 3\.x, __getslice__")
sup.filter(DeprecationWarning, message=r"buffer\(\) not supported in 3\.x")
sup.filter(DeprecationWarning, message=r"CObject type is not supported in 3\.x")
sup.filter(DeprecationWarning, message=r"comparing unequal types not supported in 3\.x")
diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py
index 2134cca88..dbfb4807c 100644
--- a/numpy/testing/_private/parameterized.py
+++ b/numpy/testing/_private/parameterized.py
@@ -293,10 +293,10 @@ def detect_runner():
_test_runner_guess = None
return _test_runner_guess
-class parameterized(object):
+class parameterized:
""" Parameterize a test case::
- class TestInt(object):
+ class TestInt:
@parameterized([
("A", 10),
("F", 15),
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 1e118b538..914491b71 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -2001,7 +2001,7 @@ class clear_and_catch_warnings(warnings.catch_warnings):
mod.__warningregistry__.update(self._warnreg_copies[mod])
-class suppress_warnings(object):
+class suppress_warnings:
"""
Context manager and decorator doing much the same as
``warnings.catch_warnings``.
@@ -2216,8 +2216,7 @@ class suppress_warnings(object):
del self._filters
def _showwarning(self, message, category, filename, lineno,
- *args, **kwargs):
- use_warnmsg = kwargs.pop("use_warnmsg", None)
+ *args, use_warnmsg=None, **kwargs):
for cat, _, pattern, mod, rec in (
self._suppressions + self._tmp_suppressions)[::-1]:
if (issubclass(category, cat) and
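The `_showwarning` hunk above swaps a manual `kwargs.pop("use_warnmsg", None)` for a Python-3-only keyword-only parameter. A minimal sketch of that signature style (hypothetical function name, not part of the patch):

def _show(message, *args, use_warnmsg=None, **kwargs):
    # use_warnmsg can now only be supplied by keyword; extra positional
    # arguments still land in *args and remaining keywords in **kwargs.
    return message, use_warnmsg

print(_show("deprecated", 1, 2))                  # ('deprecated', None)
print(_show("deprecated", use_warnmsg="object"))  # ('deprecated', 'object')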
diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py
index c11b31968..84d46b59b 100755
--- a/numpy/testing/print_coercion_tables.py
+++ b/numpy/testing/print_coercion_tables.py
@@ -5,7 +5,7 @@
import numpy as np
# Generic object that can be added, but doesn't do anything else
-class GenericObject(object):
+class GenericObject:
def __init__(self, v):
self.v = v
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index fc8d764c2..77f8b66ba 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -21,7 +21,7 @@ else:
@pytest.mark.skipif(not HAVE_NOSE, reason="Needs nose")
-class TestNoseDecorators(object):
+class TestNoseDecorators:
# These tests are run in a class for simplicity while still
# getting a report on each, skipped or success.
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index f752c63f3..232ca0e83 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -18,7 +18,7 @@ from numpy.testing import (
from numpy.core.overrides import ARRAY_FUNCTION_ENABLED
-class _GenericTest(object):
+class _GenericTest:
def _test_equal(self, a, b):
self._assert_func(a, b)
@@ -209,7 +209,7 @@ class TestArrayEqual(_GenericTest):
self._test_not_equal(b, a)
-class TestBuildErrorMessage(object):
+class TestBuildErrorMessage:
def test_build_err_msg_defaults(self):
x = np.array([1.00001, 2.00002, 3.00003])
@@ -616,7 +616,7 @@ class TestAlmostEqual(_GenericTest):
self._assert_func(a, a)
-class TestApproxEqual(object):
+class TestApproxEqual:
def setup(self):
self._assert_func = assert_approx_equal
@@ -659,7 +659,7 @@ class TestApproxEqual(object):
assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
-class TestArrayAssertLess(object):
+class TestArrayAssertLess:
def setup(self):
self._assert_func = assert_array_less
@@ -769,7 +769,7 @@ class TestArrayAssertLess(object):
@pytest.mark.skip(reason="The raises decorator depends on Nose")
-class TestRaises(object):
+class TestRaises:
def setup(self):
class MyException(Exception):
@@ -803,7 +803,7 @@ class TestRaises(object):
raise AssertionError("should have raised an AssertionError")
-class TestWarns(object):
+class TestWarns:
def test_warn(self):
def f():
@@ -854,7 +854,7 @@ class TestWarns(object):
raise AssertionError("wrong warning caught by assert_warn")
-class TestAssertAllclose(object):
+class TestAssertAllclose:
def test_simple(self):
x = 1e-3
@@ -924,7 +924,7 @@ class TestAssertAllclose(object):
assert_('Max relative difference: 0.5' in msg)
-class TestArrayAlmostEqualNulp(object):
+class TestArrayAlmostEqualNulp:
def test_float64_pass(self):
# The number of units of least precision
@@ -1121,7 +1121,7 @@ class TestArrayAlmostEqualNulp(object):
xi, y + y*1j, nulp)
-class TestULP(object):
+class TestULP:
def test_equal(self):
x = np.random.randn(10)
@@ -1177,7 +1177,7 @@ class TestULP(object):
maxulp=maxulp))
-class TestStringEqual(object):
+class TestStringEqual:
def test_simple(self):
assert_string_equal("hello", "hello")
assert_string_equal("hello\nmultiline", "hello\nmultiline")
@@ -1239,7 +1239,7 @@ def test_warn_len_equal_call_scenarios():
# check that no assertion is uncaught
# parallel scenario -- no warning issued yet
- class mod(object):
+ class mod:
pass
mod_inst = mod()
@@ -1249,7 +1249,7 @@ def test_warn_len_equal_call_scenarios():
# serial test scenario -- the __warningregistry__
# attribute should be present
- class mod(object):
+ class mod:
def __init__(self):
self.__warningregistry__ = {'warning1':1,
'warning2':2}
@@ -1524,7 +1524,7 @@ def test_clear_and_catch_warnings_inherit():
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
-class TestAssertNoGcCycles(object):
+class TestAssertNoGcCycles:
""" Test assert_no_gc_cycles """
def test_passes(self):
def no_cycle():
@@ -1558,7 +1558,7 @@ class TestAssertNoGcCycles(object):
error, instead of hanging forever trying to clear it.
"""
- class ReferenceCycleInDel(object):
+ class ReferenceCycleInDel:
"""
An object that not only contains a reference cycle, but creates new
cycles whenever it's garbage-collected and its __del__ runs
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index 5e19f06ce..af3730df1 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -35,7 +35,7 @@ else:
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
-class TestLoadLibrary(object):
+class TestLoadLibrary:
def test_basic(self):
try:
# Should succeed
@@ -61,7 +61,7 @@ class TestLoadLibrary(object):
print(msg)
-class TestNdpointer(object):
+class TestNdpointer:
def test_dtype(self):
dt = np.intc
p = ndpointer(dtype=dt)
@@ -128,7 +128,7 @@ class TestNdpointer(object):
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
-class TestNdpointerCFunc(object):
+class TestNdpointerCFunc:
def test_arguments(self):
""" Test that arguments are coerced from arrays """
c_forward_pointer.restype = ctypes.c_void_p
@@ -184,7 +184,7 @@ class TestNdpointerCFunc(object):
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
-class TestAsArray(object):
+class TestAsArray:
def test_array(self):
from ctypes import c_int
@@ -275,7 +275,7 @@ class TestAsArray(object):
@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
-class TestAsCtypesType(object):
+class TestAsCtypesType:
""" Test conversion from dtypes to ctypes types """
def test_scalar(self):
dt = np.dtype('<u2')
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index 4ee84034d..48dd42a9f 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -225,7 +225,6 @@ PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
"distutils.command.install_data",
"distutils.command.install_headers",
"distutils.command.sdist",
- "distutils.compat",
"distutils.conv_template",
"distutils.core",
"distutils.extension",
diff --git a/pytest.ini b/pytest.ini
index 74faefd6e..141c2f6ef 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -14,9 +14,6 @@ filterwarnings =
# Matrix PendingDeprecationWarning.
ignore:the matrix subclass is not
# Ignore python2.7 -3 warnings
- ignore:sys\.exc_clear\(\) not supported in 3\.x:DeprecationWarning
- ignore:in 3\.x, __setslice__:DeprecationWarning
- ignore:in 3\.x, __getslice__:DeprecationWarning
ignore:buffer\(\) not supported in 3\.x:DeprecationWarning
ignore:CObject type is not supported in 3\.x:DeprecationWarning
ignore:comparing unequal types not supported in 3\.x:DeprecationWarning
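The deleted entries silenced Python 2 `-3` mode warnings that cannot be emitted on Python 3. For orientation only, one of the entries kept above maps onto the stdlib warnings API roughly like this (illustrative sketch, not part of the patch):

import warnings

# "ignore:buffer\(\) not supported in 3\.x:DeprecationWarning" split on ':'
# gives the action, the message regex, and the warning category.
warnings.filterwarnings(
    "ignore",
    message=r"buffer\(\) not supported in 3\.x",
    category=DeprecationWarning,
)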
diff --git a/setup.py b/setup.py
index 20bfc657c..e207e2787 100755
--- a/setup.py
+++ b/setup.py
@@ -23,6 +23,7 @@ import os
import sys
import subprocess
import textwrap
+import sysconfig
if sys.version_info[:2] < (3, 6):
@@ -225,6 +226,40 @@ class sdist_checked(sdist):
sdist.run(self)
+def get_build_overrides():
+ """
+ Custom build commands to add `-std=c99` to compilation
+ """
+ from numpy.distutils.command.build_clib import build_clib
+ from numpy.distutils.command.build_ext import build_ext
+
+ def _is_using_gcc(obj):
+ is_gcc = False
+ if obj.compiler.compiler_type == 'unix':
+ cc = sysconfig.get_config_var("CC")
+ if not cc:
+ cc = ""
+ compiler_name = os.path.basename(cc)
+ is_gcc = "gcc" in compiler_name
+ return is_gcc
+
+ class new_build_clib(build_clib):
+ def build_a_library(self, build_info, lib_name, libraries):
+ if _is_using_gcc(self):
+ args = build_info.get('extra_compiler_args') or []
+ args.append('-std=c99')
+ build_info['extra_compiler_args'] = args
+ build_clib.build_a_library(self, build_info, lib_name, libraries)
+
+ class new_build_ext(build_ext):
+ def build_extension(self, ext):
+ if _is_using_gcc(self):
+ if '-std=c99' not in ext.extra_compile_args:
+ ext.extra_compile_args.append('-std=c99')
+ build_ext.build_extension(self, ext)
+ return new_build_clib, new_build_ext
+
+
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
@@ -387,6 +422,8 @@ def setup_package():
'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2],
]
+ cmdclass={"sdist": sdist_checked,
+ }
metadata = dict(
name = 'numpy',
maintainer = "NumPy Developers",
@@ -405,8 +442,7 @@ def setup_package():
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
- cmdclass={"sdist": sdist_checked,
- },
+ cmdclass=cmdclass,
python_requires='>=3.5',
zip_safe=False,
entry_points={
@@ -430,6 +466,8 @@ def setup_package():
generate_cython()
metadata['configuration'] = configuration
+ # Customize extension building
+ cmdclass['build_clib'], cmdclass['build_ext'] = get_build_overrides()
else:
# Version number is added to metadata inside configuration() if build
# is run.
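The new `get_build_overrides()` hook appends `-std=c99` to every C library and extension when gcc is detected. Outside numpy.distutils the same flag would typically be attached per extension; a minimal setuptools sketch with hypothetical names, shown only to illustrate the flag being forced here:

from setuptools import Extension, setup

ext = Extension(
    "example._cmod",                  # hypothetical extension name
    sources=["example/_cmod.c"],
    extra_compile_args=["-std=c99"],  # same flag the gcc branch appends
)
setup(name="example", version="0.0", ext_modules=[ext])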
diff --git a/tools/allocation_tracking/alloc_hook.pyx b/tools/allocation_tracking/alloc_hook.pyx
index d1e656f90..eeefe1704 100644
--- a/tools/allocation_tracking/alloc_hook.pyx
+++ b/tools/allocation_tracking/alloc_hook.pyx
@@ -22,7 +22,7 @@ cdef void pyhook(void *old, void *new, size_t size, void *user_data):
PyLong_FromVoidPtr(new),
size)
-class NumpyAllocHook(object):
+class NumpyAllocHook:
def __init__(self, callback):
self.callback = callback
diff --git a/tools/allocation_tracking/track_allocations.py b/tools/allocation_tracking/track_allocations.py
index a997df64a..94d07d50e 100644
--- a/tools/allocation_tracking/track_allocations.py
+++ b/tools/allocation_tracking/track_allocations.py
@@ -3,7 +3,7 @@ import gc
import inspect
from alloc_hook import NumpyAllocHook
-class AllocationTracker(object):
+class AllocationTracker:
def __init__(self, threshold=0):
'''track numpy allocations of size threshold bytes or more.'''
diff --git a/tools/npy_tempita/__init__.py b/tools/npy_tempita/__init__.py
index 59f568094..97905a16e 100644
--- a/tools/npy_tempita/__init__.py
+++ b/tools/npy_tempita/__init__.py
@@ -90,7 +90,7 @@ def get_file_template(name, from_template):
get_template=from_template.get_template)
-class Template(object):
+class Template:
default_namespace = {
'start_braces': '{{',
@@ -438,7 +438,7 @@ class bunch(dict):
############################################################
-class html(object):
+class html:
def __init__(self, value):
self.value = value
@@ -520,7 +520,7 @@ def sub_html(content, **kw):
return tmpl.substitute(kw)
-class TemplateDef(object):
+class TemplateDef:
def __init__(self, template, func_name, func_signature,
body, ns, pos, bound_self=None):
self._template = template
@@ -597,7 +597,7 @@ class TemplateDef(object):
return values
-class TemplateObject(object):
+class TemplateObject:
def __init__(self, name):
self.__name = name
@@ -607,7 +607,7 @@ class TemplateObject(object):
return '<%s %s>' % (self.__class__.__name__, self.__name)
-class TemplateObjectGetter(object):
+class TemplateObjectGetter:
def __init__(self, template_obj):
self.__template_obj = template_obj
@@ -620,7 +620,7 @@ class TemplateObjectGetter(object):
self.__class__.__name__, self.__template_obj)
-class _Empty(object):
+class _Empty:
def __call__(self, *args, **kw):
return self
diff --git a/tools/npy_tempita/_looper.py b/tools/npy_tempita/_looper.py
index 0135852e3..23121fe9e 100644
--- a/tools/npy_tempita/_looper.py
+++ b/tools/npy_tempita/_looper.py
@@ -23,7 +23,7 @@ from .compat3 import basestring_
__all__ = ['looper']
-class looper(object):
+class looper:
"""
Helper for looping (particularly in templates)
@@ -45,7 +45,7 @@ class looper(object):
self.__class__.__name__, self.seq)
-class looper_iter(object):
+class looper_iter:
def __init__(self, seq):
self.seq = list(seq)
@@ -65,7 +65,7 @@ class looper_iter(object):
next = __next__
-class loop_pos(object):
+class loop_pos:
def __init__(self, seq, pos):
self.seq = seq
diff --git a/tools/refguide_check.py b/tools/refguide_check.py
index c647fb2ed..3b587b77b 100644
--- a/tools/refguide_check.py
+++ b/tools/refguide_check.py
@@ -795,7 +795,7 @@ def _run_doctests(tests, full_name, verbose, doctest_warnings):
def out(msg):
output.append(msg)
- class MyStderr(object):
+ class MyStderr:
"""
Redirect stderr to the current stdout
"""