summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--doc/cdoc/Doxyfile29
-rw-r--r--doc/cdoc/Makefile10
-rw-r--r--doc/cdoc/README31
-rwxr-xr-xdoc/cdoc/numpyfilter.py104
-rw-r--r--doc/release/upcoming_changes/20000.deprecation.rst5
-rw-r--r--doc/source/reference/arrays.datetime.rst18
-rw-r--r--doc/source/reference/c-api/array.rst60
-rw-r--r--doc/source/reference/c-api/types-and-structures.rst69
-rw-r--r--numpy/__init__.pyi59
-rw-r--r--numpy/array_api/_creation_functions.py4
-rw-r--r--numpy/array_api/_data_type_functions.py4
-rw-r--r--numpy/array_api/_typing.py30
-rw-r--r--numpy/core/_add_newdocs.py3
-rw-r--r--numpy/core/fromnumeric.py6
-rw-r--r--numpy/core/function_base.pyi13
-rw-r--r--numpy/core/src/multiarray/datetime.c22
-rw-r--r--numpy/core/src/multiarray/item_selection.c10
-rw-r--r--numpy/core/src/multiarray/iterators.c36
-rw-r--r--numpy/core/src/multiarray/usertypes.c67
-rw-r--r--numpy/core/tests/test_datetime.py37
-rw-r--r--numpy/core/tests/test_deprecations.py23
-rw-r--r--numpy/core/tests/test_multiarray.py68
-rw-r--r--numpy/core/tests/test_numeric.py6
-rw-r--r--numpy/distutils/fcompiler/nag.py12
-rw-r--r--numpy/f2py/cb_rules.py22
-rwxr-xr-xnumpy/f2py/crackfortran.py6
-rwxr-xr-xnumpy/f2py/rules.py504
-rw-r--r--numpy/f2py/symbolic.py2
-rw-r--r--numpy/f2py/tests/test_callback.py6
-rw-r--r--numpy/f2py/tests/test_crackfortran.py17
-rw-r--r--numpy/lib/function_base.py18
-rw-r--r--numpy/lib/function_base.pyi308
-rw-r--r--numpy/lib/tests/test_function_base.py107
-rw-r--r--numpy/ma/core.py44
-rw-r--r--numpy/ma/tests/test_core.py50
-rw-r--r--numpy/random/src/distributions/random_hypergeometric.c2
-rw-r--r--numpy/typing/__init__.py54
-rw-r--r--numpy/typing/_add_docstring.py15
-rw-r--r--numpy/typing/_callable.pyi (renamed from numpy/typing/_callable.py)8
-rw-r--r--numpy/typing/_dtype_like.py21
-rw-r--r--numpy/typing/_extended_precision.py3
-rw-r--r--numpy/typing/mypy_plugin.py41
-rw-r--r--numpy/typing/tests/data/fail/arithmetic.pyi (renamed from numpy/typing/tests/data/fail/arithmetic.py)0
-rw-r--r--numpy/typing/tests/data/fail/array_constructors.pyi (renamed from numpy/typing/tests/data/fail/array_constructors.py)0
-rw-r--r--numpy/typing/tests/data/fail/array_like.pyi (renamed from numpy/typing/tests/data/fail/array_like.py)0
-rw-r--r--numpy/typing/tests/data/fail/array_pad.pyi (renamed from numpy/typing/tests/data/fail/array_pad.py)0
-rw-r--r--numpy/typing/tests/data/fail/arrayprint.pyi (renamed from numpy/typing/tests/data/fail/arrayprint.py)0
-rw-r--r--numpy/typing/tests/data/fail/arrayterator.pyi (renamed from numpy/typing/tests/data/fail/arrayterator.py)0
-rw-r--r--numpy/typing/tests/data/fail/bitwise_ops.pyi (renamed from numpy/typing/tests/data/fail/bitwise_ops.py)0
-rw-r--r--numpy/typing/tests/data/fail/char.pyi (renamed from numpy/typing/tests/data/fail/char.py)0
-rw-r--r--numpy/typing/tests/data/fail/comparisons.pyi (renamed from numpy/typing/tests/data/fail/comparisons.py)0
-rw-r--r--numpy/typing/tests/data/fail/constants.pyi (renamed from numpy/typing/tests/data/fail/constants.py)0
-rw-r--r--numpy/typing/tests/data/fail/datasource.pyi (renamed from numpy/typing/tests/data/fail/datasource.py)0
-rw-r--r--numpy/typing/tests/data/fail/dtype.pyi (renamed from numpy/typing/tests/data/fail/dtype.py)0
-rw-r--r--numpy/typing/tests/data/fail/einsumfunc.pyi (renamed from numpy/typing/tests/data/fail/einsumfunc.py)0
-rw-r--r--numpy/typing/tests/data/fail/flatiter.pyi (renamed from numpy/typing/tests/data/fail/flatiter.py)0
-rw-r--r--numpy/typing/tests/data/fail/fromnumeric.pyi (renamed from numpy/typing/tests/data/fail/fromnumeric.py)0
-rw-r--r--numpy/typing/tests/data/fail/index_tricks.pyi (renamed from numpy/typing/tests/data/fail/index_tricks.py)0
-rw-r--r--numpy/typing/tests/data/fail/lib_function_base.pyi19
-rw-r--r--numpy/typing/tests/data/fail/lib_utils.pyi (renamed from numpy/typing/tests/data/fail/lib_utils.py)0
-rw-r--r--numpy/typing/tests/data/fail/lib_version.pyi (renamed from numpy/typing/tests/data/fail/lib_version.py)0
-rw-r--r--numpy/typing/tests/data/fail/linalg.pyi (renamed from numpy/typing/tests/data/fail/linalg.py)0
-rw-r--r--numpy/typing/tests/data/fail/memmap.pyi (renamed from numpy/typing/tests/data/fail/memmap.py)0
-rw-r--r--numpy/typing/tests/data/fail/modules.pyi (renamed from numpy/typing/tests/data/fail/modules.py)0
-rw-r--r--numpy/typing/tests/data/fail/multiarray.pyi (renamed from numpy/typing/tests/data/fail/multiarray.py)0
-rw-r--r--numpy/typing/tests/data/fail/ndarray.pyi (renamed from numpy/typing/tests/data/fail/ndarray.py)0
-rw-r--r--numpy/typing/tests/data/fail/ndarray_misc.pyi (renamed from numpy/typing/tests/data/fail/ndarray_misc.py)4
-rw-r--r--numpy/typing/tests/data/fail/nditer.pyi (renamed from numpy/typing/tests/data/fail/nditer.py)0
-rw-r--r--numpy/typing/tests/data/fail/nested_sequence.pyi (renamed from numpy/typing/tests/data/fail/nested_sequence.py)0
-rw-r--r--numpy/typing/tests/data/fail/npyio.pyi (renamed from numpy/typing/tests/data/fail/npyio.py)0
-rw-r--r--numpy/typing/tests/data/fail/numerictypes.pyi (renamed from numpy/typing/tests/data/fail/numerictypes.py)0
-rw-r--r--numpy/typing/tests/data/fail/random.pyi (renamed from numpy/typing/tests/data/fail/random.py)0
-rw-r--r--numpy/typing/tests/data/fail/rec.pyi (renamed from numpy/typing/tests/data/fail/rec.py)0
-rw-r--r--numpy/typing/tests/data/fail/scalars.pyi (renamed from numpy/typing/tests/data/fail/scalars.py)0
-rw-r--r--numpy/typing/tests/data/fail/stride_tricks.pyi (renamed from numpy/typing/tests/data/fail/stride_tricks.py)0
-rw-r--r--numpy/typing/tests/data/fail/testing.pyi (renamed from numpy/typing/tests/data/fail/testing.py)0
-rw-r--r--numpy/typing/tests/data/fail/twodim_base.pyi (renamed from numpy/typing/tests/data/fail/twodim_base.py)0
-rw-r--r--numpy/typing/tests/data/fail/type_check.pyi (renamed from numpy/typing/tests/data/fail/type_check.py)0
-rw-r--r--numpy/typing/tests/data/fail/ufunc_config.pyi (renamed from numpy/typing/tests/data/fail/ufunc_config.py)0
-rw-r--r--numpy/typing/tests/data/fail/ufunclike.pyi (renamed from numpy/typing/tests/data/fail/ufunclike.py)0
-rw-r--r--numpy/typing/tests/data/fail/ufuncs.pyi (renamed from numpy/typing/tests/data/fail/ufuncs.py)0
-rw-r--r--numpy/typing/tests/data/fail/warnings_and_errors.pyi (renamed from numpy/typing/tests/data/fail/warnings_and_errors.py)0
-rw-r--r--numpy/typing/tests/data/misc/extended_precision.pyi (renamed from numpy/typing/tests/data/misc/extended_precision.py)0
-rw-r--r--numpy/typing/tests/data/reveal/arithmetic.pyi (renamed from numpy/typing/tests/data/reveal/arithmetic.py)0
-rw-r--r--numpy/typing/tests/data/reveal/array_constructors.pyi (renamed from numpy/typing/tests/data/reveal/array_constructors.py)0
-rw-r--r--numpy/typing/tests/data/reveal/arraypad.pyi (renamed from numpy/typing/tests/data/reveal/arraypad.py)0
-rw-r--r--numpy/typing/tests/data/reveal/arrayprint.pyi (renamed from numpy/typing/tests/data/reveal/arrayprint.py)0
-rw-r--r--numpy/typing/tests/data/reveal/arraysetops.pyi (renamed from numpy/typing/tests/data/reveal/arraysetops.py)0
-rw-r--r--numpy/typing/tests/data/reveal/arrayterator.pyi (renamed from numpy/typing/tests/data/reveal/arrayterator.py)0
-rw-r--r--numpy/typing/tests/data/reveal/bitwise_ops.pyi (renamed from numpy/typing/tests/data/reveal/bitwise_ops.py)0
-rw-r--r--numpy/typing/tests/data/reveal/char.pyi (renamed from numpy/typing/tests/data/reveal/char.py)0
-rw-r--r--numpy/typing/tests/data/reveal/comparisons.pyi (renamed from numpy/typing/tests/data/reveal/comparisons.py)0
-rw-r--r--numpy/typing/tests/data/reveal/constants.pyi (renamed from numpy/typing/tests/data/reveal/constants.py)0
-rw-r--r--numpy/typing/tests/data/reveal/ctypeslib.pyi (renamed from numpy/typing/tests/data/reveal/ctypeslib.py)0
-rw-r--r--numpy/typing/tests/data/reveal/datasource.pyi (renamed from numpy/typing/tests/data/reveal/datasource.py)0
-rw-r--r--numpy/typing/tests/data/reveal/dtype.pyi (renamed from numpy/typing/tests/data/reveal/dtype.py)0
-rw-r--r--numpy/typing/tests/data/reveal/einsumfunc.pyi (renamed from numpy/typing/tests/data/reveal/einsumfunc.py)0
-rw-r--r--numpy/typing/tests/data/reveal/flatiter.pyi (renamed from numpy/typing/tests/data/reveal/flatiter.py)0
-rw-r--r--numpy/typing/tests/data/reveal/fromnumeric.pyi (renamed from numpy/typing/tests/data/reveal/fromnumeric.py)0
-rw-r--r--numpy/typing/tests/data/reveal/getlimits.pyi (renamed from numpy/typing/tests/data/reveal/getlimits.py)0
-rw-r--r--numpy/typing/tests/data/reveal/index_tricks.pyi (renamed from numpy/typing/tests/data/reveal/index_tricks.py)0
-rw-r--r--numpy/typing/tests/data/reveal/lib_function_base.pyi99
-rw-r--r--numpy/typing/tests/data/reveal/lib_utils.pyi (renamed from numpy/typing/tests/data/reveal/lib_utils.py)0
-rw-r--r--numpy/typing/tests/data/reveal/lib_version.pyi (renamed from numpy/typing/tests/data/reveal/lib_version.py)0
-rw-r--r--numpy/typing/tests/data/reveal/linalg.pyi (renamed from numpy/typing/tests/data/reveal/linalg.py)0
-rw-r--r--numpy/typing/tests/data/reveal/memmap.pyi (renamed from numpy/typing/tests/data/reveal/memmap.py)0
-rw-r--r--numpy/typing/tests/data/reveal/mod.pyi (renamed from numpy/typing/tests/data/reveal/mod.py)0
-rw-r--r--numpy/typing/tests/data/reveal/modules.pyi (renamed from numpy/typing/tests/data/reveal/modules.py)0
-rw-r--r--numpy/typing/tests/data/reveal/multiarray.pyi (renamed from numpy/typing/tests/data/reveal/multiarray.py)0
-rw-r--r--numpy/typing/tests/data/reveal/nbit_base_example.pyi (renamed from numpy/typing/tests/data/reveal/nbit_base_example.py)0
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_conversion.pyi (renamed from numpy/typing/tests/data/reveal/ndarray_conversion.py)0
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_misc.pyi (renamed from numpy/typing/tests/data/reveal/ndarray_misc.py)11
-rw-r--r--numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi (renamed from numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py)0
-rw-r--r--numpy/typing/tests/data/reveal/nditer.pyi (renamed from numpy/typing/tests/data/reveal/nditer.py)0
-rw-r--r--numpy/typing/tests/data/reveal/nested_sequence.pyi (renamed from numpy/typing/tests/data/reveal/nested_sequence.py)0
-rw-r--r--numpy/typing/tests/data/reveal/npyio.pyi (renamed from numpy/typing/tests/data/reveal/npyio.py)0
-rw-r--r--numpy/typing/tests/data/reveal/numeric.pyi (renamed from numpy/typing/tests/data/reveal/numeric.py)0
-rw-r--r--numpy/typing/tests/data/reveal/numerictypes.pyi (renamed from numpy/typing/tests/data/reveal/numerictypes.py)0
-rw-r--r--numpy/typing/tests/data/reveal/random.pyi (renamed from numpy/typing/tests/data/reveal/random.py)0
-rw-r--r--numpy/typing/tests/data/reveal/rec.pyi (renamed from numpy/typing/tests/data/reveal/rec.py)0
-rw-r--r--numpy/typing/tests/data/reveal/scalars.pyi (renamed from numpy/typing/tests/data/reveal/scalars.py)0
-rw-r--r--numpy/typing/tests/data/reveal/shape_base.pyi (renamed from numpy/typing/tests/data/reveal/shape_base.py)0
-rw-r--r--numpy/typing/tests/data/reveal/stride_tricks.pyi (renamed from numpy/typing/tests/data/reveal/stride_tricks.py)0
-rw-r--r--numpy/typing/tests/data/reveal/testing.pyi (renamed from numpy/typing/tests/data/reveal/testing.py)0
-rw-r--r--numpy/typing/tests/data/reveal/twodim_base.pyi (renamed from numpy/typing/tests/data/reveal/twodim_base.py)0
-rw-r--r--numpy/typing/tests/data/reveal/type_check.pyi (renamed from numpy/typing/tests/data/reveal/type_check.py)0
-rw-r--r--numpy/typing/tests/data/reveal/ufunc_config.pyi (renamed from numpy/typing/tests/data/reveal/ufunc_config.py)0
-rw-r--r--numpy/typing/tests/data/reveal/ufunclike.pyi (renamed from numpy/typing/tests/data/reveal/ufunclike.py)0
-rw-r--r--numpy/typing/tests/data/reveal/ufuncs.pyi (renamed from numpy/typing/tests/data/reveal/ufuncs.py)0
-rw-r--r--numpy/typing/tests/data/reveal/warnings_and_errors.pyi (renamed from numpy/typing/tests/data/reveal/warnings_and_errors.py)0
-rw-r--r--numpy/typing/tests/test_runtime.py4
-rw-r--r--numpy/typing/tests/test_typing.py112
-rw-r--r--tools/linter.py1
133 files changed, 1489 insertions, 715 deletions
diff --git a/doc/cdoc/Doxyfile b/doc/cdoc/Doxyfile
deleted file mode 100644
index c9c386e4e..000000000
--- a/doc/cdoc/Doxyfile
+++ /dev/null
@@ -1,29 +0,0 @@
-# Doxyfile for NumPy C API
-# See http://www.doxygen.nl/manual/config.html
-PROJECT_NAME = numpy
-PROJECT_NUMBER = 2.0.0
-OUTPUT_DIRECTORY = build
-STRIP_FROM_PATH = ../../numpy/core
-INHERIT_DOCS = YES
-TAB_SIZE = 8
-OPTIMIZE_OUTPUT_FOR_C = YES
-EXTRACT_ALL = YES
-EXTRACT_PRIVATE = YES
-EXTRACT_STATIC = YES
-CASE_SENSE_NAMES = NO
-INPUT = ../../numpy/core/src \
- ../../numpy/core/include
-FILE_PATTERNS = *.h *.c *.src
-RECURSIVE = YES
-INPUT_FILTER = ./numpyfilter.py
-REFERENCED_BY_RELATION = YES
-REFERENCES_RELATION = YES
-ALPHABETICAL_INDEX = NO
-GENERATE_HTML = YES
-HTML_TIMESTAMP = YES
-GENERATE_TREEVIEW = YES
-SEARCHENGINE = NO
-GENERATE_LATEX = NO
-PAPER_TYPE = a4wide
-GENERATE_XML = NO
-HAVE_DOT = NO
diff --git a/doc/cdoc/Makefile b/doc/cdoc/Makefile
deleted file mode 100644
index 8b9deada8..000000000
--- a/doc/cdoc/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-all: build
-
-build:
- doxygen
-
-clean:
- rm -rf build
-
-.PHONY: all build clean
-
diff --git a/doc/cdoc/README b/doc/cdoc/README
deleted file mode 100644
index a5363cfa1..000000000
--- a/doc/cdoc/README
+++ /dev/null
@@ -1,31 +0,0 @@
-cdoc
-====
-
-This is a simple Doxygen project for building NumPy C code documentation,
-with docstrings extracted from the C sources themselves.
-
-The understood syntax for documentation in the C source is
-
- /*
- * Some text in reStructuredText format
- */
- int function_to_which_the_text_applies()
- {
- ...
- }
-
- /*
- * More text in reStructuredText format
- */
- struct
- {
- int variable_1; /* Documentation for variable_1 */
-
- /*
- * Documentation for variable_2
- */
- int variable_2;
- } struct_name_t;
-
-Please do not use JavaDoc or Doxygen-specific formatting at the moment.
-
diff --git a/doc/cdoc/numpyfilter.py b/doc/cdoc/numpyfilter.py
deleted file mode 100755
index d3cfe18f0..000000000
--- a/doc/cdoc/numpyfilter.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python3
-"""
-numpyfilter.py [-h] inputfile
-
-Interpret C comments as ReStructuredText, and replace them by the HTML output.
-Also, add Doxygen /** and /**< syntax automatically where appropriate.
-
-"""
-import sys
-import re
-import os
-import textwrap
-
-from numpy.compat import pickle
-
-CACHE_FILE = 'build/rst-cache.pck'
-
-def main():
- import argparse
-
- parser = argparse.ArgumentParser(usage=__doc__.strip())
- parser.add_argument('input_file', help='input file')
- args = parser.parse_args()
-
- comment_re = re.compile(r'(\n.*?)/\*(.*?)\*/', re.S)
-
- cache = load_cache()
-
- try:
- with open(args.input_file, 'r') as f:
- text = f.read()
- text = comment_re.sub(lambda m: process_match(m, cache), text)
- sys.stdout.write(text)
- finally:
- save_cache(cache)
-
-def filter_comment(text):
- if text.startswith('NUMPY_API'):
- text = text[9:].strip()
- if text.startswith('UFUNC_API'):
- text = text[9:].strip()
-
- html = render_html(text)
- return html
-
-def process_match(m, cache=None):
- pre, rawtext = m.groups()
-
- preline = pre.split("\n")[-1]
-
- if cache is not None and rawtext in cache:
- text = cache[rawtext]
- else:
- text = re.compile(r'^\s*\*', re.M).sub('', rawtext)
- text = textwrap.dedent(text)
- text = filter_comment(text)
-
- if cache is not None:
- cache[rawtext] = text
-
- if preline.strip():
- return pre + "/**< " + text + " */"
- else:
- return pre + "/** " + text + " */"
-
-def load_cache():
- if os.path.exists(CACHE_FILE):
- with open(CACHE_FILE, 'rb') as f:
- try:
- cache = pickle.load(f)
- except Exception:
- cache = {}
- else:
- cache = {}
- return cache
-
-def save_cache(cache):
- with open(CACHE_FILE + '.new', 'wb') as f:
- pickle.dump(cache, f)
- os.rename(CACHE_FILE + '.new', CACHE_FILE)
-
-def render_html(text):
- import docutils.parsers.rst
- import docutils.writers.html4css1
- import docutils.core
-
- docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'title-reference'
- writer = docutils.writers.html4css1.Writer()
- parts = docutils.core.publish_parts(
- text,
- writer=writer,
- settings_overrides = dict(halt_level=5,
- traceback=True,
- default_reference_context='title-reference',
- stylesheet_path='',
- # security settings:
- raw_enabled=0,
- file_insertion_enabled=0,
- _disable_config=1,
- )
- )
- return parts['html_body']
-
-if __name__ == "__main__": main()
diff --git a/doc/release/upcoming_changes/20000.deprecation.rst b/doc/release/upcoming_changes/20000.deprecation.rst
new file mode 100644
index 000000000..e0a56cd47
--- /dev/null
+++ b/doc/release/upcoming_changes/20000.deprecation.rst
@@ -0,0 +1,5 @@
+Passing boolean ``kth`` values to (arg-)partition has been deprecated
+---------------------------------------------------------------------
+`~numpy.partition` and `~numpy.argpartition` would previously accept boolean
+values for the ``kth`` parameter, which would subsequently be converted into
+integers. This behavior has now been deprecated.
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index e3b8d270d..63c93821b 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -25,7 +25,7 @@ form of the string, and can be either a :ref:`date unit <arrays.dtypes.dateunits
:ref:`time unit <arrays.dtypes.timeunits>`. The date units are years ('Y'),
months ('M'), weeks ('W'), and days ('D'), while the time units are
hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
-some additional SI-prefix seconds-based units. The datetime64 data type
+some additional SI-prefix seconds-based units. The datetime64 data type
also accepts the string "NAT", in any combination of lowercase/uppercase
letters, for a "Not A Time" value.
@@ -74,6 +74,18 @@ datetime type with generic units.
array(['2001-01-01T12:00:00.000', '2002-02-03T13:56:03.172'],
dtype='datetime64[ms]')
+An array of datetimes can be constructed from integers representing
+POSIX timestamps with the given unit.
+
+.. admonition:: Example
+
+ >>> np.array([0, 1577836800], dtype='datetime64[s]')
+ array(['1970-01-01T00:00:00', '2020-01-01T00:00:00'],
+ dtype='datetime64[s]')
+
+ >>> np.array([0, 1577836800000]).astype('datetime64[ms]')
+ array(['1970-01-01T00:00:00.000', '2020-01-01T00:00:00.000'],
+ dtype='datetime64[ms]')
The datetime type works with many common NumPy functions, for
example :func:`arange` can be used to generate ranges of dates.
@@ -120,9 +132,9 @@ Datetime and Timedelta Arithmetic
NumPy allows the subtraction of two Datetime values, an operation which
produces a number with a time unit. Because NumPy doesn't have a physical
quantities system in its core, the timedelta64 data type was created
-to complement datetime64. The arguments for timedelta64 are a number,
+to complement datetime64. The arguments for timedelta64 are a number,
to represent the number of units, and a date/time unit, such as
-(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
+(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
data type also accepts the string "NAT" in place of the number for a "Not A Time" value.
.. admonition:: Example
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index 9006c9b31..6a135fd71 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -519,34 +519,40 @@ From other objects
:c:data:`NPY_ARRAY_CARRAY`
- .. c:macro:: NPY_ARRAY_IN_ARRAY
+..
+ dedented to allow internal linking, pending a refactoring
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
+.. c:macro:: NPY_ARRAY_IN_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
.. c:macro:: NPY_ARRAY_IN_FARRAY
:c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED`
- .. c:macro:: NPY_OUT_ARRAY
+.. c:macro:: NPY_OUT_ARRAY
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_ALIGNED`
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_ALIGNED`
- .. c:macro:: NPY_ARRAY_OUT_ARRAY
+.. c:macro:: NPY_ARRAY_OUT_ARRAY
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \|
- :c:data:`NPY_ARRAY_WRITEABLE`
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \|
+ :c:data:`NPY_ARRAY_WRITEABLE`
.. c:macro:: NPY_ARRAY_OUT_FARRAY
:c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
:c:data:`NPY_ARRAY_ALIGNED`
- .. c:macro:: NPY_ARRAY_INOUT_ARRAY
+..
+ dedented to allow internal linking, pending a refactoring
- :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
- :c:data:`NPY_ARRAY_UPDATEIFCOPY`
+.. c:macro:: NPY_ARRAY_INOUT_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
+ :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
+ :c:data:`NPY_ARRAY_UPDATEIFCOPY`
.. c:macro:: NPY_ARRAY_INOUT_FARRAY
@@ -584,26 +590,32 @@ From other objects
did not have the _ARRAY_ macro namespace in them. That form
of the constant names is deprecated in 1.7.
- .. c:macro:: NPY_ARRAY_NOTSWAPPED
+..
+ dedented to allow internal linking, pending a refactoring
+
+.. c:macro:: NPY_ARRAY_NOTSWAPPED
- Make sure the returned array has a data-type descriptor that is in
- machine byte-order, over-riding any specification in the *dtype*
- argument. Normally, the byte-order requirement is determined by
- the *dtype* argument. If this flag is set and the dtype argument
- does not indicate a machine byte-order descriptor (or is NULL and
- the object is already an array with a data-type descriptor that is
- not in machine byte- order), then a new data-type descriptor is
- created and used with its byte-order field set to native.
+ Make sure the returned array has a data-type descriptor that is in
+ machine byte-order, over-riding any specification in the *dtype*
+ argument. Normally, the byte-order requirement is determined by
+ the *dtype* argument. If this flag is set and the dtype argument
+ does not indicate a machine byte-order descriptor (or is NULL and
+ the object is already an array with a data-type descriptor that is
+ not in machine byte- order), then a new data-type descriptor is
+ created and used with its byte-order field set to native.
.. c:macro:: NPY_ARRAY_BEHAVED_NS
:c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
:c:data:`NPY_ARRAY_NOTSWAPPED`
- .. c:macro:: NPY_ARRAY_ELEMENTSTRIDES
+..
+ dedented to allow internal linking, pending a refactoring
+
+.. c:macro:: NPY_ARRAY_ELEMENTSTRIDES
- Make sure the returned array has strides that are multiples of the
- element size.
+ Make sure the returned array has strides that are multiples of the
+ element size.
.. c:function:: PyObject* PyArray_FromArray( \
PyArrayObject* op, PyArray_Descr* newtype, int requirements)
diff --git a/doc/source/reference/c-api/types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index a4a9734c5..605a4ae71 100644
--- a/doc/source/reference/c-api/types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -286,48 +286,54 @@ PyArrayDescr_Type and PyArray_Descr
array like behavior. Each bit in this member is a flag which are named
as:
- .. c:macro:: NPY_ITEM_REFCOUNT
+..
+ dedented to allow internal linking, pending a refactoring
- Indicates that items of this data-type must be reference
- counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
+.. c:macro:: NPY_ITEM_REFCOUNT
+
+ Indicates that items of this data-type must be reference
+ counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
.. c:macro:: NPY_ITEM_HASOBJECT
Same as :c:data:`NPY_ITEM_REFCOUNT`.
- .. c:macro:: NPY_LIST_PICKLE
+..
+ dedented to allow internal linking, pending a refactoring
+
+.. c:macro:: NPY_LIST_PICKLE
- Indicates arrays of this data-type must be converted to a list
- before pickling.
+ Indicates arrays of this data-type must be converted to a list
+ before pickling.
- .. c:macro:: NPY_ITEM_IS_POINTER
+.. c:macro:: NPY_ITEM_IS_POINTER
- Indicates the item is a pointer to some other data-type
+ Indicates the item is a pointer to some other data-type
- .. c:macro:: NPY_NEEDS_INIT
+.. c:macro:: NPY_NEEDS_INIT
- Indicates memory for this data-type must be initialized (set
- to 0) on creation.
+ Indicates memory for this data-type must be initialized (set
+ to 0) on creation.
- .. c:macro:: NPY_NEEDS_PYAPI
+.. c:macro:: NPY_NEEDS_PYAPI
- Indicates this data-type requires the Python C-API during
- access (so don't give up the GIL if array access is going to
- be needed).
+ Indicates this data-type requires the Python C-API during
+ access (so don't give up the GIL if array access is going to
+ be needed).
- .. c:macro:: NPY_USE_GETITEM
+.. c:macro:: NPY_USE_GETITEM
- On array access use the ``f->getitem`` function pointer
- instead of the standard conversion to an array scalar. Must
- use if you don't define an array scalar to go along with
- the data-type.
+ On array access use the ``f->getitem`` function pointer
+ instead of the standard conversion to an array scalar. Must
+ use if you don't define an array scalar to go along with
+ the data-type.
- .. c:macro:: NPY_USE_SETITEM
+.. c:macro:: NPY_USE_SETITEM
- When creating a 0-d array from an array scalar use
- ``f->setitem`` instead of the standard copy from an array
- scalar. Must use if you don't define an array scalar to go
- along with the data-type.
+ When creating a 0-d array from an array scalar use
+ ``f->setitem`` instead of the standard copy from an array
+ scalar. Must use if you don't define an array scalar to go
+ along with the data-type.
.. c:macro:: NPY_FROM_FIELDS
@@ -989,14 +995,17 @@ PyUFunc_Type and PyUFuncObject
For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags
- .. c:macro:: UFUNC_CORE_DIM_CAN_IGNORE
+..
+ dedented to allow internal linking, pending a refactoring
+
+.. c:macro:: UFUNC_CORE_DIM_CAN_IGNORE
- if the dim name ends in ``?``
+ if the dim name ends in ``?``
- .. c:macro:: UFUNC_CORE_DIM_SIZE_INFERRED
+.. c:macro:: UFUNC_CORE_DIM_SIZE_INFERRED
- if the dim size will be determined from the operands
- and not from a :ref:`frozen <frozen>` signature
+ if the dim size will be determined from the operands
+ and not from a :ref:`frozen <frozen>` signature
.. c:member:: PyObject *identity_value
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index c78d48cc6..69f18fac4 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -197,6 +197,7 @@ from typing import (
Final,
final,
ClassVar,
+ Set,
)
# Ensures that the stubs are picked up
@@ -828,24 +829,6 @@ class poly1d:
def integ(self, m=..., k=...): ...
def deriv(self, m=...): ...
-class vectorize:
- pyfunc: Any
- cache: Any
- signature: Any
- otypes: Any
- excluded: Any
- __doc__: Any
- def __init__(
- self,
- pyfunc,
- otypes: Any = ...,
- doc: Any = ...,
- excluded: Any = ...,
- cache: Any = ...,
- signature: Any = ...,
- ) -> None: ...
- def __call__(self, *args: Any, **kwargs: Any) -> Any: ...
-
# Some of these are aliases; others are wrappers with an identical signature
round = around
round_ = around
@@ -1181,8 +1164,6 @@ class _ArrayOrScalarCommon:
# generics and 0d arrays return builtin scalars
def tolist(self) -> Any: ...
- # TODO: Add proper signatures
- def __getitem__(self, key) -> Any: ...
@property
def __array_interface__(self): ...
@property
@@ -1679,6 +1660,26 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
/,
) -> ndarray[_ShapeType2, _DType]: ...
+ @overload
+ def __getitem__(self, key: Union[
+ SupportsIndex,
+ _ArrayLikeInt_co,
+ Tuple[SupportsIndex | _ArrayLikeInt_co, ...],
+ ]) -> Any: ...
+ @overload
+ def __getitem__(self, key: Union[
+ None,
+ slice,
+ ellipsis,
+ SupportsIndex,
+ _ArrayLikeInt_co,
+ Tuple[None | slice | ellipsis | _ArrayLikeInt_co | SupportsIndex, ...],
+ ]) -> ndarray[Any, _DType_co]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: str) -> NDArray[Any]: ...
+ @overload
+ def __getitem__(self: NDArray[void], key: list[str]) -> ndarray[_ShapeType, dtype[void]]: ...
+
@property
def ctypes(self) -> _ctypes[int]: ...
@property
@@ -3879,3 +3880,21 @@ class memmap(ndarray[_ShapeType, _DType_co]):
) -> Any: ...
def __getitem__(self, index): ... # TODO
def flush(self) -> None: ...
+
+class vectorize:
+ pyfunc: Callable[..., Any]
+ cache: bool
+ signature: None | str
+ otypes: None | str
+ excluded: Set[int | str]
+ __doc__: None | str
+ def __init__(
+ self,
+ pyfunc: Callable[..., Any],
+ otypes: None | str | Iterable[DTypeLike] = ...,
+ doc: None | str = ...,
+ excluded: None | Iterable[int | str] = ...,
+ cache: bool = ...,
+ signature: None | str = ...,
+ ) -> None: ...
+ def __call__(self, *args: Any, **kwargs: Any) -> NDArray[Any]: ...
diff --git a/numpy/array_api/_creation_functions.py b/numpy/array_api/_creation_functions.py
index 9f8136267..e36807468 100644
--- a/numpy/array_api/_creation_functions.py
+++ b/numpy/array_api/_creation_functions.py
@@ -134,7 +134,7 @@ def eye(
n_cols: Optional[int] = None,
/,
*,
- k: Optional[int] = 0,
+ k: int = 0,
dtype: Optional[Dtype] = None,
device: Optional[Device] = None,
) -> Array:
@@ -232,7 +232,7 @@ def linspace(
return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint))
-def meshgrid(*arrays: Sequence[Array], indexing: str = "xy") -> List[Array, ...]:
+def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]:
"""
Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.
diff --git a/numpy/array_api/_data_type_functions.py b/numpy/array_api/_data_type_functions.py
index fd92aa250..7ccbe9469 100644
--- a/numpy/array_api/_data_type_functions.py
+++ b/numpy/array_api/_data_type_functions.py
@@ -13,7 +13,7 @@ if TYPE_CHECKING:
import numpy as np
-def broadcast_arrays(*arrays: Sequence[Array]) -> List[Array]:
+def broadcast_arrays(*arrays: Array) -> List[Array]:
"""
Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`.
@@ -98,7 +98,7 @@ def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
return iinfo_object(ii.bits, ii.max, ii.min)
-def result_type(*arrays_and_dtypes: Sequence[Union[Array, Dtype]]) -> Dtype:
+def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype:
"""
Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`.
diff --git a/numpy/array_api/_typing.py b/numpy/array_api/_typing.py
index 5f937a56c..519e8463c 100644
--- a/numpy/array_api/_typing.py
+++ b/numpy/array_api/_typing.py
@@ -15,10 +15,12 @@ __all__ = [
"PyCapsule",
]
-from typing import Any, Literal, Sequence, Type, Union
+import sys
+from typing import Any, Literal, Sequence, Type, Union, TYPE_CHECKING, TypeVar
-from . import (
- Array,
+from ._array_object import Array
+from numpy import (
+ dtype,
int8,
int16,
int32,
@@ -33,12 +35,26 @@ from . import (
# This should really be recursive, but that isn't supported yet. See the
# similar comment in numpy/typing/_array_like.py
-NestedSequence = Sequence[Sequence[Any]]
+_T = TypeVar("_T")
+NestedSequence = Sequence[Sequence[_T]]
Device = Literal["cpu"]
-Dtype = Type[
- Union[int8, int16, int32, int64, uint8, uint16, uint32, uint64, float32, float64]
-]
+if TYPE_CHECKING or sys.version_info >= (3, 9):
+ Dtype = dtype[Union[
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ ]]
+else:
+ Dtype = dtype
+
SupportsDLPack = Any
SupportsBufferProtocol = Any
PyCapsule = Any
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index bb0c2ea12..37f21211f 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -4044,6 +4044,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as index is deprecated.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 5ecb1e666..29d215ea0 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -689,6 +689,9 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as partition index is deprecated.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
@@ -781,6 +784,9 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
elements in the partitions is undefined. If provided with a
sequence of k-th it will partition all of them into their sorted
position at once.
+
+ .. deprecated:: 1.22.0
+ Passing booleans as partition index is deprecated.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If
None, the flattened array is used.
diff --git a/numpy/core/function_base.pyi b/numpy/core/function_base.pyi
index c35629aa7..68d3b3a98 100644
--- a/numpy/core/function_base.pyi
+++ b/numpy/core/function_base.pyi
@@ -1,4 +1,4 @@
-from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal
+from typing import overload, Tuple, Union, Sequence, Any, SupportsIndex, Literal, List
from numpy import ndarray
from numpy.typing import ArrayLike, DTypeLike, _SupportsArray, _NumberLike_co
@@ -8,6 +8,9 @@ _ArrayLikeNested = Sequence[Sequence[Any]]
_ArrayLikeNumber = Union[
_NumberLike_co, Sequence[_NumberLike_co], ndarray, _SupportsArray, _ArrayLikeNested
]
+
+__all__: List[str]
+
@overload
def linspace(
start: _ArrayLikeNumber,
@@ -47,3 +50,11 @@ def geomspace(
dtype: DTypeLike = ...,
axis: SupportsIndex = ...,
) -> ndarray: ...
+
+# Re-exported to `np.lib.function_base`
+def add_newdoc(
+ place: str,
+ obj: str,
+ doc: str | Tuple[str, str] | List[Tuple[str, str]],
+ warn_on_python: bool = ...,
+) -> None: ...
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index b24bc0356..e0064c017 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -13,6 +13,7 @@
#include <Python.h>
#include "numpy/arrayobject.h"
+#include "numpyos.h"
#include "npy_config.h"
#include "npy_pycompat.h"
@@ -723,12 +724,21 @@ parse_datetime_extended_unit_from_string(char const *str, Py_ssize_t len,
{
char const *substr = str, *substrend = NULL;
int den = 1;
+ npy_longlong true_meta_val;
/* First comes an optional integer multiplier */
out_meta->num = (int)strtol_const(substr, &substrend, 10);
if (substr == substrend) {
out_meta->num = 1;
}
+ else {
+ // check for 32-bit integer overflow
+ char *endptr = NULL;
+ true_meta_val = NumPyOS_strtoll(substr, &endptr, 10);
+ if (true_meta_val > INT_MAX || true_meta_val < 0) {
+ goto bad_input;
+ }
+ }
substr = substrend;
/* Next comes the unit itself, followed by either '/' or the string end */
@@ -3776,7 +3786,17 @@ time_to_time_resolve_descriptors(
meta2 = get_datetime_metadata_from_dtype(loop_descrs[1]);
assert(meta2 != NULL);
- if (meta1->base == meta2->base && meta1->num == meta2->num) {
+ if ((meta1->base == meta2->base && meta1->num == meta2->num) ||
+ // handle some common metric prefix conversions
+ // 1000 fold conversions
+ ((meta2->base >= 7) && (meta1->base - meta2->base == 1)
+ && ((meta1->num / meta2->num) == 1000)) ||
+ // 10^6 fold conversions
+ ((meta2->base >= 7) && (meta1->base - meta2->base == 2)
+ && ((meta1->num / meta2->num) == 1000000)) ||
+ // 10^9 fold conversions
+ ((meta2->base >= 7) && (meta1->base - meta2->base == 3)
+ && ((meta1->num / meta2->num) == 1000000000))) {
if (byteorder_may_allow_view) {
return NPY_NO_CASTING | byteorder_may_allow_view;
}
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index ad5478bbf..ee66378a9 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -1292,7 +1292,15 @@ partition_prep_kth_array(PyArrayObject * ktharray,
npy_intp * kth;
npy_intp nkth, i;
- if (!PyArray_CanCastSafely(PyArray_TYPE(ktharray), NPY_INTP)) {
+ if (PyArray_ISBOOL(ktharray)) {
+ /* 2021-09-29, NumPy 1.22 */
+ if (DEPRECATE(
+ "Passing booleans as partition index is deprecated"
+ " (warning added in NumPy 1.22)") < 0) {
+ return NULL;
+ }
+ }
+ else if (!PyArray_ISINTEGER(ktharray)) {
PyErr_Format(PyExc_TypeError, "Partition index must be integer");
return NULL;
}
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 36bfaa7cf..f959162fd 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -1124,6 +1124,35 @@ NPY_NO_EXPORT PyTypeObject PyArrayIter_Type = {
/** END of Array Iterator **/
+
+static int
+set_shape_mismatch_exception(PyArrayMultiIterObject *mit, int i1, int i2)
+{
+ PyObject *shape1, *shape2, *msg;
+
+ shape1 = PyObject_GetAttrString((PyObject *) mit->iters[i1]->ao, "shape");
+ if (shape1 == NULL) {
+ return -1;
+ }
+ shape2 = PyObject_GetAttrString((PyObject *) mit->iters[i2]->ao, "shape");
+ if (shape2 == NULL) {
+ Py_DECREF(shape1);
+ return -1;
+ }
+ msg = PyUnicode_FromFormat("shape mismatch: objects cannot be broadcast "
+ "to a single shape. Mismatch is between arg %d "
+ "with shape %S and arg %d with shape %S.",
+ i1, shape1, i2, shape2);
+ Py_DECREF(shape1);
+ Py_DECREF(shape2);
+ if (msg == NULL) {
+ return -1;
+ }
+ PyErr_SetObject(PyExc_ValueError, msg);
+ Py_DECREF(msg);
+ return 0;
+}
+
/* Adjust dimensionality and strides for index object iterators
--- i.e. broadcast
*/
@@ -1132,6 +1161,7 @@ NPY_NO_EXPORT int
PyArray_Broadcast(PyArrayMultiIterObject *mit)
{
int i, nd, k, j;
+ int src_iter = -1; /* Initializing avoids a compiler warning. */
npy_intp tmp;
PyArrayIterObject *it;
@@ -1155,12 +1185,10 @@ PyArray_Broadcast(PyArrayMultiIterObject *mit)
}
if (mit->dimensions[i] == 1) {
mit->dimensions[i] = tmp;
+ src_iter = j;
}
else if (mit->dimensions[i] != tmp) {
- PyErr_SetString(PyExc_ValueError,
- "shape mismatch: objects" \
- " cannot be broadcast" \
- " to a single shape");
+ set_shape_mismatch_exception(mit, src_iter, j);
return -1;
}
}
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index d0cf53576..a338d712d 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -268,6 +268,56 @@ PyArray_RegisterDataType(PyArray_Descr *descr)
return typenum;
}
+
+/*
+ * Checks that there is no cast already cached using the new casting-impl
+ * mechanism.
+ * In that case, we do not clear out the cache (but otherwise silently
+ * continue). Users should not modify casts after they have been used,
+ * but this may also happen accidentally during setup (and may never have
+ * mattered). See https://github.com/numpy/numpy/issues/20009
+ */
+static int _warn_if_cast_exists_already(
+ PyArray_Descr *descr, int totype, char *funcname)
+{
+ PyArray_DTypeMeta *to_DType = PyArray_DTypeFromTypeNum(totype);
+ if (to_DType == NULL) {
+ return -1;
+ }
+ PyObject *cast_impl = PyDict_GetItemWithError(
+ NPY_DT_SLOTS(NPY_DTYPE(descr))->castingimpls, (PyObject *)to_DType);
+ Py_DECREF(to_DType);
+ if (cast_impl == NULL) {
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+ }
+ else {
+ char *extra_msg;
+ if (cast_impl == Py_None) {
+ extra_msg = "the cast will continue to be considered impossible.";
+ }
+ else {
+ extra_msg = "the previous definition will continue to be used.";
+ }
+ Py_DECREF(cast_impl);
+ PyArray_Descr *to_descr = PyArray_DescrFromType(totype);
+ int ret = PyErr_WarnFormat(PyExc_RuntimeWarning, 1,
+ "A cast from %R to %R was registered/modified using `%s` "
+ "after the cast had been used. "
+ "This registration will have (mostly) no effect: %s\n"
+ "The most likely fix is to ensure that casts are the first "
+ "thing initialized after dtype registration. "
+ "Please contact the NumPy developers with any questions!",
+ descr, to_descr, funcname, extra_msg);
+ Py_DECREF(to_descr);
+ if (ret < 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
/*NUMPY_API
Register Casting Function
Replaces any function currently stored.
@@ -279,14 +329,19 @@ PyArray_RegisterCastFunc(PyArray_Descr *descr, int totype,
PyObject *cobj, *key;
int ret;
- if (totype < NPY_NTYPES_ABI_COMPATIBLE) {
- descr->f->cast[totype] = castfunc;
- return 0;
- }
if (totype >= NPY_NTYPES && !PyTypeNum_ISUSERDEF(totype)) {
PyErr_SetString(PyExc_TypeError, "invalid type number.");
return -1;
}
+ if (_warn_if_cast_exists_already(
+ descr, totype, "PyArray_RegisterCastFunc") < 0) {
+ return -1;
+ }
+
+ if (totype < NPY_NTYPES_ABI_COMPATIBLE) {
+ descr->f->cast[totype] = castfunc;
+ return 0;
+ }
if (descr->f->castdict == NULL) {
descr->f->castdict = PyDict_New();
if (descr->f->castdict == NULL) {
@@ -328,6 +383,10 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
"RegisterCanCast must be user-defined.");
return -1;
}
+ if (_warn_if_cast_exists_already(
+ descr, totype, "PyArray_RegisterCanCast") < 0) {
+ return -1;
+ }
if (scalar == NPY_NOSCALAR) {
/*
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 5a490646e..69eba7ba0 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -63,6 +63,7 @@ class TestDateTime:
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
+ assert_raises(TypeError, np.dtype, 'M8[3000000000ps]')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
@@ -137,6 +138,42 @@ class TestDateTime:
assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
+ def test_datetime_prefix_conversions(self):
+ # regression tests related to gh-19631;
+ # test metric prefixes from seconds down to
+ # attoseconds for bidirectional conversions
+ smaller_units = ['M8[7000ms]',
+ 'M8[2000us]',
+ 'M8[1000ns]',
+ 'M8[5000ns]',
+ 'M8[2000ps]',
+ 'M8[9000fs]',
+ 'M8[1000as]',
+ 'M8[2000000ps]',
+ 'M8[1000000as]',
+ 'M8[2000000000ps]',
+ 'M8[1000000000as]']
+ larger_units = ['M8[7s]',
+ 'M8[2ms]',
+ 'M8[us]',
+ 'M8[5us]',
+ 'M8[2ns]',
+ 'M8[9ps]',
+ 'M8[1fs]',
+ 'M8[2us]',
+ 'M8[1ps]',
+ 'M8[2ms]',
+ 'M8[1ns]']
+ for larger_unit, smaller_unit in zip(larger_units, smaller_units):
+ assert np.can_cast(larger_unit, smaller_unit, casting='safe')
+ assert np.can_cast(smaller_unit, larger_unit, casting='safe')
+
+ @pytest.mark.parametrize("unit", [
+ "s", "ms", "us", "ns", "ps", "fs", "as"])
+ def test_prohibit_negative_datetime(self, unit):
+ with assert_raises(TypeError):
+ np.array([1], dtype=f"M8[-1{unit}]")
+
def test_compare_generic_nat(self):
# regression tests for gh-6452
assert_(np.datetime64('NaT') !=
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 1d0c5dfac..898ff8075 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -1192,3 +1192,26 @@ class TestUFuncForcedDTypeWarning(_DeprecationTestCase):
np.maximum(arr, arr, dtype="m8[ns]") # previously used the "ns"
with pytest.warns(DeprecationWarning, match=self.message):
np.maximum.reduce(arr, dtype="m8[ns]") # never preserved the "ns"
+
+
+PARTITION_DICT = {
+ "partition method": np.arange(10).partition,
+ "argpartition method": np.arange(10).argpartition,
+ "partition function": lambda kth: np.partition(np.arange(10), kth),
+ "argpartition function": lambda kth: np.argpartition(np.arange(10), kth),
+}
+
+
+@pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT)
+class TestPartitionBoolIndex(_DeprecationTestCase):
+ # Deprecated 2021-09-29, NumPy 1.22
+ warning_cls = DeprecationWarning
+ message = "Passing booleans as partition index is deprecated"
+
+ def test_deprecated(self, func):
+ self.assert_deprecated(lambda: func(True))
+ self.assert_deprecated(lambda: func([False, True]))
+
+ def test_not_deprecated(self, func):
+ self.assert_not_deprecated(lambda: func(1))
+ self.assert_not_deprecated(lambda: func([0, 1]))
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index b5f9f8af3..0da36bbea 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -2511,27 +2511,19 @@ class TestMethods:
assert_(not isinstance(a.searchsorted(b, 'left', s), A))
assert_(not isinstance(a.searchsorted(b, 'right', s), A))
- def test_argpartition_out_of_range(self):
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
+ def test_argpartition_out_of_range(self, dtype):
# Test out of range values in kth raise an error, gh-5469
- d = np.arange(10)
+ d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.argpartition, 10)
assert_raises(ValueError, d.argpartition, -11)
- # Test also for generic type argpartition, which uses sorting
- # and used to not bound check kth
- d_obj = np.arange(10, dtype=object)
- assert_raises(ValueError, d_obj.argpartition, 10)
- assert_raises(ValueError, d_obj.argpartition, -11)
- def test_partition_out_of_range(self):
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
+ def test_partition_out_of_range(self, dtype):
# Test out of range values in kth raise an error, gh-5469
- d = np.arange(10)
+ d = np.arange(10).astype(dtype=dtype)
assert_raises(ValueError, d.partition, 10)
assert_raises(ValueError, d.partition, -11)
- # Test also for generic type partition, which uses sorting
- # and used to not bound check kth
- d_obj = np.arange(10, dtype=object)
- assert_raises(ValueError, d_obj.partition, 10)
- assert_raises(ValueError, d_obj.partition, -11)
def test_argpartition_integer(self):
# Test non-integer values in kth raise an error/
@@ -2551,26 +2543,30 @@ class TestMethods:
d_obj = np.arange(10, dtype=object)
assert_raises(TypeError, d_obj.partition, 9.)
- def test_partition_empty_array(self):
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_partition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
+ kth = np.array(0, dtype=kth_dtype)[()]
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array partition with axis={0}'.format(axis)
- assert_equal(np.partition(a, 0, axis=axis), a, msg)
+ assert_equal(np.partition(a, kth, axis=axis), a, msg)
msg = 'test empty array partition with axis=None'
- assert_equal(np.partition(a, 0, axis=None), a.ravel(), msg)
+ assert_equal(np.partition(a, kth, axis=None), a.ravel(), msg)
- def test_argpartition_empty_array(self):
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_argpartition_empty_array(self, kth_dtype):
# check axis handling for multidimensional empty arrays
+ kth = np.array(0, dtype=kth_dtype)[()]
a = np.array([])
a.shape = (3, 2, 1, 0)
for axis in range(-a.ndim, a.ndim):
msg = 'test empty array argpartition with axis={0}'.format(axis)
- assert_equal(np.partition(a, 0, axis=axis),
+ assert_equal(np.partition(a, kth, axis=axis),
np.zeros_like(a, dtype=np.intp), msg)
msg = 'test empty array argpartition with axis=None'
- assert_equal(np.partition(a, 0, axis=None),
+ assert_equal(np.partition(a, kth, axis=None),
np.zeros_like(a.ravel(), dtype=np.intp), msg)
def test_partition(self):
@@ -2901,10 +2897,12 @@ class TestMethods:
assert_array_equal(np.partition(d, kth)[kth], tgt,
err_msg="data: %r\n kth: %r" % (d, kth))
- def test_argpartition_gh5524(self):
+ @pytest.mark.parametrize("kth_dtype", np.typecodes["AllInteger"])
+ def test_argpartition_gh5524(self, kth_dtype):
# A test for functionality of argpartition on lists.
- d = [6,7,3,2,9,0]
- p = np.argpartition(d,1)
+ kth = np.array(1, dtype=kth_dtype)[()]
+ d = [6, 7, 3, 2, 9, 0]
+ p = np.argpartition(d, kth)
self.assert_partitioned(np.array(d)[p],[1])
def test_flatten(self):
@@ -4200,7 +4198,7 @@ class TestArgmaxArgminCommon:
(3, 4, 1, 2), (4, 1, 2, 3)]
@pytest.mark.parametrize("size, axis", itertools.chain(*[[(size, axis)
- for axis in list(range(-len(size), len(size))) + [None]]
+ for axis in list(range(-len(size), len(size))) + [None]]
for size in sizes]))
@pytest.mark.parametrize('method', [np.argmax, np.argmin])
def test_np_argmin_argmax_keepdims(self, size, axis, method):
@@ -4221,7 +4219,7 @@ class TestArgmaxArgminCommon:
assert_equal(res, res_orig)
assert_(res.shape == new_shape)
outarray = np.empty(res.shape, dtype=res.dtype)
- res1 = method(arr, axis=axis, out=outarray,
+ res1 = method(arr, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
@@ -4234,7 +4232,7 @@ class TestArgmaxArgminCommon:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
- method(arr.T, axis=axis,
+ method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
# non-contiguous arrays
@@ -4252,18 +4250,18 @@ class TestArgmaxArgminCommon:
assert_(res.shape == new_shape)
outarray = np.empty(new_shape[::-1], dtype=res.dtype)
outarray = outarray.T
- res1 = method(arr.T, axis=axis, out=outarray,
+ res1 = method(arr.T, axis=axis, out=outarray,
keepdims=True)
assert_(res1 is outarray)
assert_equal(res, outarray)
if len(size) > 0:
- # one dimension lesser for non-zero sized
+ # one dimension lesser for non-zero sized
# array should raise an error
with pytest.raises(ValueError):
- method(arr[0], axis=axis,
+ method(arr[0], axis=axis,
out=outarray, keepdims=True)
-
+
if len(size) > 0:
wrong_shape = list(new_shape)
if axis is not None:
@@ -4272,7 +4270,7 @@ class TestArgmaxArgminCommon:
wrong_shape[0] = 2
wrong_outarray = np.empty(wrong_shape, dtype=res.dtype)
with pytest.raises(ValueError):
- method(arr.T, axis=axis,
+ method(arr.T, axis=axis,
out=wrong_outarray, keepdims=True)
@pytest.mark.parametrize('method', ['max', 'min'])
@@ -4287,7 +4285,7 @@ class TestArgmaxArgminCommon:
axes.remove(i)
assert_(np.all(a_maxmin == aarg_maxmin.choose(
*a.transpose(i, *axes))))
-
+
@pytest.mark.parametrize('method', ['argmax', 'argmin'])
def test_output_shape(self, method):
# see also gh-616
@@ -4330,7 +4328,7 @@ class TestArgmaxArgminCommon:
[('argmax', np.argmax),
('argmin', np.argmin)])
def test_np_vs_ndarray(self, arr_method, np_method):
- # make sure both ndarray.argmax/argmin and
+ # make sure both ndarray.argmax/argmin and
# numpy.argmax/argmin support out/axis args
a = np.random.normal(size=(2, 3))
arg_method = getattr(a, arr_method)
@@ -4344,7 +4342,7 @@ class TestArgmaxArgminCommon:
# check keyword args
out1 = np.zeros(3, dtype=int)
out2 = np.zeros(3, dtype=int)
- assert_equal(arg_method(out=out1, axis=0),
+ assert_equal(arg_method(out=out1, axis=0),
np_method(a, out=out2, axis=0))
assert_equal(out1, out2)
@@ -4438,7 +4436,7 @@ class TestArgmax:
assert_equal(np.argmax(arr), pos, err_msg="%r" % arr)
assert_equal(arr[np.argmax(arr)], val, err_msg="%r" % arr)
-
+
def test_maximum_signed_integers(self):
a = np.array([1, 2**7 - 1, -2**7], dtype=np.int8)
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 4510333a1..e36f76c53 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -3511,6 +3511,12 @@ class TestBroadcast:
assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
+ def test_shape_mismatch_error_message(self):
+ with pytest.raises(ValueError, match=r"arg 0 with shape \(1, 3\) and "
+ r"arg 2 with shape \(2,\)"):
+ np.broadcast([[1, 2, 3]], [[4], [5]], [6, 7])
+
+
class TestKeepdims:
class sub_array(np.ndarray):
diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py
index fb85c821a..939201f44 100644
--- a/numpy/distutils/fcompiler/nag.py
+++ b/numpy/distutils/fcompiler/nag.py
@@ -15,9 +15,6 @@ class BaseNAGFCompiler(FCompiler):
return None
def get_flags_linker_so(self):
- if sys.platform == 'darwin':
- return ['-unsharedf95',
- '-Wl,-bundle,-flat_namespace,-undefined,suppress']
return ["-Wl,-shared"]
def get_flags_opt(self):
return ['-O4']
@@ -39,6 +36,10 @@ class NAGFCompiler(BaseNAGFCompiler):
'ranlib' : ["ranlib"]
}
+ def get_flags_linker_so(self):
+ if sys.platform == 'darwin':
+ return ['-unsharedf95', '-Wl,-bundle,-flat_namespace,-undefined,suppress']
+ return BaseNAGFCompiler.get_flags_linker_so(self)
def get_flags_arch(self):
version = self.get_version()
if version and version < '5.1':
@@ -63,6 +64,11 @@ class NAGFORCompiler(BaseNAGFCompiler):
'ranlib' : ["ranlib"]
}
+ def get_flags_linker_so(self):
+ if sys.platform == 'darwin':
+ return ['-unsharedrts',
+ '-Wl,-bundle,-flat_namespace,-undefined,suppress']
+ return BaseNAGFCompiler.get_flags_linker_so(self)
def get_flags_debug(self):
version = self.get_version()
if version and version > '6.1':
diff --git a/numpy/f2py/cb_rules.py b/numpy/f2py/cb_rules.py
index 5c9ddb00a..4848233d4 100644
--- a/numpy/f2py/cb_rules.py
+++ b/numpy/f2py/cb_rules.py
@@ -191,7 +191,7 @@ capi_return_pt:
'maxnofargs': '#maxnofargs#',
'nofoptargs': '#nofoptargs#',
'docstr': """\
-\tdef #argname#(#docsignature#): return #docreturn#\\n\\
+ def #argname#(#docsignature#): return #docreturn#\\n\\
#docstrsigns#""",
'latexdocstr': """
{{}\\verb@def #argname#(#latexdocsignature#): return #docreturn#@{}}
@@ -219,10 +219,10 @@ cb_rout_rules = [
'noargs': '',
'setdims': '/*setdims*/',
'docstrsigns': '', 'latexdocstrsigns': '',
- 'docstrreq': '\tRequired arguments:',
- 'docstropt': '\tOptional arguments:',
- 'docstrout': '\tReturn objects:',
- 'docstrcbs': '\tCall-back functions:',
+ 'docstrreq': ' Required arguments:',
+ 'docstropt': ' Optional arguments:',
+ 'docstrout': ' Return objects:',
+ 'docstrcbs': ' Call-back functions:',
'docreturn': '', 'docsign': '', 'docsignopt': '',
'latexdocstrreq': '\\noindent Required arguments:',
'latexdocstropt': '\\noindent Optional arguments:',
@@ -306,7 +306,7 @@ return_value
'string.h', 'GETSCALARFROMPYTUPLE', '#ctype#'],
'_check': iscomplexfunction
},
- {'docstrout': '\t\t#pydocsignout#',
+ {'docstrout': ' #pydocsignout#',
'latexdocstrout': ['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasnote: '--- #note#'}],
'docreturn': '#rname#,',
@@ -316,9 +316,9 @@ return_value
cb_arg_rules = [
{ # Doc
- 'docstropt': {l_and(isoptional, isintent_nothide): '\t\t#pydocsign#'},
- 'docstrreq': {l_and(isrequired, isintent_nothide): '\t\t#pydocsign#'},
- 'docstrout': {isintent_out: '\t\t#pydocsignout#'},
+ 'docstropt': {l_and(isoptional, isintent_nothide): ' #pydocsign#'},
+ 'docstrreq': {l_and(isrequired, isintent_nothide): ' #pydocsign#'},
+ 'docstrout': {isintent_out: ' #pydocsignout#'},
'latexdocstropt': {l_and(isoptional, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote: '--- #note#'}]},
'latexdocstrreq': {l_and(isrequired, isintent_nothide): ['\\item[]{{}\\verb@#pydocsign#@{}}',
@@ -492,7 +492,7 @@ def buildcallbacks(m):
def buildcallback(rout, um):
from . import capi_maps
- outmess('\tConstructing call-back function "cb_%s_in_%s"\n' %
+ outmess(' Constructing call-back function "cb_%s_in_%s"\n' %
(rout['name'], um))
args, depargs = getargs(rout)
capi_maps.depargs = depargs
@@ -612,6 +612,6 @@ def buildcallback(rout, um):
'latexdocstr': ar['latexdocstr'],
'argname': rd['argname']
}
- outmess('\t %s\n' % (ar['docstrshort']))
+ outmess(' %s\n' % (ar['docstrshort']))
return
################## Build call-back function #############
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index c68aba5c8..67675af45 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -2227,7 +2227,9 @@ def _get_depend_dict(name, vars, deps):
if '=' in vars[name] and not isstring(vars[name]):
for word in word_pattern.findall(vars[name]['=']):
- if word not in words and word in vars:
+ # word_pattern may match values that are not only variables;
+ # they can be string content, for instance.
+ if word not in words and word in vars and word != name:
words.append(word)
for word in words[:]:
for w in deps.get(word, []) \
@@ -2671,7 +2673,7 @@ def analyzevars(block):
init = init.tostring(
language=symbolic.Language.C)
vars[v]['='] = init
- # n needs to be initialzed before v. So,
+ # n needs to be initialized before v. So,
# making v dependent on n and on any
# variables in solver or d.
vars[v]['depend'] = [n] + deps
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 66f11f6b5..24777df5b 100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -174,67 +174,67 @@ static PyObject *#modulename#_module;
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
-\t{NULL}
+ {NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
-\t{NULL,NULL}
+ {NULL,NULL}
};
static struct PyModuleDef moduledef = {
-\tPyModuleDef_HEAD_INIT,
-\t"#modulename#",
-\tNULL,
-\t-1,
-\tf2py_module_methods,
-\tNULL,
-\tNULL,
-\tNULL,
-\tNULL
+ PyModuleDef_HEAD_INIT,
+ "#modulename#",
+ NULL,
+ -1,
+ f2py_module_methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
};
PyMODINIT_FUNC PyInit_#modulename#(void) {
-\tint i;
-\tPyObject *m,*d, *s, *tmp;
-\tm = #modulename#_module = PyModule_Create(&moduledef);
-\tPy_SET_TYPE(&PyFortran_Type, &PyType_Type);
-\timport_array();
-\tif (PyErr_Occurred())
-\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
-\td = PyModule_GetDict(m);
-\ts = PyUnicode_FromString(\"#f2py_version#\");
-\tPyDict_SetItemString(d, \"__version__\", s);
-\tPy_DECREF(s);
-\ts = PyUnicode_FromString(
-\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
-\tPyDict_SetItemString(d, \"__doc__\", s);
-\tPy_DECREF(s);
-\ts = PyUnicode_FromString(\"""" + numpy_version + """\");
-\tPyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
-\tPy_DECREF(s);
-\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
-\t/*
-\t * Store the error object inside the dict, so that it could get deallocated.
-\t * (in practice, this is a module, so it likely will not and cannot.)
-\t */
-\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
-\tPy_DECREF(#modulename#_error);
-\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) {
-\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
-\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
-\t\tPy_DECREF(tmp);
-\t}
+ int i;
+ PyObject *m,*d, *s, *tmp;
+ m = #modulename#_module = PyModule_Create(&moduledef);
+ Py_SET_TYPE(&PyFortran_Type, &PyType_Type);
+ import_array();
+ if (PyErr_Occurred())
+ {PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return m;}
+ d = PyModule_GetDict(m);
+ s = PyUnicode_FromString(\"#f2py_version#\");
+ PyDict_SetItemString(d, \"__version__\", s);
+ Py_DECREF(s);
+ s = PyUnicode_FromString(
+ \"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
+ PyDict_SetItemString(d, \"__doc__\", s);
+ Py_DECREF(s);
+ s = PyUnicode_FromString(\"""" + numpy_version + """\");
+ PyDict_SetItemString(d, \"__f2py_numpy_version__\", s);
+ Py_DECREF(s);
+ #modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
+ /*
+ * Store the error object inside the dict, so that it could get deallocated.
+ * (in practice, this is a module, so it likely will not and cannot.)
+ */
+ PyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
+ Py_DECREF(#modulename#_error);
+ for(i=0;f2py_routine_defs[i].name!=NULL;i++) {
+ tmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
+ PyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
+ Py_DECREF(tmp);
+ }
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
-\tif (! PyErr_Occurred())
-\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
+ if (! PyErr_Occurred())
+ on_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
-\treturn m;
+ return m;
}
#ifdef __cplusplus
}
@@ -326,7 +326,7 @@ f2py_stop_clock();
'externroutines': '#declfortranroutine#',
'doc': '#docreturn##name#(#docsignature#)',
'docshort': '#docreturn##name#(#docsignatureshort#)',
- 'docs': '"\t#docreturn##name#(#docsignature#)\\n"\n',
+ 'docs': '" #docreturn##name#(#docsignature#)\\n"\n',
'need': ['arrayobject.h', 'CFUNCSMESS', 'MINMAX'],
'cppmacros': {debugcapi: '#define DEBUGCFUNCS'},
'latexdoc': ['\\subsection{Wrapper function \\texttt{#texname#}}\n',
@@ -400,25 +400,25 @@ rout_rules = [
ismoduleroutine: '',
isdummyroutine: ''
},
- 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
- l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
- l_and(l_not(ismoduleroutine), isdummyroutine): '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
+ 'routine_def': {l_not(l_or(ismoduleroutine, isintent_c, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isdummyroutine): ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): 'F_FUNC'},
'callfortranroutine': [
{debugcapi: [
- """\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
-\t\tif (#setjmpbuf#) {
-\t\t\tf2py_success = 0;
-\t\t} else {"""},
- {isthreadsafe: '\t\t\tPy_BEGIN_ALLOW_THREADS'},
- {hascallstatement: '''\t\t\t\t#callstatement#;
-\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ {hascallstatement: ''' #callstatement#;
+ /*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement, isdummyroutine))
- : '\t\t\t\t(*f2py_func)(#callfortran#);'},
- {isthreadsafe: '\t\t\tPy_END_ALLOW_THREADS'},
- {hasexternals: """\t\t}"""}
+ : ' (*f2py_func)(#callfortran#);'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: """ }"""}
],
'_check': l_and(issubroutine, l_not(issubroutine_wrap)),
}, { # Wrapped function
@@ -427,8 +427,8 @@ rout_rules = [
isdummyroutine: '',
},
- 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
- isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
+ 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
@@ -445,18 +445,18 @@ rout_rules = [
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
- """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
-\tif (#setjmpbuf#) {
-\t\tf2py_success = 0;
-\t} else {"""},
- {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
- : '\t(*f2py_func)(#callfortran#);'},
+ : ' (*f2py_func)(#callfortran#);'},
{hascallstatement:
- '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
- {isthreadsafe: '\tPy_END_ALLOW_THREADS'},
- {hasexternals: '\t}'}
+ ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'}
],
'_check': isfunction_wrap,
}, { # Wrapped subroutine
@@ -465,8 +465,8 @@ rout_rules = [
isdummyroutine: '',
},
- 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
- isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
+ 'routine_def': {l_not(l_or(ismoduleroutine, isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook': {l_not(l_or(ismoduleroutine, isdummyroutine)): '''
{
@@ -483,18 +483,18 @@ rout_rules = [
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
'callfortranroutine': [
{debugcapi: [
- """\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
+ """ fprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals: """\
-\tif (#setjmpbuf#) {
-\t\tf2py_success = 0;
-\t} else {"""},
- {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement, isdummyroutine))
- : '\t(*f2py_func)(#callfortran#);'},
+ : ' (*f2py_func)(#callfortran#);'},
{hascallstatement:
- '\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
- {isthreadsafe: '\tPy_END_ALLOW_THREADS'},
- {hasexternals: '\t}'}
+ ' #callstatement#;\n /*(*f2py_func)(#callfortran#);*/'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'}
],
'_check': issubroutine_wrap,
}, { # Function
@@ -505,13 +505,13 @@ rout_rules = [
{hasresultnote: '--- #resultnote#'}],
'callfortranroutine': [{l_and(debugcapi, isstringfunction): """\
#ifdef USESCOMPAQFORTRAN
-\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
-\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi, l_not(isstringfunction)): """\
-\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
+ fprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check': l_and(isfunction, l_not(isfunction_wrap))
@@ -520,32 +520,32 @@ rout_rules = [
l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): 'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine: ''
},
- 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
- l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
- isdummyroutine: '\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
+ 'routine_def': {l_and(l_not(l_or(ismoduleroutine, isintent_c)), l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ l_and(l_not(ismoduleroutine), isintent_c, l_not(isdummyroutine)): ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
+ isdummyroutine: ' {\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
- 'decl': [{iscomplexfunction_warn: '\t#ctype# #name#_return_value={0,0};',
- l_not(iscomplexfunction): '\t#ctype# #name#_return_value=0;'},
+ 'decl': [{iscomplexfunction_warn: ' #ctype# #name#_return_value={0,0};',
+ l_not(iscomplexfunction): ' #ctype# #name#_return_value=0;'},
{iscomplexfunction:
- '\tPyObject *#name#_return_value_capi = Py_None;'}
+ ' PyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine': [
{hasexternals: """\
-\tif (#setjmpbuf#) {
-\t\tf2py_success = 0;
-\t} else {"""},
- {isthreadsafe: '\tPy_BEGIN_ALLOW_THREADS'},
- {hascallstatement: '''\t#callstatement#;
-/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
+ {hascallstatement: ''' #callstatement#;
+/* #name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement, isdummyroutine))
- : '\t#name#_return_value = (*f2py_func)(#callfortran#);'},
- {isthreadsafe: '\tPy_END_ALLOW_THREADS'},
- {hasexternals: '\t}'},
+ : ' #name#_return_value = (*f2py_func)(#callfortran#);'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'},
{l_and(debugcapi, iscomplexfunction)
- : '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
- {l_and(debugcapi, l_not(iscomplexfunction)): '\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
- 'pyobjfrom': {iscomplexfunction: '\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
+ : ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
+ {l_and(debugcapi, l_not(iscomplexfunction)): ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
+ 'pyobjfrom': {iscomplexfunction: ' #name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need': [{l_not(isdummyroutine): 'F_FUNC'},
{iscomplexfunction: 'pyobj_from_#ctype#1'},
{islong_longfunction: 'long_long'},
@@ -557,50 +557,50 @@ rout_rules = [
}, { # String function # in use for --no-wrap
'declfortranroutine': 'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def': {l_not(l_or(ismoduleroutine, isintent_c)):
- '\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
+ ' {\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine), isintent_c):
- '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
+ ' {\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
- 'decl': ['\t#ctype# #name#_return_value = NULL;',
- '\tint #name#_return_value_len = 0;'],
+ 'decl': [' #ctype# #name#_return_value = NULL;',
+ ' int #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
- 'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
- '\tif ((#name#_return_value = (string)malloc('
+ 'callfortranroutine':[' #name#_return_value_len = #rlength#;',
+ ' if ((#name#_return_value = (string)malloc('
'#name#_return_value_len+1) == NULL) {',
- '\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
- '\t\tf2py_success = 0;',
- '\t} else {',
- "\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
- '\t}',
- '\tif (f2py_success) {',
+ ' PyErr_SetString(PyExc_MemoryError, \"out of memory\");',
+ ' f2py_success = 0;',
+ ' } else {',
+ " (#name#_return_value)[#name#_return_value_len] = '\\0';",
+ ' }',
+ ' if (f2py_success) {',
{hasexternals: """\
-\t\tif (#setjmpbuf#) {
-\t\t\tf2py_success = 0;
-\t\t} else {"""},
- {isthreadsafe: '\t\tPy_BEGIN_ALLOW_THREADS'},
+ if (#setjmpbuf#) {
+ f2py_success = 0;
+ } else {"""},
+ {isthreadsafe: ' Py_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
-\t\t(*f2py_func)(#callcompaqfortran#);
+ (*f2py_func)(#callcompaqfortran#);
#else
-\t\t(*f2py_func)(#callfortran#);
+ (*f2py_func)(#callfortran#);
#endif
""",
- {isthreadsafe: '\t\tPy_END_ALLOW_THREADS'},
- {hasexternals: '\t\t}'},
+ {isthreadsafe: ' Py_END_ALLOW_THREADS'},
+ {hasexternals: ' }'},
{debugcapi:
- '\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
- '\t} /* if (f2py_success) after (string)malloc */',
+ ' fprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
+ ' } /* if (f2py_success) after (string)malloc */',
],
'returnformat': '#rformat#',
'return': ',#name#_return_value',
- 'freemem': '\tSTRINGFREE(#name#_return_value);',
+ 'freemem': ' STRINGFREE(#name#_return_value);',
'need': ['F_FUNC', '#ctype#', 'STRINGFREE'],
'_check':l_and(isstringfunction, l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
- 'routdebugenter': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
- 'routdebugleave': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
- 'routdebugfailure': '\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
+ 'routdebugenter': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
+ 'routdebugleave': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
+ 'routdebugfailure': ' fprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check': debugcapi
}
]
@@ -625,16 +625,16 @@ aux_rules = [
'separatorsfor': sepdict
},
{ # Common
- 'frompyobj': ['\t/* Processing auxiliary variable #varname# */',
- {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
- 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
+ 'frompyobj': [' /* Processing auxiliary variable #varname# */',
+ {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ],
+ 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */',
'need': typedef_need_dict,
},
# Scalars (not complex)
{ # Common
- 'decl': '\t#ctype# #varname# = 0;',
+ 'decl': ' #ctype# #varname# = 0;',
'need': {hasinitvalue: 'math.h'},
- 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
+ 'frompyobj': {hasinitvalue: ' #varname# = #init#;'},
'_check': l_and(isscalar, l_not(iscomplex)),
},
{
@@ -646,23 +646,23 @@ aux_rules = [
},
# Complex scalars
{ # Common
- 'decl': '\t#ctype# #varname#;',
- 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
+ 'decl': ' #ctype# #varname#;',
+ 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': iscomplex
},
# String
{ # Common
- 'decl': ['\t#ctype# #varname# = NULL;',
- '\tint slen(#varname#);',
+ 'decl': [' #ctype# #varname# = NULL;',
+ ' int slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
- 'decl': ['\t#ctype# *#varname# = NULL;',
- '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
- '\tconst int #varname#_Rank = #rank#;',
+ 'decl': [' #ctype# *#varname# = NULL;',
+ ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
+ ' const int #varname#_Rank = #rank#;',
],
'need':['len..', {hasinitvalue: 'forcomb'}, {hasinitvalue: 'CFUNCSMESS'}],
'_check': isarray
@@ -711,9 +711,9 @@ arg_rules = [
'separatorsfor': sepdict
},
{ # Common
- 'frompyobj': ['\t/* Processing variable #varname# */',
- {debugcapi: '\tfprintf(stderr,"#vardebuginfo#\\n");'}, ],
- 'cleanupfrompyobj': '\t/* End of cleaning variable #varname# */',
+ 'frompyobj': [' /* Processing variable #varname# */',
+ {debugcapi: ' fprintf(stderr,"#vardebuginfo#\\n");'}, ],
+ 'cleanupfrompyobj': ' /* End of cleaning variable #varname# */',
'_depend': '',
'need': typedef_need_dict,
},
@@ -832,8 +832,8 @@ if (#varname#_cb.capi==Py_None) {
},
# Scalars (not complex)
{ # Common
- 'decl': '\t#ctype# #varname# = 0;',
- 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
+ 'decl': ' #ctype# #varname# = 0;',
+ 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
'return': {isintent_out: ',#varname#'},
'_check': l_and(isscalar, l_not(iscomplex))
@@ -841,15 +841,15 @@ if (#varname#_cb.capi==Py_None) {
'need': {hasinitvalue: 'math.h'},
'_check': l_and(isscalar, l_not(iscomplex)),
}, { # Not hidden
- 'decl': '\tPyObject *#varname#_capi = Py_None;',
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': {isintent_inout: """\
-\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
-\tif (f2py_success) {"""},
- 'closepyobjfrom': {isintent_inout: "\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
+ if (f2py_success) {"""},
+ 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide)
}, {
@@ -869,91 +869,91 @@ if (#varname#_cb.capi==Py_None) {
# ...
# from_pyobj(varname)
#
- {hasinitvalue: '\tif (#varname#_capi == Py_None) #varname# = #init#; else',
+ {hasinitvalue: ' if (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend': ''},
- {l_and(isoptional, l_not(hasinitvalue)): '\tif (#varname#_capi != Py_None)',
+ {l_and(isoptional, l_not(hasinitvalue)): ' if (#varname#_capi != Py_None)',
'_depend': ''},
{l_not(islogical): '''\
-\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
-\tif (f2py_success) {'''},
+ f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
+ if (f2py_success) {'''},
{islogical: '''\
-\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
-\t\tf2py_success = 1;
-\tif (f2py_success) {'''},
+ #varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
+ f2py_success = 1;
+ if (f2py_success) {'''},
],
- 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname#*/',
+ 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname#*/',
'need': {l_not(islogical): '#ctype#_from_pyobj'},
'_check': l_and(isscalar, l_not(iscomplex), isintent_nothide),
'_depend': ''
}, { # Hidden
- 'frompyobj': {hasinitvalue: '\t#varname# = #init#;'},
+ 'frompyobj': {hasinitvalue: ' #varname# = #init#;'},
'need': typedef_need_dict,
'_check': l_and(isscalar, l_not(iscomplex), isintent_hide),
'_depend': ''
}, { # Common
- 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check': l_and(isscalar, l_not(iscomplex)),
'_depend': ''
},
# Complex scalars
{ # Common
- 'decl': '\t#ctype# #varname#;',
+ 'decl': ' #ctype# #varname#;',
'callfortran': {isintent_c: '#varname#,', l_not(isintent_c): '&#varname#,'},
- 'pyobjfrom': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
+ 'pyobjfrom': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return': {isintent_out: ',#varname#_capi'},
'_check': iscomplex
}, { # Not hidden
- 'decl': '\tPyObject *#varname#_capi = Py_None;',
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#'},
'pyobjfrom': {isintent_inout: """\
-\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
-\t\tif (f2py_success) {"""},
- 'closepyobjfrom': {isintent_inout: "\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
+ if (f2py_success) {"""},
+ 'closepyobjfrom': {isintent_inout: " } /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check': l_and(iscomplex, isintent_nothide)
}, {
- 'frompyobj': [{hasinitvalue: '\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
+ 'frompyobj': [{hasinitvalue: ' if (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional, l_not(hasinitvalue))
- : '\tif (#varname#_capi != Py_None)'},
- '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
- '\n\tif (f2py_success) {'],
- 'cleanupfrompyobj': '\t} /*if (f2py_success) of #varname# frompyobj*/',
+ : ' if (#varname#_capi != Py_None)'},
+ ' f2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
+ '\n if (f2py_success) {'],
+ 'cleanupfrompyobj': ' } /*if (f2py_success) of #varname# frompyobj*/',
'need': ['#ctype#_from_pyobj'],
'_check': l_and(iscomplex, isintent_nothide),
'_depend': ''
}, { # Hidden
- 'decl': {isintent_out: '\tPyObject *#varname#_capi = Py_None;'},
+ 'decl': {isintent_out: ' PyObject *#varname#_capi = Py_None;'},
'_check': l_and(iscomplex, isintent_hide)
}, {
- 'frompyobj': {hasinitvalue: '\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
+ 'frompyobj': {hasinitvalue: ' #varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check': l_and(iscomplex, isintent_hide),
'_depend': ''
}, { # Common
- 'pyobjfrom': {isintent_out: '\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
+ 'pyobjfrom': {isintent_out: ' #varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need': ['pyobj_from_#ctype#1'],
'_check': iscomplex
}, {
- 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check': iscomplex,
'_depend': ''
},
# String
{ # Common
- 'decl': ['\t#ctype# #varname# = NULL;',
- '\tint slen(#varname#);',
- '\tPyObject *#varname#_capi = Py_None;'],
+ 'decl': [' #ctype# #varname# = NULL;',
+ ' int slen(#varname#);',
+ ' PyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':[
{debugcapi:
- '\tfprintf(stderr,'
+ ' fprintf(stderr,'
'"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
# The trailing null value for Fortran is blank.
{l_and(isintent_out, l_not(isintent_c)):
- "\t\tSTRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
+ " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
],
'return': {isintent_out: ',#varname#'},
'need': ['len..',
@@ -962,18 +962,18 @@ if (#varname#_cb.capi==Py_None) {
}, { # Common
'frompyobj': [
"""\
-\tslen(#varname#) = #length#;
-\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,"""
+ slen(#varname#) = #length#;
+ f2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,"""
"""#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth#"""
"""`#varname#\' of #pyname# to C #ctype#\");
-\tif (f2py_success) {""",
+ if (f2py_success) {""",
# The trailing null value for Fortran is blank.
{l_not(isintent_c):
- "\t\tSTRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"},
+ " STRINGPADN(#varname#, slen(#varname#), '\\0', ' ');"},
],
'cleanupfrompyobj': """\
-\t\tSTRINGFREE(#varname#);
-\t} /*if (f2py_success) of #varname#*/""",
+ STRINGFREE(#varname#);
+ } /*if (f2py_success) of #varname#*/""",
'need': ['#ctype#_from_pyobj', 'len..', 'STRINGFREE',
{l_not(isintent_c): 'STRINGPADN'}],
'_check':isstring,
@@ -985,36 +985,36 @@ if (#varname#_cb.capi==Py_None) {
'keys_capi': {isoptional: ',&#varname#_capi'},
'pyobjfrom': [
{l_and(isintent_inout, l_not(isintent_c)):
- "\t\tSTRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
+ " STRINGPADN(#varname#, slen(#varname#), ' ', '\\0');"},
{isintent_inout: '''\
-\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#,
-\t slen(#varname#));
-\tif (f2py_success) {'''}],
- 'closepyobjfrom': {isintent_inout: '\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
+ f2py_success = try_pyarr_from_#ctype#(#varname#_capi, #varname#,
+ slen(#varname#));
+ if (f2py_success) {'''}],
+ 'closepyobjfrom': {isintent_inout: ' } /*if (f2py_success) of #varname# pyobjfrom*/'},
'need': {isintent_inout: 'try_pyarr_from_#ctype#',
l_and(isintent_inout, l_not(isintent_c)): 'STRINGPADN'},
'_check': l_and(isstring, isintent_nothide)
}, { # Hidden
'_check': l_and(isstring, isintent_hide)
}, {
- 'frompyobj': {debugcapi: '\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
+ 'frompyobj': {debugcapi: ' fprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check': isstring,
'_depend': ''
},
# Array
{ # Common
- 'decl': ['\t#ctype# *#varname# = NULL;',
- '\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
- '\tconst int #varname#_Rank = #rank#;',
- '\tPyArrayObject *capi_#varname#_tmp = NULL;',
- '\tint capi_#varname#_intent = 0;',
+ 'decl': [' #ctype# *#varname# = NULL;',
+ ' npy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
+ ' const int #varname#_Rank = #rank#;',
+ ' PyArrayObject *capi_#varname#_tmp = NULL;',
+ ' int capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out: ',capi_#varname#_tmp'},
'need': 'len..',
'_check': isarray
}, { # intent(overwrite) array
- 'decl': '\tint capi_overwrite_#varname# = 1;',
+ 'decl': ' int capi_overwrite_#varname# = 1;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
@@ -1023,12 +1023,12 @@ if (#varname#_cb.capi==Py_None) {
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 1',
'_check': l_and(isarray, isintent_overwrite),
}, {
- 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
+ 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_overwrite),
'_depend': '',
},
{ # intent(copy) array
- 'decl': '\tint capi_overwrite_#varname# = 0;',
+ 'decl': ' int capi_overwrite_#varname# = 0;',
'kwlistxa': '"overwrite_#varname#",',
'xaformat': 'i',
'keys_xa': ',&capi_overwrite_#varname#',
@@ -1037,7 +1037,7 @@ if (#varname#_cb.capi==Py_None) {
'docstropt': 'overwrite_#varname# : input int, optional\\n Default: 0',
'_check': l_and(isarray, isintent_copy),
}, {
- 'frompyobj': '\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
+ 'frompyobj': ' capi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check': l_and(isarray, isintent_copy),
'_depend': '',
}, {
@@ -1045,57 +1045,57 @@ if (#varname#_cb.capi==Py_None) {
'_check': isarray,
'_depend': ''
}, { # Not hidden
- 'decl': '\tPyObject *#varname#_capi = Py_None;',
+ 'decl': ' PyObject *#varname#_capi = Py_None;',
'argformat': {isrequired: 'O'},
'keyformat': {isoptional: 'O'},
'args_capi': {isrequired: ',&#varname#_capi'},
'keys_capi': {isoptional: ',&#varname#_capi'},
'_check': l_and(isarray, isintent_nothide)
}, {
- 'frompyobj': ['\t#setdims#;',
- '\tcapi_#varname#_intent |= #intent#;',
+ 'frompyobj': [' #setdims#;',
+ ' capi_#varname#_intent |= #intent#;',
{isintent_hide:
- '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
+ ' capi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:
- '\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
+ ' capi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
-\tif (capi_#varname#_tmp == NULL) {
-\t\tPyObject *exc, *val, *tb;
-\t\tPyErr_Fetch(&exc, &val, &tb);
-\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
-\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
-\t} else {
-\t\t#varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
+ if (capi_#varname#_tmp == NULL) {
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
+ PyErr_SetString(exc ? exc : #modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+ } else {
+ #varname# = (#ctype# *)(PyArray_DATA(capi_#varname#_tmp));
""",
{hasinitvalue: [
{isintent_nothide:
- '\tif (#varname#_capi == Py_None) {'},
- {isintent_hide: '\t{'},
- {iscomplexarray: '\t\t#ctype# capi_c;'},
+ ' if (#varname#_capi == Py_None) {'},
+ {isintent_hide: ' {'},
+ {iscomplexarray: ' #ctype# capi_c;'},
"""\
-\t\tint *_i,capi_i=0;
-\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
-\t\tif (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
-\t\t\twhile ((_i = nextforcomb()))
-\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
-\t\t} else {
-\t\t\tPyObject *exc, *val, *tb;
-\t\t\tPyErr_Fetch(&exc, &val, &tb);
-\t\t\tPyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
-\t\t\tnpy_PyErr_ChainExceptionsCause(exc, val, tb);
-\t\t\tf2py_success = 0;
-\t\t}
-\t}
-\tif (f2py_success) {"""]},
+ int *_i,capi_i=0;
+ CFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
+ if (initforcomb(PyArray_DIMS(capi_#varname#_tmp),PyArray_NDIM(capi_#varname#_tmp),1)) {
+ while ((_i = nextforcomb()))
+ #varname#[capi_i++] = #init#; /* fortran way */
+ } else {
+ PyObject *exc, *val, *tb;
+ PyErr_Fetch(&exc, &val, &tb);
+ PyErr_SetString(exc ? exc : #modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+ f2py_success = 0;
+ }
+ }
+ if (f2py_success) {"""]},
],
'cleanupfrompyobj': [ # note that this list will be reversed
- '\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
+ ' } /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out, isintent_hide)): """\
-\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
-\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
+ if((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
+ Py_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide, l_not(isintent_out))
- : """\t\tPy_XDECREF(capi_#varname#_tmp);"""},
- {hasinitvalue: '\t} /*if (f2py_success) of #varname# init*/'},
+ : """ Py_XDECREF(capi_#varname#_tmp);"""},
+ {hasinitvalue: ' } /*if (f2py_success) of #varname# init*/'},
],
'_check': isarray,
'_depend': ''
@@ -1143,30 +1143,30 @@ if (#varname#_cb.capi==Py_None) {
check_rules = [
{
- 'frompyobj': {debugcapi: '\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
+ 'frompyobj': {debugcapi: ' fprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need': 'len..'
}, {
- 'frompyobj': '\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
- 'cleanupfrompyobj': '\t} /*CHECKSCALAR(#check#)*/',
+ 'frompyobj': ' CHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
+ 'cleanupfrompyobj': ' } /*CHECKSCALAR(#check#)*/',
'need': 'CHECKSCALAR',
'_check': l_and(isscalar, l_not(iscomplex)),
'_break': ''
}, {
- 'frompyobj': '\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
- 'cleanupfrompyobj': '\t} /*CHECKSTRING(#check#)*/',
+ 'frompyobj': ' CHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
+ 'cleanupfrompyobj': ' } /*CHECKSTRING(#check#)*/',
'need': 'CHECKSTRING',
'_check': isstring,
'_break': ''
}, {
'need': 'CHECKARRAY',
- 'frompyobj': '\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
- 'cleanupfrompyobj': '\t} /*CHECKARRAY(#check#)*/',
+ 'frompyobj': ' CHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
+ 'cleanupfrompyobj': ' } /*CHECKARRAY(#check#)*/',
'_check': isarray,
'_break': ''
}, {
'need': 'CHECKGENERIC',
- 'frompyobj': '\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
- 'cleanupfrompyobj': '\t} /*CHECKGENERIC(#check#)*/',
+ 'frompyobj': ' CHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
+ 'cleanupfrompyobj': ' } /*CHECKGENERIC(#check#)*/',
}
]
@@ -1179,7 +1179,7 @@ def buildmodule(m, um):
"""
Return
"""
- outmess('\tBuilding module "%s"...\n' % (m['name']))
+ outmess(' Building module "%s"...\n' % (m['name']))
ret = {}
mod_rules = defmod_rules[:]
vrd = capi_maps.modsign2map(m)
@@ -1281,7 +1281,7 @@ def buildmodule(m, um):
ret['csrc'] = fn
with open(fn, 'w') as f:
f.write(ar['modulebody'].replace('\t', 2 * ' '))
- outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
+ outmess(' Wrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(
@@ -1289,7 +1289,7 @@ def buildmodule(m, um):
with open(fn, 'w') as f:
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
- outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' %
+ outmess(' ReST Documentation is saved to file "%s/%smodule.rest"\n' %
(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(
@@ -1304,7 +1304,7 @@ def buildmodule(m, um):
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
- outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' %
+ outmess(' Documentation is saved to file "%s/%smodule.tex"\n' %
(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
@@ -1329,7 +1329,7 @@ def buildmodule(m, um):
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
- outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn))
+ outmess(' Fortran 77 wrappers are saved to "%s"\n' % (wn))
if funcwrappers2:
wn = os.path.join(
options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
@@ -1356,7 +1356,7 @@ def buildmodule(m, um):
lines.append(l + '\n')
lines = ''.join(lines).replace('\n &\n', '\n')
f.write(lines)
- outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn))
+ outmess(' Fortran 90 wrappers are saved to "%s"\n' % (wn))
return ret
################## Build C/API function #############
@@ -1372,10 +1372,10 @@ def buildapi(rout):
var = rout['vars']
if ismoduleroutine(rout):
- outmess('\t\t\tConstructing wrapper function "%s.%s"...\n' %
+ outmess(' Constructing wrapper function "%s.%s"...\n' %
(rout['modulename'], rout['name']))
else:
- outmess('\t\tConstructing wrapper function "%s"...\n' % (rout['name']))
+ outmess(' Constructing wrapper function "%s"...\n' % (rout['name']))
# Routine
vrd = capi_maps.routsign2map(rout)
rd = dictappend({}, vrd)
@@ -1477,9 +1477,9 @@ def buildapi(rout):
ar = applyrules(routine_rules, rd)
if ismoduleroutine(rout):
- outmess('\t\t\t %s\n' % (ar['docshort']))
+ outmess(' %s\n' % (ar['docshort']))
else:
- outmess('\t\t %s\n' % (ar['docshort']))
+ outmess(' %s\n' % (ar['docshort']))
return ar, wrap
diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py
index b747a75f9..1b7b35458 100644
--- a/numpy/f2py/symbolic.py
+++ b/numpy/f2py/symbolic.py
@@ -1098,7 +1098,7 @@ def as_term_coeff(obj):
if len(obj.data) == 1:
(term, coeff), = obj.data.items()
return term, coeff
- # TODO: find common divisior of coefficients
+ # TODO: find common divisor of coefficients
if obj.op is Op.APPLY and obj.data[0] is ArithOp.DIV:
t, c = as_term_coeff(obj.data[1][0])
return as_apply(ArithOp.DIV, t, obj.data[1][1]), c
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 2cb429ec2..d07e98357 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -107,9 +107,9 @@ cf2py intent(out) r
-----
Call-back functions::
- def fun(): return a
- Return objects:
- a : int
+ def fun(): return a
+ Return objects:
+ a : int
""")
assert_equal(self.module.t.__doc__, expected)
diff --git a/numpy/f2py/tests/test_crackfortran.py b/numpy/f2py/tests/test_crackfortran.py
index b1503c1e0..039e085b4 100644
--- a/numpy/f2py/tests/test_crackfortran.py
+++ b/numpy/f2py/tests/test_crackfortran.py
@@ -264,3 +264,20 @@ class TestDimSpec(util.F2PyTest):
# the same sized array
sz1, _ = get_arr_size(n1)
assert sz == sz1, (n, n1, sz, sz1)
+
+
+class TestModuleDeclaration():
+ def test_dependencies(self, tmp_path):
+ f_path = tmp_path / "mod.f90"
+ with f_path.open('w') as ff:
+ ff.write(textwrap.dedent("""\
+ module foo
+ type bar
+ character(len = 4) :: text
+ end type bar
+ type(bar), parameter :: abar = bar('abar')
+ end module foo
+ """))
+ mod = crackfortran.crackfortran([str(f_path)])
+ assert len(mod) == 1
+ assert mod[0]['vars']['abar']['='] == "bar('abar')"
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 80eaf8acf..3ca566f73 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2804,9 +2804,9 @@ def blackman(M):
"""
if M < 1:
- return array([])
+ return array([], dtype=np.result_type(M, 0.0))
if M == 1:
- return ones(1, float)
+ return ones(1, dtype=np.result_type(M, 0.0))
n = arange(1-M, M, 2)
return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1))
@@ -2913,9 +2913,9 @@ def bartlett(M):
"""
if M < 1:
- return array([])
+ return array([], dtype=np.result_type(M, 0.0))
if M == 1:
- return ones(1, float)
+ return ones(1, dtype=np.result_type(M, 0.0))
n = arange(1-M, M, 2)
return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1))
@@ -3017,9 +3017,9 @@ def hanning(M):
"""
if M < 1:
- return array([])
+ return array([], dtype=np.result_type(M, 0.0))
if M == 1:
- return ones(1, float)
+ return ones(1, dtype=np.result_type(M, 0.0))
n = arange(1-M, M, 2)
return 0.5 + 0.5*cos(pi*n/(M-1))
@@ -3117,9 +3117,9 @@ def hamming(M):
"""
if M < 1:
- return array([])
+ return array([], dtype=np.result_type(M, 0.0))
if M == 1:
- return ones(1, float)
+ return ones(1, dtype=np.result_type(M, 0.0))
n = arange(1-M, M, 2)
return 0.54 + 0.46*cos(pi*n/(M-1))
@@ -3396,7 +3396,7 @@ def kaiser(M, beta):
"""
if M == 1:
- return np.array([1.])
+ return np.ones(1, dtype=np.result_type(M, 0.0))
n = arange(0, M)
alpha = (M-1)/2.0
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi
index 69c615c9c..cbbc87e65 100644
--- a/numpy/lib/function_base.pyi
+++ b/numpy/lib/function_base.pyi
@@ -1,7 +1,48 @@
-from typing import List
+import sys
+from typing import (
+ Literal as L,
+ List,
+ Type,
+ Sequence,
+ Tuple,
+ Union,
+ Any,
+ TypeVar,
+ Iterator,
+ overload,
+ Callable,
+ Protocol,
+ SupportsIndex,
+ Iterable,
+)
+
+if sys.version_info >= (3, 10):
+ from typing import TypeGuard
+else:
+ from typing_extensions import TypeGuard
from numpy import (
vectorize as vectorize,
+ dtype,
+ generic,
+ floating,
+ complexfloating,
+ object_,
+ _OrderKACF,
+)
+
+from numpy.typing import (
+ NDArray,
+ ArrayLike,
+ DTypeLike,
+ _ShapeLike,
+ _ScalarLike_co,
+ _SupportsDType,
+ _FiniteNestedSequence,
+ _SupportsArray,
+ _ArrayLikeComplex_co,
+ _ArrayLikeFloat_co,
+ _ArrayLikeObject_co,
)
from numpy.core.function_base import (
@@ -12,30 +53,261 @@ from numpy.core.multiarray import (
add_docstring as add_docstring,
bincount as bincount,
)
+
from numpy.core.umath import _add_newdoc_ufunc
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+_2Tuple = Tuple[_T, _T]
+_ArrayLike = _FiniteNestedSequence[_SupportsArray[dtype[_SCT]]]
+_DTypeLike = Union[
+ dtype[_SCT],
+ Type[_SCT],
+ _SupportsDType[dtype[_SCT]],
+]
+
+class _TrimZerosSequence(Protocol[_T_co]):
+ def __len__(self) -> int: ...
+ def __getitem__(self, key: slice, /) -> _T_co: ...
+ def __iter__(self) -> Iterator[Any]: ...
+
+class _SupportsWriteFlush(Protocol):
+ def write(self, s: str, /) -> object: ...
+ def flush(self) -> object: ...
+
__all__: List[str]
add_newdoc_ufunc = _add_newdoc_ufunc
-def rot90(m, k=..., axes = ...): ...
-def flip(m, axis=...): ...
-def iterable(y): ...
-def average(a, axis=..., weights=..., returned=...): ...
-def asarray_chkfinite(a, dtype=..., order=...): ...
-def piecewise(x, condlist, funclist, *args, **kw): ...
-def select(condlist, choicelist, default=...): ...
-def copy(a, order=..., subok=...): ...
-def gradient(f, *varargs, axis=..., edge_order=...): ...
-def diff(a, n=..., axis=..., prepend = ..., append = ...): ...
+@overload
+def rot90(
+ m: _ArrayLike[_SCT],
+ k: int = ...,
+ axes: Tuple[int, int] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def rot90(
+ m: ArrayLike,
+ k: int = ...,
+ axes: Tuple[int, int] = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def flip(m: _SCT, axis: None = ...) -> _SCT: ...
+@overload
+def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
+@overload
+def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+@overload
+def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...
+
+@overload
+def average(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeFloat_co= ...,
+ returned: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeComplex_co = ...,
+ returned: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def average(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ weights: None | Any = ...,
+ returned: L[False] = ...,
+) -> Any: ...
+@overload
+def average(
+ a: _ArrayLikeFloat_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeFloat_co= ...,
+ returned: L[True] = ...,
+) -> _2Tuple[floating[Any]]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co,
+ axis: None = ...,
+ weights: None | _ArrayLikeComplex_co = ...,
+ returned: L[True] = ...,
+) -> _2Tuple[complexfloating[Any, Any]]: ...
+@overload
+def average(
+ a: _ArrayLikeObject_co,
+ axis: None = ...,
+ weights: None | Any = ...,
+ returned: L[True] = ...,
+) -> _2Tuple[Any]: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ weights: None | Any = ...,
+ returned: L[False] = ...,
+) -> Any: ...
+@overload
+def average(
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+ axis: None | _ShapeLike = ...,
+ weights: None | Any = ...,
+ returned: L[True] = ...,
+) -> _2Tuple[Any]: ...
+
+@overload
+def asarray_chkfinite(
+ a: _ArrayLike[_SCT],
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray_chkfinite(
+ a: object,
+ dtype: None = ...,
+ order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+@overload
+def asarray_chkfinite(
+ a: Any,
+ dtype: _DTypeLike[_SCT],
+ order: _OrderKACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray_chkfinite(
+ a: Any,
+ dtype: DTypeLike,
+ order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def piecewise(
+ x: _ArrayLike[_SCT],
+ condlist: ArrayLike,
+ funclist: Sequence[Any | Callable[..., Any]],
+ *args: Any,
+ **kw: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def piecewise(
+ x: ArrayLike,
+ condlist: ArrayLike,
+ funclist: Sequence[Any | Callable[..., Any]],
+ *args: Any,
+ **kw: Any,
+) -> NDArray[Any]: ...
+
+def select(
+ condlist: Sequence[ArrayLike],
+ choicelist: Sequence[ArrayLike],
+ default: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def copy(
+ a: _ArrayType,
+ order: _OrderKACF,
+ subok: L[True],
+) -> _ArrayType: ...
+@overload
+def copy(
+ a: _ArrayType,
+ order: _OrderKACF = ...,
+ *,
+ subok: L[True],
+) -> _ArrayType: ...
+@overload
+def copy(
+ a: _ArrayLike[_SCT],
+ order: _OrderKACF = ...,
+ subok: L[False] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def copy(
+ a: ArrayLike,
+ order: _OrderKACF = ...,
+ subok: L[False] = ...,
+) -> NDArray[Any]: ...
+
+def gradient(
+ f: ArrayLike,
+ *varargs: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ edge_order: L[1, 2] = ...,
+) -> Any: ...
+
+@overload
+def diff(
+ a: _T,
+ n: L[0],
+ axis: SupportsIndex = ...,
+ prepend: ArrayLike = ...,
+ append: ArrayLike = ...,
+) -> _T: ...
+@overload
+def diff(
+ a: ArrayLike,
+ n: int = ...,
+ axis: SupportsIndex = ...,
+ prepend: ArrayLike = ...,
+ append: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+# TODO
def interp(x, xp, fp, left=..., right=..., period=...): ...
-def angle(z, deg=...): ...
-def unwrap(p, discont = ..., axis=..., *, period=...): ...
-def sort_complex(a): ...
-def trim_zeros(filt, trim=...): ...
-def extract(condition, arr): ...
-def place(arr, mask, vals): ...
-def disp(mesg, device=..., linefeed=...): ...
+
+@overload
+def angle(z: _ArrayLikeFloat_co, deg: bool = ...) -> floating[Any]: ...
+@overload
+def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> complexfloating[Any, Any]: ...
+@overload
+def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> Any: ...
+
+@overload
+def unwrap(
+ p: _ArrayLikeFloat_co,
+ discont: None | float = ...,
+ axis: int = ...,
+ *,
+ period: float = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def unwrap(
+ p: _ArrayLikeObject_co,
+ discont: None | float = ...,
+ axis: int = ...,
+ *,
+ period: float = ...,
+) -> NDArray[object_]: ...
+
+def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
+
+def trim_zeros(
+ filt: _TrimZerosSequence[_T],
+ trim: L["f", "b", "fb", "bf"] = ...,
+) -> _T: ...
+
+@overload
+def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
+
+def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
+
+def disp(
+ mesg: object,
+ device: None | _SupportsWriteFlush = ...,
+ linefeed: bool = ...,
+) -> None: ...
+
def cov(m, y=..., rowvar=..., bias=..., ddof=..., fweights=..., aweights=..., *, dtype=...): ...
def corrcoef(x, y=..., rowvar=..., bias = ..., ddof = ..., *, dtype=...): ...
def blackman(M): ...
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 5f27ea655..66110b479 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -1528,7 +1528,7 @@ class TestVectorize:
([('x',)], [('y',), ()]))
assert_equal(nfb._parse_gufunc_signature('(),(a,b,c),(d)->(d,e)'),
([(), ('a', 'b', 'c'), ('d',)], [('d', 'e')]))
-
+
# Tests to check if whitespaces are ignored
assert_equal(nfb._parse_gufunc_signature('(x )->()'), ([('x',)], [()]))
assert_equal(nfb._parse_gufunc_signature('( x , y )->( )'),
@@ -1853,35 +1853,116 @@ class TestUnwrap:
assert sm_discont.dtype == wrap_uneven.dtype
+@pytest.mark.parametrize(
+ "dtype", "O" + np.typecodes["AllInteger"] + np.typecodes["Float"]
+)
+@pytest.mark.parametrize("M", [0, 1, 10])
class TestFilterwindows:
- def test_hanning(self):
+ def test_hanning(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = hanning(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
# check symmetry
- w = hanning(10)
assert_equal(w, flipud(w))
+
# check known value
- assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.500, 4)
+
+ def test_hamming(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = hamming(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
+
+ # check symmetry
+ assert_equal(w, flipud(w))
+
+ # check known value
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
+
+ def test_bartlett(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = bartlett(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
- def test_hamming(self):
# check symmetry
- w = hamming(10)
assert_equal(w, flipud(w))
+
# check known value
- assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
+
+ def test_blackman(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = blackman(scalar)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
- def test_bartlett(self):
# check symmetry
- w = bartlett(10)
assert_equal(w, flipud(w))
+
# check known value
- assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
+
+ def test_kaiser(self, dtype: str, M: int) -> None:
+ scalar = np.array(M, dtype=dtype)[()]
+
+ w = kaiser(scalar, 0)
+ if dtype == "O":
+ ref_dtype = np.float64
+ else:
+ ref_dtype = np.result_type(scalar.dtype, np.float64)
+ assert w.dtype == ref_dtype
- def test_blackman(self):
# check symmetry
- w = blackman(10)
assert_equal(w, flipud(w))
+
# check known value
- assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)
+ if scalar < 1:
+ assert_array_equal(w, np.array([]))
+ elif scalar == 1:
+ assert_array_equal(w, np.ones(1))
+ else:
+ assert_almost_equal(np.sum(w, axis=0), 10, 15)
class TestTrapz:
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 7e2d744a3..2ff1667ba 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -68,13 +68,13 @@ __all__ = [
'masked_singleton', 'masked_values', 'masked_where', 'max', 'maximum',
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
- 'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod',
+ 'not_equal', 'ones', 'ones_like', 'outer', 'outerproduct', 'power', 'prod',
'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder',
'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
'swapaxes', 'take', 'tan', 'tanh', 'trace', 'transpose', 'true_divide',
- 'var', 'where', 'zeros',
+ 'var', 'where', 'zeros', 'zeros_like',
]
MaskType = np.bool_
@@ -8159,8 +8159,18 @@ arange = _convert2ma(
np_ret='arange : ndarray',
np_ma_ret='arange : MaskedArray',
)
-clip = np.clip
-diff = np.diff
+clip = _convert2ma(
+ 'clip',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='clipped_array : ndarray',
+ np_ma_ret='clipped_array : MaskedArray',
+)
+diff = _convert2ma(
+ 'diff',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='diff : ndarray',
+ np_ma_ret='diff : MaskedArray',
+)
empty = _convert2ma(
'empty',
params=dict(fill_value=None, hardmask=False),
@@ -8188,22 +8198,40 @@ identity = _convert2ma(
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
-indices = np.indices
+indices = _convert2ma(
+ 'indices',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='grid : one ndarray or tuple of ndarrays',
+ np_ma_ret='grid : one MaskedArray or tuple of MaskedArrays',
+)
ones = _convert2ma(
'ones',
params=dict(fill_value=None, hardmask=False),
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
-ones_like = np.ones_like
-squeeze = np.squeeze
+ones_like = _convert2ma(
+ 'ones_like',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
+squeeze = _convert2ma(
+ 'squeeze',
+ params=dict(fill_value=None, hardmask=False),
+ np_ret='squeezed : ndarray',
+ np_ma_ret='squeezed : MaskedArray',
+)
zeros = _convert2ma(
'zeros',
params=dict(fill_value=None, hardmask=False),
np_ret='out : ndarray',
np_ma_ret='out : MaskedArray',
)
-zeros_like = np.zeros_like
+zeros_like = _convert2ma(
+ 'zeros_like',
+ np_ret='out : ndarray',
+ np_ma_ret='out : MaskedArray',
+)
def append(a, b, axis=None):
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 7e9522b3a..bf95c999a 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -43,9 +43,9 @@ from numpy.ma.core import (
masked_less, masked_less_equal, masked_not_equal, masked_outside,
masked_print_option, masked_values, masked_where, max, maximum,
maximum_fill_value, min, minimum, minimum_fill_value, mod, multiply,
- mvoid, nomask, not_equal, ones, outer, power, product, put, putmask,
- ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
- subtract, sum, take, tan, tanh, transpose, where, zeros,
+ mvoid, nomask, not_equal, ones, ones_like, outer, power, product, put,
+ putmask, ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort,
+ sqrt, subtract, sum, take, tan, tanh, transpose, where, zeros, zeros_like,
)
from numpy.compat import pickle
@@ -3229,6 +3229,50 @@ class TestMaskedArrayMethods:
b = a.view(masked_array)
assert_(np.may_share_memory(a.mask, b.mask))
+ def test_zeros(self):
+ # Tests zeros/like
+ datatype = [('a', int), ('b', float), ('c', '|S8')]
+ a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
+ dtype=datatype)
+ assert_equal(len(a.fill_value.item()), len(datatype))
+
+ b = zeros(len(a), dtype=datatype)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ b = zeros_like(a)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ # check zeros_like mask handling
+ a = masked_array([1, 2, 3], mask=[False, True, False])
+ b = zeros_like(a)
+ assert_(not np.may_share_memory(a.mask, b.mask))
+ b = a.view()
+ assert_(np.may_share_memory(a.mask, b.mask))
+
+ def test_ones(self):
+ # Tests ones/like
+ datatype = [('a', int), ('b', float), ('c', '|S8')]
+ a = masked_array([(1, 1.1, '1.1'), (2, 2.2, '2.2'), (3, 3.3, '3.3')],
+ dtype=datatype)
+ assert_equal(len(a.fill_value.item()), len(datatype))
+
+ b = ones(len(a), dtype=datatype)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ b = ones_like(a)
+ assert_equal(b.shape, a.shape)
+ assert_equal(b.fill_value, a.fill_value)
+
+ # check ones_like mask handling
+ a = masked_array([1, 2, 3], mask=[False, True, False])
+ b = ones_like(a)
+ assert_(not np.may_share_memory(a.mask, b.mask))
+ b = a.view()
+ assert_(np.may_share_memory(a.mask, b.mask))
+
@suppress_copy_mask_on_assignment
def test_put(self):
# Tests put.
diff --git a/numpy/random/src/distributions/random_hypergeometric.c b/numpy/random/src/distributions/random_hypergeometric.c
index 0da49bd62..d8510bfca 100644
--- a/numpy/random/src/distributions/random_hypergeometric.c
+++ b/numpy/random/src/distributions/random_hypergeometric.c
@@ -155,7 +155,7 @@ static int64_t hypergeometric_hrua(bitgen_t *bitgen_state,
c = sqrt(var + 0.5);
/*
- * h is 2*s_hat (See Stadlober's theses (1989), Eq. (5.17); or
+ * h is 2*s_hat (See Stadlober's thesis (1989), Eq. (5.17); or
* Stadlober (1990), Eq. 8). s_hat is the scale of the "table mountain"
* function that dominates the scaled hypergeometric PMF ("scaled" means
* normalized to have a maximum value of 1).
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 1e366eb34..d5cfbf5ac 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -114,8 +114,9 @@ runtime, they're not necessarily considered as sub-classes.
Timedelta64
~~~~~~~~~~~
-The `~numpy.timedelta64` class is not considered a subclass of `~numpy.signedinteger`,
-the former only inheriting from `~numpy.generic` while static type checking.
+The `~numpy.timedelta64` class is not considered a subclass of
+`~numpy.signedinteger`, the former only inheriting from `~numpy.generic`
+while static type checking.
0D arrays
~~~~~~~~~
@@ -154,8 +155,10 @@ API
# NOTE: The API section will be appended with additional entries
# further down in this file
+from __future__ import annotations
+
from numpy import ufunc
-from typing import TYPE_CHECKING, List, final
+from typing import TYPE_CHECKING, final
if not TYPE_CHECKING:
__all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"]
@@ -166,14 +169,14 @@ else:
#
# Declare to mypy that `__all__` is a list of strings without assigning
# an explicit value
- __all__: List[str]
- __path__: List[str]
+ __all__: list[str]
+ __path__: list[str]
@final # Disallow the creation of arbitrary `NBitBase` subclasses
class NBitBase:
"""
- An object representing `numpy.number` precision during static type checking.
+ A type representing `numpy.number` precision during static type checking.
 Used exclusively for the purpose of static type checking, `NBitBase`
represents the base of a hierarchical set of subclasses.
@@ -184,9 +187,9 @@ class NBitBase:
Examples
--------
- Below is a typical usage example: `NBitBase` is herein used for annotating a
- function that takes a float and integer of arbitrary precision as arguments
- and returns a new float of whichever precision is largest
+ Below is a typical usage example: `NBitBase` is herein used for annotating
+ a function that takes a float and integer of arbitrary precision
+ as arguments and returns a new float of whichever precision is largest
(*e.g.* ``np.float16 + np.int64 -> np.float64``).
.. code-block:: python
@@ -226,14 +229,29 @@ class NBitBase:
# Silence errors about subclassing a `@final`-decorated class
-class _256Bit(NBitBase): ... # type: ignore[misc]
-class _128Bit(_256Bit): ... # type: ignore[misc]
-class _96Bit(_128Bit): ... # type: ignore[misc]
-class _80Bit(_96Bit): ... # type: ignore[misc]
-class _64Bit(_80Bit): ... # type: ignore[misc]
-class _32Bit(_64Bit): ... # type: ignore[misc]
-class _16Bit(_32Bit): ... # type: ignore[misc]
-class _8Bit(_16Bit): ... # type: ignore[misc]
+class _256Bit(NBitBase): # type: ignore[misc]
+ pass
+
+class _128Bit(_256Bit): # type: ignore[misc]
+ pass
+
+class _96Bit(_128Bit): # type: ignore[misc]
+ pass
+
+class _80Bit(_96Bit): # type: ignore[misc]
+ pass
+
+class _64Bit(_80Bit): # type: ignore[misc]
+ pass
+
+class _32Bit(_64Bit): # type: ignore[misc]
+ pass
+
+class _16Bit(_32Bit): # type: ignore[misc]
+ pass
+
+class _8Bit(_16Bit): # type: ignore[misc]
+ pass
from ._nested_sequence import _NestedSequence
@@ -363,7 +381,7 @@ else:
_GUFunc_Nin2_Nout1 = ufunc
# Clean up the namespace
-del TYPE_CHECKING, final, List, ufunc
+del TYPE_CHECKING, final, ufunc
if __doc__ is not None:
from ._add_docstring import _docstrings
diff --git a/numpy/typing/_add_docstring.py b/numpy/typing/_add_docstring.py
index 846b67042..10d77f516 100644
--- a/numpy/typing/_add_docstring.py
+++ b/numpy/typing/_add_docstring.py
@@ -50,16 +50,17 @@ def _parse_docstrings() -> str:
new_lines.append("")
else:
new_lines.append(f"{indent}{line}")
- s = "\n".join(new_lines)
- # Done.
- type_list_ret.append(f""".. data:: {name}\n :value: {value}\n {s}""")
+ s = "\n".join(new_lines)
+ s_block = f""".. data:: {name}\n :value: {value}\n {s}"""
+ type_list_ret.append(s_block)
return "\n".join(type_list_ret)
add_newdoc('ArrayLike', 'typing.Union[...]',
"""
- A `~typing.Union` representing objects that can be coerced into an `~numpy.ndarray`.
+ A `~typing.Union` representing objects that can be coerced
+ into an `~numpy.ndarray`.
Among others this includes the likes of:
@@ -88,7 +89,8 @@ add_newdoc('ArrayLike', 'typing.Union[...]',
add_newdoc('DTypeLike', 'typing.Union[...]',
"""
- A `~typing.Union` representing objects that can be coerced into a `~numpy.dtype`.
+ A `~typing.Union` representing objects that can be coerced
+ into a `~numpy.dtype`.
Among others this includes the likes of:
@@ -101,7 +103,8 @@ add_newdoc('DTypeLike', 'typing.Union[...]',
See Also
--------
:ref:`Specifying and constructing data types <arrays.dtypes.constructing>`
- A comprehensive overview of all objects that can be coerced into data types.
+ A comprehensive overview of all objects that can be coerced
+ into data types.
Examples
--------
diff --git a/numpy/typing/_callable.py b/numpy/typing/_callable.pyi
index 44ad5c291..e1149f26a 100644
--- a/numpy/typing/_callable.py
+++ b/numpy/typing/_callable.pyi
@@ -49,6 +49,8 @@ from ._generic_alias import NDArray
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
+_T1_contra = TypeVar("_T1_contra", contravariant=True)
+_T2_contra = TypeVar("_T2_contra", contravariant=True)
_2Tuple = Tuple[_T1, _T1]
_NBit1 = TypeVar("_NBit1", bound=NBitBase)
@@ -318,8 +320,8 @@ class _ComplexOp(Protocol[_NBit1]):
class _NumberOp(Protocol):
def __call__(self, other: _NumberLike_co, /) -> Any: ...
-class _ComparisonOp(Protocol[_T1, _T2]):
+class _ComparisonOp(Protocol[_T1_contra, _T2_contra]):
@overload
- def __call__(self, other: _T1, /) -> bool_: ...
+ def __call__(self, other: _T1_contra, /) -> bool_: ...
@overload
- def __call__(self, other: _T2, /) -> NDArray[bool_]: ...
+ def __call__(self, other: _T2_contra, /) -> NDArray[bool_]: ...
diff --git a/numpy/typing/_dtype_like.py b/numpy/typing/_dtype_like.py
index 0955f5b18..c9bf1a137 100644
--- a/numpy/typing/_dtype_like.py
+++ b/numpy/typing/_dtype_like.py
@@ -1,4 +1,14 @@
-from typing import Any, List, Sequence, Tuple, Union, Type, TypeVar, Protocol, TypedDict
+from typing import (
+ Any,
+ List,
+ Sequence,
+ Tuple,
+ Union,
+ Type,
+ TypeVar,
+ Protocol,
+ TypedDict,
+)
import numpy as np
@@ -55,18 +65,23 @@ class _DTypeDictBase(TypedDict):
names: Sequence[str]
formats: Sequence[_DTypeLikeNested]
+
# Mandatory + optional keys
class _DTypeDict(_DTypeDictBase, total=False):
+ # Only `str` elements are usable as indexing aliases,
+ # but `titles` can in principle accept any object
offsets: Sequence[int]
- titles: Sequence[Any] # Only `str` elements are usable as indexing aliases, but all objects are legal
+ titles: Sequence[Any]
itemsize: int
aligned: bool
+
# A protocol for anything with the dtype attribute
class _SupportsDType(Protocol[_DType_co]):
@property
def dtype(self) -> _DType_co: ...
+
# Would create a dtype[np.void]
_VoidDTypeLike = Union[
# (flexible_dtype, itemsize)
@@ -93,7 +108,7 @@ DTypeLike = Union[
# default data type (float64)
None,
# array-scalar types and generic types
- Type[Any], # TODO: enumerate these when we add type hints for numpy scalars
+ Type[Any], # NOTE: We're stuck with `Type[Any]` due to object dtypes
# anything with a dtype attribute
_SupportsDType[DType[Any]],
# character codes, type strings or comma-separated fields, e.g., 'float64'
diff --git a/numpy/typing/_extended_precision.py b/numpy/typing/_extended_precision.py
index 0900bc659..edc1778ce 100644
--- a/numpy/typing/_extended_precision.py
+++ b/numpy/typing/_extended_precision.py
@@ -1,4 +1,5 @@
-"""A module with platform-specific extended precision `numpy.number` subclasses.
+"""A module with platform-specific extended precision
+`numpy.number` subclasses.
The subclasses are defined here (instead of ``__init__.pyi``) such
that they can be imported conditionally via the numpy's mypy plugin.
diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py
index 091980d65..5ac75f94d 100644
--- a/numpy/typing/mypy_plugin.py
+++ b/numpy/typing/mypy_plugin.py
@@ -33,7 +33,8 @@ To enable the plugin, one must add it to their mypy `configuration file`_:
from __future__ import annotations
-import typing as t
+from collections.abc import Iterable
+from typing import Final, TYPE_CHECKING, Callable
import numpy as np
@@ -44,15 +45,15 @@ try:
from mypy.nodes import MypyFile, ImportFrom, Statement
from mypy.build import PRI_MED
- _HookFunc = t.Callable[[AnalyzeTypeContext], Type]
+ _HookFunc = Callable[[AnalyzeTypeContext], Type]
MYPY_EX: None | ModuleNotFoundError = None
except ModuleNotFoundError as ex:
MYPY_EX = ex
-__all__: t.List[str] = []
+__all__: list[str] = []
-def _get_precision_dict() -> t.Dict[str, str]:
+def _get_precision_dict() -> dict[str, str]:
names = [
("_NBitByte", np.byte),
("_NBitShort", np.short),
@@ -73,7 +74,7 @@ def _get_precision_dict() -> t.Dict[str, str]:
return ret
-def _get_extended_precision_list() -> t.List[str]:
+def _get_extended_precision_list() -> list[str]:
extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble]
extended_names = {
"uint128",
@@ -107,13 +108,13 @@ def _get_c_intp_name() -> str:
#: A dictionary mapping type-aliases in `numpy.typing._nbit` to
#: concrete `numpy.typing.NBitBase` subclasses.
-_PRECISION_DICT: t.Final = _get_precision_dict()
+_PRECISION_DICT: Final = _get_precision_dict()
#: A list with the names of all extended precision `np.number` subclasses.
-_EXTENDED_PRECISION_LIST: t.Final = _get_extended_precision_list()
+_EXTENDED_PRECISION_LIST: Final = _get_extended_precision_list()
 #: The name of the ctypes equivalent of `np.intp`
-_C_INTP: t.Final = _get_c_intp_name()
+_C_INTP: Final = _get_c_intp_name()
def _hook(ctx: AnalyzeTypeContext) -> Type:
@@ -124,20 +125,19 @@ def _hook(ctx: AnalyzeTypeContext) -> Type:
return api.named_type(name_new)
-if t.TYPE_CHECKING or MYPY_EX is None:
- def _index(iterable: t.Iterable[Statement], id: str) -> int:
+if TYPE_CHECKING or MYPY_EX is None:
+ def _index(iterable: Iterable[Statement], id: str) -> int:
"""Identify the first ``ImportFrom`` instance the specified `id`."""
for i, value in enumerate(iterable):
if getattr(value, "id", None) == id:
return i
- else:
- raise ValueError("Failed to identify a `ImportFrom` instance "
- f"with the following id: {id!r}")
+ raise ValueError("Failed to identify a `ImportFrom` instance "
+ f"with the following id: {id!r}")
def _override_imports(
file: MypyFile,
module: str,
- imports: t.List[t.Tuple[str, t.Optional[str]]],
+ imports: list[tuple[str, None | str]],
) -> None:
"""Override the first `module`-based import with new `imports`."""
# Construct a new `from module import y` statement
@@ -145,7 +145,7 @@ if t.TYPE_CHECKING or MYPY_EX is None:
import_obj.is_top_level = True
# Replace the first `module`-based import statement with `import_obj`
- for lst in [file.defs, file.imports]: # type: t.List[Statement]
+ for lst in [file.defs, file.imports]: # type: list[Statement]
i = _index(lst, module)
lst[i] = import_obj
@@ -153,7 +153,8 @@ if t.TYPE_CHECKING or MYPY_EX is None:
"""A mypy plugin for handling versus numpy-specific typing tasks."""
def get_type_analyze_hook(self, fullname: str) -> None | _HookFunc:
- """Set the precision of platform-specific `numpy.number` subclasses.
+ """Set the precision of platform-specific `numpy.number`
+ subclasses.
For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`.
"""
@@ -161,7 +162,9 @@ if t.TYPE_CHECKING or MYPY_EX is None:
return _hook
return None
- def get_additional_deps(self, file: MypyFile) -> t.List[t.Tuple[int, str, int]]:
+ def get_additional_deps(
+ self, file: MypyFile
+ ) -> list[tuple[int, str, int]]:
"""Handle all import-based overrides.
* Import platform-specific extended-precision `numpy.number`
@@ -184,11 +187,11 @@ if t.TYPE_CHECKING or MYPY_EX is None:
)
return ret
- def plugin(version: str) -> t.Type[_NumpyPlugin]:
+ def plugin(version: str) -> type[_NumpyPlugin]:
"""An entry-point for mypy."""
return _NumpyPlugin
else:
- def plugin(version: str) -> t.Type[_NumpyPlugin]:
+ def plugin(version: str) -> type[_NumpyPlugin]:
"""An entry-point for mypy."""
raise MYPY_EX
diff --git a/numpy/typing/tests/data/fail/arithmetic.py b/numpy/typing/tests/data/fail/arithmetic.pyi
index 02bbffa53..02bbffa53 100644
--- a/numpy/typing/tests/data/fail/arithmetic.py
+++ b/numpy/typing/tests/data/fail/arithmetic.pyi
diff --git a/numpy/typing/tests/data/fail/array_constructors.py b/numpy/typing/tests/data/fail/array_constructors.pyi
index 0e2250513..0e2250513 100644
--- a/numpy/typing/tests/data/fail/array_constructors.py
+++ b/numpy/typing/tests/data/fail/array_constructors.pyi
diff --git a/numpy/typing/tests/data/fail/array_like.py b/numpy/typing/tests/data/fail/array_like.pyi
index 3bbd29061..3bbd29061 100644
--- a/numpy/typing/tests/data/fail/array_like.py
+++ b/numpy/typing/tests/data/fail/array_like.pyi
diff --git a/numpy/typing/tests/data/fail/array_pad.py b/numpy/typing/tests/data/fail/array_pad.pyi
index 2be51a871..2be51a871 100644
--- a/numpy/typing/tests/data/fail/array_pad.py
+++ b/numpy/typing/tests/data/fail/array_pad.pyi
diff --git a/numpy/typing/tests/data/fail/arrayprint.py b/numpy/typing/tests/data/fail/arrayprint.pyi
index 86297a0b2..86297a0b2 100644
--- a/numpy/typing/tests/data/fail/arrayprint.py
+++ b/numpy/typing/tests/data/fail/arrayprint.pyi
diff --git a/numpy/typing/tests/data/fail/arrayterator.py b/numpy/typing/tests/data/fail/arrayterator.pyi
index c50fb2ec4..c50fb2ec4 100644
--- a/numpy/typing/tests/data/fail/arrayterator.py
+++ b/numpy/typing/tests/data/fail/arrayterator.pyi
diff --git a/numpy/typing/tests/data/fail/bitwise_ops.py b/numpy/typing/tests/data/fail/bitwise_ops.pyi
index ee9090007..ee9090007 100644
--- a/numpy/typing/tests/data/fail/bitwise_ops.py
+++ b/numpy/typing/tests/data/fail/bitwise_ops.pyi
diff --git a/numpy/typing/tests/data/fail/char.py b/numpy/typing/tests/data/fail/char.pyi
index 320f05df5..320f05df5 100644
--- a/numpy/typing/tests/data/fail/char.py
+++ b/numpy/typing/tests/data/fail/char.pyi
diff --git a/numpy/typing/tests/data/fail/comparisons.py b/numpy/typing/tests/data/fail/comparisons.pyi
index 0432177e2..0432177e2 100644
--- a/numpy/typing/tests/data/fail/comparisons.py
+++ b/numpy/typing/tests/data/fail/comparisons.pyi
diff --git a/numpy/typing/tests/data/fail/constants.py b/numpy/typing/tests/data/fail/constants.pyi
index 324cbe9fa..324cbe9fa 100644
--- a/numpy/typing/tests/data/fail/constants.py
+++ b/numpy/typing/tests/data/fail/constants.pyi
diff --git a/numpy/typing/tests/data/fail/datasource.py b/numpy/typing/tests/data/fail/datasource.pyi
index 345277d45..345277d45 100644
--- a/numpy/typing/tests/data/fail/datasource.py
+++ b/numpy/typing/tests/data/fail/datasource.pyi
diff --git a/numpy/typing/tests/data/fail/dtype.py b/numpy/typing/tests/data/fail/dtype.pyi
index 0f3810f3c..0f3810f3c 100644
--- a/numpy/typing/tests/data/fail/dtype.py
+++ b/numpy/typing/tests/data/fail/dtype.pyi
diff --git a/numpy/typing/tests/data/fail/einsumfunc.py b/numpy/typing/tests/data/fail/einsumfunc.pyi
index 33722f861..33722f861 100644
--- a/numpy/typing/tests/data/fail/einsumfunc.py
+++ b/numpy/typing/tests/data/fail/einsumfunc.pyi
diff --git a/numpy/typing/tests/data/fail/flatiter.py b/numpy/typing/tests/data/fail/flatiter.pyi
index 544ffbe4a..544ffbe4a 100644
--- a/numpy/typing/tests/data/fail/flatiter.py
+++ b/numpy/typing/tests/data/fail/flatiter.pyi
diff --git a/numpy/typing/tests/data/fail/fromnumeric.py b/numpy/typing/tests/data/fail/fromnumeric.pyi
index 8fafed1b7..8fafed1b7 100644
--- a/numpy/typing/tests/data/fail/fromnumeric.py
+++ b/numpy/typing/tests/data/fail/fromnumeric.pyi
diff --git a/numpy/typing/tests/data/fail/index_tricks.py b/numpy/typing/tests/data/fail/index_tricks.pyi
index c508bf3ae..c508bf3ae 100644
--- a/numpy/typing/tests/data/fail/index_tricks.py
+++ b/numpy/typing/tests/data/fail/index_tricks.pyi
diff --git a/numpy/typing/tests/data/fail/lib_function_base.pyi b/numpy/typing/tests/data/fail/lib_function_base.pyi
new file mode 100644
index 000000000..019bd7f01
--- /dev/null
+++ b/numpy/typing/tests/data/fail/lib_function_base.pyi
@@ -0,0 +1,19 @@
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+AR_m: npt.NDArray[np.timedelta64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+
+np.average(AR_m) # E: incompatible type
+np.select(1, [AR_f8]) # E: incompatible type
+np.angle(AR_m) # E: incompatible type
+np.unwrap(AR_m) # E: incompatible type
+np.unwrap(AR_c16) # E: incompatible type
+np.trim_zeros(1) # E: incompatible type
+np.place(1, [True], 1.5) # E: incompatible type
+np.vectorize(1) # E: incompatible type
+np.add_newdoc("__main__", 1.5, "docstring") # E: incompatible type
+np.place(AR_f8, slice(None), 5) # E: incompatible type
diff --git a/numpy/typing/tests/data/fail/lib_utils.py b/numpy/typing/tests/data/fail/lib_utils.pyi
index e16c926aa..e16c926aa 100644
--- a/numpy/typing/tests/data/fail/lib_utils.py
+++ b/numpy/typing/tests/data/fail/lib_utils.pyi
diff --git a/numpy/typing/tests/data/fail/lib_version.py b/numpy/typing/tests/data/fail/lib_version.pyi
index 2758cfe40..2758cfe40 100644
--- a/numpy/typing/tests/data/fail/lib_version.py
+++ b/numpy/typing/tests/data/fail/lib_version.pyi
diff --git a/numpy/typing/tests/data/fail/linalg.py b/numpy/typing/tests/data/fail/linalg.pyi
index da9390328..da9390328 100644
--- a/numpy/typing/tests/data/fail/linalg.py
+++ b/numpy/typing/tests/data/fail/linalg.pyi
diff --git a/numpy/typing/tests/data/fail/memmap.py b/numpy/typing/tests/data/fail/memmap.pyi
index 434870b60..434870b60 100644
--- a/numpy/typing/tests/data/fail/memmap.py
+++ b/numpy/typing/tests/data/fail/memmap.pyi
diff --git a/numpy/typing/tests/data/fail/modules.py b/numpy/typing/tests/data/fail/modules.pyi
index 59e724f22..59e724f22 100644
--- a/numpy/typing/tests/data/fail/modules.py
+++ b/numpy/typing/tests/data/fail/modules.pyi
diff --git a/numpy/typing/tests/data/fail/multiarray.py b/numpy/typing/tests/data/fail/multiarray.pyi
index 22bcf8c92..22bcf8c92 100644
--- a/numpy/typing/tests/data/fail/multiarray.py
+++ b/numpy/typing/tests/data/fail/multiarray.pyi
diff --git a/numpy/typing/tests/data/fail/ndarray.py b/numpy/typing/tests/data/fail/ndarray.pyi
index 5a5130d40..5a5130d40 100644
--- a/numpy/typing/tests/data/fail/ndarray.py
+++ b/numpy/typing/tests/data/fail/ndarray.pyi
diff --git a/numpy/typing/tests/data/fail/ndarray_misc.py b/numpy/typing/tests/data/fail/ndarray_misc.pyi
index cf3fedc45..8320a44f3 100644
--- a/numpy/typing/tests/data/fail/ndarray_misc.py
+++ b/numpy/typing/tests/data/fail/ndarray_misc.pyi
@@ -35,3 +35,7 @@ AR_M.__int__() # E: Invalid self argument
AR_M.__float__() # E: Invalid self argument
AR_M.__complex__() # E: Invalid self argument
AR_b.__index__() # E: Invalid self argument
+
+AR_f8[1.5] # E: No overload variant
+AR_f8["field_a"] # E: No overload variant
+AR_f8[["field_a", "field_b"]] # E: Invalid index type
diff --git a/numpy/typing/tests/data/fail/nditer.py b/numpy/typing/tests/data/fail/nditer.pyi
index 1e8e37ee5..1e8e37ee5 100644
--- a/numpy/typing/tests/data/fail/nditer.py
+++ b/numpy/typing/tests/data/fail/nditer.pyi
diff --git a/numpy/typing/tests/data/fail/nested_sequence.py b/numpy/typing/tests/data/fail/nested_sequence.pyi
index e28661a05..e28661a05 100644
--- a/numpy/typing/tests/data/fail/nested_sequence.py
+++ b/numpy/typing/tests/data/fail/nested_sequence.pyi
diff --git a/numpy/typing/tests/data/fail/npyio.py b/numpy/typing/tests/data/fail/npyio.pyi
index c91b4c9cb..c91b4c9cb 100644
--- a/numpy/typing/tests/data/fail/npyio.py
+++ b/numpy/typing/tests/data/fail/npyio.pyi
diff --git a/numpy/typing/tests/data/fail/numerictypes.py b/numpy/typing/tests/data/fail/numerictypes.pyi
index a5c2814ef..a5c2814ef 100644
--- a/numpy/typing/tests/data/fail/numerictypes.py
+++ b/numpy/typing/tests/data/fail/numerictypes.pyi
diff --git a/numpy/typing/tests/data/fail/random.py b/numpy/typing/tests/data/fail/random.pyi
index c4d1e3e3e..c4d1e3e3e 100644
--- a/numpy/typing/tests/data/fail/random.py
+++ b/numpy/typing/tests/data/fail/random.pyi
diff --git a/numpy/typing/tests/data/fail/rec.py b/numpy/typing/tests/data/fail/rec.pyi
index a57f1ba27..a57f1ba27 100644
--- a/numpy/typing/tests/data/fail/rec.py
+++ b/numpy/typing/tests/data/fail/rec.pyi
diff --git a/numpy/typing/tests/data/fail/scalars.py b/numpy/typing/tests/data/fail/scalars.pyi
index 94fe3f71e..94fe3f71e 100644
--- a/numpy/typing/tests/data/fail/scalars.py
+++ b/numpy/typing/tests/data/fail/scalars.pyi
diff --git a/numpy/typing/tests/data/fail/stride_tricks.py b/numpy/typing/tests/data/fail/stride_tricks.pyi
index f2bfba743..f2bfba743 100644
--- a/numpy/typing/tests/data/fail/stride_tricks.py
+++ b/numpy/typing/tests/data/fail/stride_tricks.pyi
diff --git a/numpy/typing/tests/data/fail/testing.py b/numpy/typing/tests/data/fail/testing.pyi
index e753a9810..e753a9810 100644
--- a/numpy/typing/tests/data/fail/testing.py
+++ b/numpy/typing/tests/data/fail/testing.pyi
diff --git a/numpy/typing/tests/data/fail/twodim_base.py b/numpy/typing/tests/data/fail/twodim_base.pyi
index ab34a374c..ab34a374c 100644
--- a/numpy/typing/tests/data/fail/twodim_base.py
+++ b/numpy/typing/tests/data/fail/twodim_base.pyi
diff --git a/numpy/typing/tests/data/fail/type_check.py b/numpy/typing/tests/data/fail/type_check.pyi
index 95f52bfbd..95f52bfbd 100644
--- a/numpy/typing/tests/data/fail/type_check.py
+++ b/numpy/typing/tests/data/fail/type_check.pyi
diff --git a/numpy/typing/tests/data/fail/ufunc_config.py b/numpy/typing/tests/data/fail/ufunc_config.pyi
index f547fbb46..f547fbb46 100644
--- a/numpy/typing/tests/data/fail/ufunc_config.py
+++ b/numpy/typing/tests/data/fail/ufunc_config.pyi
diff --git a/numpy/typing/tests/data/fail/ufunclike.py b/numpy/typing/tests/data/fail/ufunclike.pyi
index 82a5f3a1d..82a5f3a1d 100644
--- a/numpy/typing/tests/data/fail/ufunclike.py
+++ b/numpy/typing/tests/data/fail/ufunclike.pyi
diff --git a/numpy/typing/tests/data/fail/ufuncs.py b/numpy/typing/tests/data/fail/ufuncs.pyi
index e827267c6..e827267c6 100644
--- a/numpy/typing/tests/data/fail/ufuncs.py
+++ b/numpy/typing/tests/data/fail/ufuncs.pyi
diff --git a/numpy/typing/tests/data/fail/warnings_and_errors.py b/numpy/typing/tests/data/fail/warnings_and_errors.pyi
index f4fa38293..f4fa38293 100644
--- a/numpy/typing/tests/data/fail/warnings_and_errors.py
+++ b/numpy/typing/tests/data/fail/warnings_and_errors.pyi
diff --git a/numpy/typing/tests/data/misc/extended_precision.py b/numpy/typing/tests/data/misc/extended_precision.pyi
index 1e495e4f3..1e495e4f3 100644
--- a/numpy/typing/tests/data/misc/extended_precision.py
+++ b/numpy/typing/tests/data/misc/extended_precision.pyi
diff --git a/numpy/typing/tests/data/reveal/arithmetic.py b/numpy/typing/tests/data/reveal/arithmetic.pyi
index 0d9132e5b..0d9132e5b 100644
--- a/numpy/typing/tests/data/reveal/arithmetic.py
+++ b/numpy/typing/tests/data/reveal/arithmetic.pyi
diff --git a/numpy/typing/tests/data/reveal/array_constructors.py b/numpy/typing/tests/data/reveal/array_constructors.pyi
index 44c85e988..44c85e988 100644
--- a/numpy/typing/tests/data/reveal/array_constructors.py
+++ b/numpy/typing/tests/data/reveal/array_constructors.pyi
diff --git a/numpy/typing/tests/data/reveal/arraypad.py b/numpy/typing/tests/data/reveal/arraypad.pyi
index 03c03fb4e..03c03fb4e 100644
--- a/numpy/typing/tests/data/reveal/arraypad.py
+++ b/numpy/typing/tests/data/reveal/arraypad.pyi
diff --git a/numpy/typing/tests/data/reveal/arrayprint.py b/numpy/typing/tests/data/reveal/arrayprint.pyi
index e797097eb..e797097eb 100644
--- a/numpy/typing/tests/data/reveal/arrayprint.py
+++ b/numpy/typing/tests/data/reveal/arrayprint.pyi
diff --git a/numpy/typing/tests/data/reveal/arraysetops.py b/numpy/typing/tests/data/reveal/arraysetops.pyi
index c8aeb03ab..c8aeb03ab 100644
--- a/numpy/typing/tests/data/reveal/arraysetops.py
+++ b/numpy/typing/tests/data/reveal/arraysetops.pyi
diff --git a/numpy/typing/tests/data/reveal/arrayterator.py b/numpy/typing/tests/data/reveal/arrayterator.pyi
index ea4e75612..ea4e75612 100644
--- a/numpy/typing/tests/data/reveal/arrayterator.py
+++ b/numpy/typing/tests/data/reveal/arrayterator.pyi
diff --git a/numpy/typing/tests/data/reveal/bitwise_ops.py b/numpy/typing/tests/data/reveal/bitwise_ops.pyi
index 6b9969568..6b9969568 100644
--- a/numpy/typing/tests/data/reveal/bitwise_ops.py
+++ b/numpy/typing/tests/data/reveal/bitwise_ops.pyi
diff --git a/numpy/typing/tests/data/reveal/char.py b/numpy/typing/tests/data/reveal/char.pyi
index dd2e76a2d..dd2e76a2d 100644
--- a/numpy/typing/tests/data/reveal/char.py
+++ b/numpy/typing/tests/data/reveal/char.pyi
diff --git a/numpy/typing/tests/data/reveal/comparisons.py b/numpy/typing/tests/data/reveal/comparisons.pyi
index 16f21cc39..16f21cc39 100644
--- a/numpy/typing/tests/data/reveal/comparisons.py
+++ b/numpy/typing/tests/data/reveal/comparisons.pyi
diff --git a/numpy/typing/tests/data/reveal/constants.py b/numpy/typing/tests/data/reveal/constants.pyi
index 9a46bfded..9a46bfded 100644
--- a/numpy/typing/tests/data/reveal/constants.py
+++ b/numpy/typing/tests/data/reveal/constants.pyi
diff --git a/numpy/typing/tests/data/reveal/ctypeslib.py b/numpy/typing/tests/data/reveal/ctypeslib.pyi
index 0c32d70ed..0c32d70ed 100644
--- a/numpy/typing/tests/data/reveal/ctypeslib.py
+++ b/numpy/typing/tests/data/reveal/ctypeslib.pyi
diff --git a/numpy/typing/tests/data/reveal/datasource.py b/numpy/typing/tests/data/reveal/datasource.pyi
index 245ac7649..245ac7649 100644
--- a/numpy/typing/tests/data/reveal/datasource.py
+++ b/numpy/typing/tests/data/reveal/datasource.pyi
diff --git a/numpy/typing/tests/data/reveal/dtype.py b/numpy/typing/tests/data/reveal/dtype.pyi
index 364d1dcab..364d1dcab 100644
--- a/numpy/typing/tests/data/reveal/dtype.py
+++ b/numpy/typing/tests/data/reveal/dtype.pyi
diff --git a/numpy/typing/tests/data/reveal/einsumfunc.py b/numpy/typing/tests/data/reveal/einsumfunc.pyi
index f1a90428d..f1a90428d 100644
--- a/numpy/typing/tests/data/reveal/einsumfunc.py
+++ b/numpy/typing/tests/data/reveal/einsumfunc.pyi
diff --git a/numpy/typing/tests/data/reveal/flatiter.py b/numpy/typing/tests/data/reveal/flatiter.pyi
index 97776dd9f..97776dd9f 100644
--- a/numpy/typing/tests/data/reveal/flatiter.py
+++ b/numpy/typing/tests/data/reveal/flatiter.pyi
diff --git a/numpy/typing/tests/data/reveal/fromnumeric.py b/numpy/typing/tests/data/reveal/fromnumeric.pyi
index bbcfbb85a..bbcfbb85a 100644
--- a/numpy/typing/tests/data/reveal/fromnumeric.py
+++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi
diff --git a/numpy/typing/tests/data/reveal/getlimits.py b/numpy/typing/tests/data/reveal/getlimits.pyi
index e12723bfe..e12723bfe 100644
--- a/numpy/typing/tests/data/reveal/getlimits.py
+++ b/numpy/typing/tests/data/reveal/getlimits.pyi
diff --git a/numpy/typing/tests/data/reveal/index_tricks.py b/numpy/typing/tests/data/reveal/index_tricks.pyi
index 863d60220..863d60220 100644
--- a/numpy/typing/tests/data/reveal/index_tricks.py
+++ b/numpy/typing/tests/data/reveal/index_tricks.pyi
diff --git a/numpy/typing/tests/data/reveal/lib_function_base.pyi b/numpy/typing/tests/data/reveal/lib_function_base.pyi
new file mode 100644
index 000000000..76d54c49f
--- /dev/null
+++ b/numpy/typing/tests/data/reveal/lib_function_base.pyi
@@ -0,0 +1,99 @@
+from typing import Any
+
+import numpy as np
+import numpy.typing as npt
+
+vectorized_func: np.vectorize
+
+f8: np.float64
+AR_LIKE_f8: list[float]
+
+AR_i8: npt.NDArray[np.int64]
+AR_f8: npt.NDArray[np.float64]
+AR_c16: npt.NDArray[np.complex128]
+AR_O: npt.NDArray[np.object_]
+AR_b: npt.NDArray[np.bool_]
+AR_U: npt.NDArray[np.str_]
+CHAR_AR_U: np.chararray[Any, np.dtype[np.str_]]
+
+def func(*args: Any, **kwargs: Any) -> Any: ...
+
+reveal_type(vectorized_func.pyfunc) # E: def (*Any, **Any) -> Any
+reveal_type(vectorized_func.cache) # E: bool
+reveal_type(vectorized_func.signature) # E: Union[None, builtins.str]
+reveal_type(vectorized_func.otypes) # E: Union[None, builtins.str]
+reveal_type(vectorized_func.excluded) # E: set[Union[builtins.int, builtins.str]]
+reveal_type(vectorized_func.__doc__) # E: Union[None, builtins.str]
+reveal_type(vectorized_func([1])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.vectorize(int)) # E: numpy.vectorize
+reveal_type(np.vectorize( # E: numpy.vectorize
+ int, otypes="i", doc="doc", excluded=(), cache=True, signature=None
+))
+
+reveal_type(np.add_newdoc("__main__", "blabla", doc="test doc")) # E: None
+reveal_type(np.add_newdoc("__main__", "blabla", doc=("meth", "test doc"))) # E: None
+reveal_type(np.add_newdoc("__main__", "blabla", doc=[("meth", "test doc")])) # E: None
+
+reveal_type(np.rot90(AR_f8, k=2)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.rot90(AR_LIKE_f8, axes=(0, 1))) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.flip(f8)) # E: {float64}
+reveal_type(np.flip(1.0)) # E: Any
+reveal_type(np.flip(AR_f8, axis=(0, 1))) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.flip(AR_LIKE_f8, axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.iterable(1)) # E: bool
+reveal_type(np.iterable([1])) # E: bool
+
+reveal_type(np.average(AR_f8)) # E: numpy.floating[Any]
+reveal_type(np.average(AR_f8, weights=AR_c16)) # E: numpy.complexfloating[Any, Any]
+reveal_type(np.average(AR_O)) # E: Any
+reveal_type(np.average(AR_f8, returned=True)) # E: Tuple[numpy.floating[Any], numpy.floating[Any]]
+reveal_type(np.average(AR_f8, weights=AR_c16, returned=True)) # E: Tuple[numpy.complexfloating[Any, Any], numpy.complexfloating[Any, Any]]
+reveal_type(np.average(AR_O, returned=True)) # E: Tuple[Any, Any]
+reveal_type(np.average(AR_f8, axis=0)) # E: Any
+reveal_type(np.average(AR_f8, axis=0, returned=True)) # E: Tuple[Any, Any]
+
+reveal_type(np.asarray_chkfinite(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.asarray_chkfinite(AR_LIKE_f8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.asarray_chkfinite(AR_f8, dtype=np.float64)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.asarray_chkfinite(AR_f8, dtype=float)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.piecewise(AR_f8, AR_b, [func])) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.piecewise(AR_LIKE_f8, AR_b, [func])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.select([AR_f8], [AR_f8])) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.copy(AR_LIKE_f8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.copy(AR_U)) # E: numpy.ndarray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.copy(CHAR_AR_U)) # E: numpy.ndarray[Any, Any]
+reveal_type(np.copy(CHAR_AR_U, "K", subok=True)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+reveal_type(np.copy(CHAR_AR_U, subok=True)) # E: numpy.chararray[Any, numpy.dtype[numpy.str_]]
+
+reveal_type(np.gradient(AR_f8, axis=None)) # E: Any
+reveal_type(np.gradient(AR_LIKE_f8, edge_order=2)) # E: Any
+
+reveal_type(np.diff("bob", n=0)) # E: str
+reveal_type(np.diff(AR_f8, axis=0)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(np.diff(AR_LIKE_f8, prepend=1.5)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.angle(AR_f8)) # E: numpy.floating[Any]
+reveal_type(np.angle(AR_c16, deg=True)) # E: numpy.complexfloating[Any, Any]
+reveal_type(np.angle(AR_O)) # E: Any
+
+reveal_type(np.unwrap(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.floating[Any]]]
+reveal_type(np.unwrap(AR_O)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(np.sort_complex(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[numpy.complexfloating[Any, Any]]]
+
+reveal_type(np.trim_zeros(AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.trim_zeros(AR_LIKE_f8)) # E: list[builtins.float]
+
+reveal_type(np.extract(AR_i8, AR_f8)) # E: numpy.ndarray[Any, numpy.dtype[{float64}]]
+reveal_type(np.extract(AR_i8, AR_LIKE_f8)) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+
+reveal_type(np.place(AR_f8, mask=AR_i8, vals=5.0)) # E: None
+
+reveal_type(np.disp(1, linefeed=True)) # E: None
+with open("test", "w") as f:
+ reveal_type(np.disp("message", device=f)) # E: None
diff --git a/numpy/typing/tests/data/reveal/lib_utils.py b/numpy/typing/tests/data/reveal/lib_utils.pyi
index d82012707..d82012707 100644
--- a/numpy/typing/tests/data/reveal/lib_utils.py
+++ b/numpy/typing/tests/data/reveal/lib_utils.pyi
diff --git a/numpy/typing/tests/data/reveal/lib_version.py b/numpy/typing/tests/data/reveal/lib_version.pyi
index e6f695558..e6f695558 100644
--- a/numpy/typing/tests/data/reveal/lib_version.py
+++ b/numpy/typing/tests/data/reveal/lib_version.pyi
diff --git a/numpy/typing/tests/data/reveal/linalg.py b/numpy/typing/tests/data/reveal/linalg.pyi
index fecdc0d37..fecdc0d37 100644
--- a/numpy/typing/tests/data/reveal/linalg.py
+++ b/numpy/typing/tests/data/reveal/linalg.pyi
diff --git a/numpy/typing/tests/data/reveal/memmap.py b/numpy/typing/tests/data/reveal/memmap.pyi
index c1d8edc67..c1d8edc67 100644
--- a/numpy/typing/tests/data/reveal/memmap.py
+++ b/numpy/typing/tests/data/reveal/memmap.pyi
diff --git a/numpy/typing/tests/data/reveal/mod.py b/numpy/typing/tests/data/reveal/mod.pyi
index bf45b8c58..bf45b8c58 100644
--- a/numpy/typing/tests/data/reveal/mod.py
+++ b/numpy/typing/tests/data/reveal/mod.pyi
diff --git a/numpy/typing/tests/data/reveal/modules.py b/numpy/typing/tests/data/reveal/modules.pyi
index 7e695433e..7e695433e 100644
--- a/numpy/typing/tests/data/reveal/modules.py
+++ b/numpy/typing/tests/data/reveal/modules.pyi
diff --git a/numpy/typing/tests/data/reveal/multiarray.py b/numpy/typing/tests/data/reveal/multiarray.pyi
index ee818c08a..ee818c08a 100644
--- a/numpy/typing/tests/data/reveal/multiarray.py
+++ b/numpy/typing/tests/data/reveal/multiarray.pyi
diff --git a/numpy/typing/tests/data/reveal/nbit_base_example.py b/numpy/typing/tests/data/reveal/nbit_base_example.pyi
index d34f6f69a..d34f6f69a 100644
--- a/numpy/typing/tests/data/reveal/nbit_base_example.py
+++ b/numpy/typing/tests/data/reveal/nbit_base_example.pyi
diff --git a/numpy/typing/tests/data/reveal/ndarray_conversion.py b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
index 03f2faf43..03f2faf43 100644
--- a/numpy/typing/tests/data/reveal/ndarray_conversion.py
+++ b/numpy/typing/tests/data/reveal/ndarray_conversion.pyi
diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.py b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
index 050b82cdc..e384b5388 100644
--- a/numpy/typing/tests/data/reveal/ndarray_misc.py
+++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi
@@ -20,6 +20,7 @@ B: SubClass
AR_f8: NDArray[np.float64]
AR_i8: NDArray[np.int64]
AR_U: NDArray[np.str_]
+AR_V: NDArray[np.void]
ctypes_obj = AR_f8.ctypes
@@ -193,3 +194,13 @@ reveal_type(operator.index(AR_i8)) # E: int
reveal_type(AR_f8.__array_prepare__(B)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
reveal_type(AR_f8.__array_wrap__(B)) # E: numpy.ndarray[Any, numpy.dtype[numpy.object_]]
+
+reveal_type(AR_V[0]) # E: Any
+reveal_type(AR_V[0, 0]) # E: Any
+reveal_type(AR_V[AR_i8]) # E: Any
+reveal_type(AR_V[AR_i8, AR_i8]) # E: Any
+reveal_type(AR_V[AR_i8, None]) # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V[0, ...]) # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V[:]) # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
+reveal_type(AR_V["a"]) # E: numpy.ndarray[Any, numpy.dtype[Any]]
+reveal_type(AR_V[["a", "b"]]) # E: numpy.ndarray[Any, numpy.dtype[numpy.void]]
diff --git a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi
index a44e1cfa1..a44e1cfa1 100644
--- a/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.py
+++ b/numpy/typing/tests/data/reveal/ndarray_shape_manipulation.pyi
diff --git a/numpy/typing/tests/data/reveal/nditer.py b/numpy/typing/tests/data/reveal/nditer.pyi
index 473e922a2..473e922a2 100644
--- a/numpy/typing/tests/data/reveal/nditer.py
+++ b/numpy/typing/tests/data/reveal/nditer.pyi
diff --git a/numpy/typing/tests/data/reveal/nested_sequence.py b/numpy/typing/tests/data/reveal/nested_sequence.pyi
index 07e24e357..07e24e357 100644
--- a/numpy/typing/tests/data/reveal/nested_sequence.py
+++ b/numpy/typing/tests/data/reveal/nested_sequence.pyi
diff --git a/numpy/typing/tests/data/reveal/npyio.py b/numpy/typing/tests/data/reveal/npyio.pyi
index bee97a8e1..bee97a8e1 100644
--- a/numpy/typing/tests/data/reveal/npyio.py
+++ b/numpy/typing/tests/data/reveal/npyio.pyi
diff --git a/numpy/typing/tests/data/reveal/numeric.py b/numpy/typing/tests/data/reveal/numeric.pyi
index ec6e47ca0..ec6e47ca0 100644
--- a/numpy/typing/tests/data/reveal/numeric.py
+++ b/numpy/typing/tests/data/reveal/numeric.pyi
diff --git a/numpy/typing/tests/data/reveal/numerictypes.py b/numpy/typing/tests/data/reveal/numerictypes.pyi
index c50a3a3d6..c50a3a3d6 100644
--- a/numpy/typing/tests/data/reveal/numerictypes.py
+++ b/numpy/typing/tests/data/reveal/numerictypes.pyi
diff --git a/numpy/typing/tests/data/reveal/random.py b/numpy/typing/tests/data/reveal/random.pyi
index 6fc35aced..6fc35aced 100644
--- a/numpy/typing/tests/data/reveal/random.py
+++ b/numpy/typing/tests/data/reveal/random.pyi
diff --git a/numpy/typing/tests/data/reveal/rec.py b/numpy/typing/tests/data/reveal/rec.pyi
index 2fa8cc7b9..2fa8cc7b9 100644
--- a/numpy/typing/tests/data/reveal/rec.py
+++ b/numpy/typing/tests/data/reveal/rec.pyi
diff --git a/numpy/typing/tests/data/reveal/scalars.py b/numpy/typing/tests/data/reveal/scalars.pyi
index a95f8f6f2..a95f8f6f2 100644
--- a/numpy/typing/tests/data/reveal/scalars.py
+++ b/numpy/typing/tests/data/reveal/scalars.pyi
diff --git a/numpy/typing/tests/data/reveal/shape_base.py b/numpy/typing/tests/data/reveal/shape_base.pyi
index 57633defb..57633defb 100644
--- a/numpy/typing/tests/data/reveal/shape_base.py
+++ b/numpy/typing/tests/data/reveal/shape_base.pyi
diff --git a/numpy/typing/tests/data/reveal/stride_tricks.py b/numpy/typing/tests/data/reveal/stride_tricks.pyi
index 152d9cea6..152d9cea6 100644
--- a/numpy/typing/tests/data/reveal/stride_tricks.py
+++ b/numpy/typing/tests/data/reveal/stride_tricks.pyi
diff --git a/numpy/typing/tests/data/reveal/testing.py b/numpy/typing/tests/data/reveal/testing.pyi
index 2b040ff60..2b040ff60 100644
--- a/numpy/typing/tests/data/reveal/testing.py
+++ b/numpy/typing/tests/data/reveal/testing.pyi
diff --git a/numpy/typing/tests/data/reveal/twodim_base.py b/numpy/typing/tests/data/reveal/twodim_base.pyi
index b95fbc71e..b95fbc71e 100644
--- a/numpy/typing/tests/data/reveal/twodim_base.py
+++ b/numpy/typing/tests/data/reveal/twodim_base.pyi
diff --git a/numpy/typing/tests/data/reveal/type_check.py b/numpy/typing/tests/data/reveal/type_check.pyi
index 416dd42a8..416dd42a8 100644
--- a/numpy/typing/tests/data/reveal/type_check.py
+++ b/numpy/typing/tests/data/reveal/type_check.pyi
diff --git a/numpy/typing/tests/data/reveal/ufunc_config.py b/numpy/typing/tests/data/reveal/ufunc_config.pyi
index 26be80314..26be80314 100644
--- a/numpy/typing/tests/data/reveal/ufunc_config.py
+++ b/numpy/typing/tests/data/reveal/ufunc_config.pyi
diff --git a/numpy/typing/tests/data/reveal/ufunclike.py b/numpy/typing/tests/data/reveal/ufunclike.pyi
index 8b3aea7ce..8b3aea7ce 100644
--- a/numpy/typing/tests/data/reveal/ufunclike.py
+++ b/numpy/typing/tests/data/reveal/ufunclike.pyi
diff --git a/numpy/typing/tests/data/reveal/ufuncs.py b/numpy/typing/tests/data/reveal/ufuncs.pyi
index ade45577c..ade45577c 100644
--- a/numpy/typing/tests/data/reveal/ufuncs.py
+++ b/numpy/typing/tests/data/reveal/ufuncs.pyi
diff --git a/numpy/typing/tests/data/reveal/warnings_and_errors.py b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
index 3f20a0135..3f20a0135 100644
--- a/numpy/typing/tests/data/reveal/warnings_and_errors.py
+++ b/numpy/typing/tests/data/reveal/warnings_and_errors.pyi
diff --git a/numpy/typing/tests/test_runtime.py b/numpy/typing/tests/test_runtime.py
index 151b06bed..5b5df49dc 100644
--- a/numpy/typing/tests/test_runtime.py
+++ b/numpy/typing/tests/test_runtime.py
@@ -3,7 +3,7 @@
from __future__ import annotations
import sys
-from typing import get_type_hints, Union, Tuple, NamedTuple, get_args, get_origin
+from typing import get_type_hints, Union, NamedTuple, get_args, get_origin
import pytest
import numpy as np
@@ -12,7 +12,7 @@ import numpy.typing as npt
class TypeTup(NamedTuple):
typ: type
- args: Tuple[type, ...]
+ args: tuple[type, ...]
origin: None | type
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index 81863c780..0f3e10b7b 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -1,10 +1,13 @@
+from __future__ import annotations
+
import importlib.util
import itertools
import os
import re
import shutil
from collections import defaultdict
-from typing import Optional, IO, Dict, List
+from collections.abc import Iterator
+from typing import IO, TYPE_CHECKING
import pytest
import numpy as np
@@ -21,6 +24,10 @@ except ImportError:
else:
NO_MYPY = False
+if TYPE_CHECKING:
+ # We need this as annotation, but it's located in a private namespace.
+ # As a compromise, do *not* import it during runtime
+ from _pytest.mark.structures import ParameterSet
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
PASS_DIR = os.path.join(DATA_DIR, "pass")
@@ -32,7 +39,7 @@ CACHE_DIR = os.path.join(DATA_DIR, ".mypy_cache")
#: A dictionary with file names as keys and lists of the mypy stdout as values.
#: To-be populated by `run_mypy`.
-OUTPUT_MYPY: Dict[str, List[str]] = {}
+OUTPUT_MYPY: dict[str, list[str]] = {}
def _key_func(key: str) -> str:
@@ -62,7 +69,10 @@ def run_mypy() -> None:
NUMPY_TYPING_TEST_CLEAR_CACHE=0 pytest numpy/typing/tests
"""
- if os.path.isdir(CACHE_DIR) and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True)):
+ if (
+ os.path.isdir(CACHE_DIR)
+ and bool(os.environ.get("NUMPY_TYPING_TEST_CLEAR_CACHE", True))
+ ):
shutil.rmtree(CACHE_DIR)
for directory in (PASS_DIR, REVEAL_DIR, FAIL_DIR, MISC_DIR):
@@ -85,10 +95,10 @@ def run_mypy() -> None:
OUTPUT_MYPY.update((k, list(v)) for k, v in iterator if k)
-def get_test_cases(directory):
+def get_test_cases(directory: str) -> Iterator[ParameterSet]:
for root, _, files in os.walk(directory):
for fname in files:
- if os.path.splitext(fname)[-1] == ".py":
+ if os.path.splitext(fname)[-1] in (".pyi", ".py"):
fullpath = os.path.join(root, fname)
# Use relative path for nice py.test name
relpath = os.path.relpath(fullpath, start=directory)
@@ -103,7 +113,7 @@ def get_test_cases(directory):
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
-def test_success(path):
+def test_success(path) -> None:
# Alias `OUTPUT_MYPY` so that it appears in the local namespace
output_mypy = OUTPUT_MYPY
if path in output_mypy:
@@ -115,7 +125,7 @@ def test_success(path):
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(FAIL_DIR))
-def test_fail(path):
+def test_fail(path: str) -> None:
__tracebackhide__ = True
with open(path) as fin:
@@ -138,7 +148,10 @@ def test_fail(path):
for i, line in enumerate(lines):
lineno = i + 1
- if line.startswith('#') or (" E:" not in line and lineno not in errors):
+ if (
+ line.startswith('#')
+ or (" E:" not in line and lineno not in errors)
+ ):
continue
target_line = lines[lineno - 1]
@@ -162,14 +175,19 @@ Observed error: {!r}
"""
-def _test_fail(path: str, error: str, expected_error: Optional[str], lineno: int) -> None:
+def _test_fail(
+ path: str,
+ error: str,
+ expected_error: None | str,
+ lineno: int,
+) -> None:
if expected_error is None:
raise AssertionError(_FAIL_MSG1.format(lineno, error))
elif error not in expected_error:
raise AssertionError(_FAIL_MSG2.format(lineno, expected_error, error))
-def _construct_format_dict():
+def _construct_format_dict() -> dict[str, str]:
dct = {k.split(".")[-1]: v.replace("numpy", "numpy.typing") for
k, v in _PRECISION_DICT.items()}
@@ -193,12 +211,18 @@ def _construct_format_dict():
"float96": "numpy.floating[numpy.typing._96Bit]",
"float128": "numpy.floating[numpy.typing._128Bit]",
"float256": "numpy.floating[numpy.typing._256Bit]",
- "complex64": "numpy.complexfloating[numpy.typing._32Bit, numpy.typing._32Bit]",
- "complex128": "numpy.complexfloating[numpy.typing._64Bit, numpy.typing._64Bit]",
- "complex160": "numpy.complexfloating[numpy.typing._80Bit, numpy.typing._80Bit]",
- "complex192": "numpy.complexfloating[numpy.typing._96Bit, numpy.typing._96Bit]",
- "complex256": "numpy.complexfloating[numpy.typing._128Bit, numpy.typing._128Bit]",
- "complex512": "numpy.complexfloating[numpy.typing._256Bit, numpy.typing._256Bit]",
+ "complex64": ("numpy.complexfloating"
+ "[numpy.typing._32Bit, numpy.typing._32Bit]"),
+ "complex128": ("numpy.complexfloating"
+ "[numpy.typing._64Bit, numpy.typing._64Bit]"),
+ "complex160": ("numpy.complexfloating"
+ "[numpy.typing._80Bit, numpy.typing._80Bit]"),
+ "complex192": ("numpy.complexfloating"
+ "[numpy.typing._96Bit, numpy.typing._96Bit]"),
+ "complex256": ("numpy.complexfloating"
+ "[numpy.typing._128Bit, numpy.typing._128Bit]"),
+ "complex512": ("numpy.complexfloating"
+ "[numpy.typing._256Bit, numpy.typing._256Bit]"),
"ubyte": f"numpy.unsignedinteger[{dct['_NBitByte']}]",
"ushort": f"numpy.unsignedinteger[{dct['_NBitShort']}]",
@@ -217,9 +241,14 @@ def _construct_format_dict():
"single": f"numpy.floating[{dct['_NBitSingle']}]",
"double": f"numpy.floating[{dct['_NBitDouble']}]",
"longdouble": f"numpy.floating[{dct['_NBitLongDouble']}]",
- "csingle": f"numpy.complexfloating[{dct['_NBitSingle']}, {dct['_NBitSingle']}]",
- "cdouble": f"numpy.complexfloating[{dct['_NBitDouble']}, {dct['_NBitDouble']}]",
- "clongdouble": f"numpy.complexfloating[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]",
+ "csingle": ("numpy.complexfloating"
+ f"[{dct['_NBitSingle']}, {dct['_NBitSingle']}]"),
+ "cdouble": ("numpy.complexfloating"
+ f"[{dct['_NBitDouble']}, {dct['_NBitDouble']}]"),
+ "clongdouble": (
+ "numpy.complexfloating"
+ f"[{dct['_NBitLongDouble']}, {dct['_NBitLongDouble']}]"
+ ),
# numpy.typing
"_NBitInt": dct['_NBitInt'],
@@ -231,14 +260,16 @@ def _construct_format_dict():
#: A dictionary with all supported format keys (as keys)
#: and matching values
-FORMAT_DICT: Dict[str, str] = _construct_format_dict()
+FORMAT_DICT: dict[str, str] = _construct_format_dict()
-def _parse_reveals(file: IO[str]) -> List[str]:
- """Extract and parse all ``" # E: "`` comments from the passed file-like object.
+def _parse_reveals(file: IO[str]) -> list[str]:
+ """Extract and parse all ``" # E: "`` comments from the passed
+ file-like object.
- All format keys will be substituted for their respective value from `FORMAT_DICT`,
- *e.g.* ``"{float64}"`` becomes ``"numpy.floating[numpy.typing._64Bit]"``.
+ All format keys will be substituted for their respective value
+ from `FORMAT_DICT`, *e.g.* ``"{float64}"`` becomes
+ ``"numpy.floating[numpy.typing._64Bit]"``.
"""
string = file.read().replace("*", "")
@@ -250,7 +281,8 @@ def _parse_reveals(file: IO[str]) -> List[str]:
# there is the risk of accidentally grabbing dictionaries and sets
key_set = set(re.findall(r"\{(.*?)\}", comments))
kwargs = {
- k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for k in key_set
+ k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for
+ k in key_set
}
fmt_str = comments.format(**kwargs)
@@ -260,7 +292,10 @@ def _parse_reveals(file: IO[str]) -> List[str]:
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(REVEAL_DIR))
-def test_reveal(path):
+def test_reveal(path: str) -> None:
+ """Validate that mypy correctly infers the return-types of
+ the expressions in `path`.
+ """
__tracebackhide__ = True
with open(path) as fin:
@@ -290,18 +325,33 @@ Observed reveal: {!r}
"""
-def _test_reveal(path: str, reveal: str, expected_reveal: str, lineno: int) -> None:
+def _test_reveal(
+ path: str,
+ reveal: str,
+ expected_reveal: str,
+ lineno: int,
+) -> None:
+ """Error-reporting helper function for `test_reveal`."""
if reveal not in expected_reveal:
- raise AssertionError(_REVEAL_MSG.format(lineno, expected_reveal, reveal))
+ raise AssertionError(
+ _REVEAL_MSG.format(lineno, expected_reveal, reveal)
+ )
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
@pytest.mark.parametrize("path", get_test_cases(PASS_DIR))
-def test_code_runs(path):
+def test_code_runs(path: str) -> None:
+    """Validate that the code in `path` runs properly during runtime."""
path_without_extension, _ = os.path.splitext(path)
dirname, filename = path.split(os.sep)[-2:]
- spec = importlib.util.spec_from_file_location(f"{dirname}.{filename}", path)
+
+
+ spec = importlib.util.spec_from_file_location(
+ f"{dirname}.{filename}", path
+ )
+ assert spec is not None
+ assert spec.loader is not None
+
test_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(test_module)
@@ -325,7 +375,7 @@ LINENO_MAPPING = {
@pytest.mark.slow
@pytest.mark.skipif(NO_MYPY, reason="Mypy is not installed")
def test_extended_precision() -> None:
- path = os.path.join(MISC_DIR, "extended_precision.py")
+ path = os.path.join(MISC_DIR, "extended_precision.pyi")
output_mypy = OUTPUT_MYPY
assert path in output_mypy
diff --git a/tools/linter.py b/tools/linter.py
index 9d23ffb48..0031ff83a 100644
--- a/tools/linter.py
+++ b/tools/linter.py
@@ -14,6 +14,7 @@ CONFIG = os.path.join(
# computing the diff itself.
EXCLUDE = (
"numpy/typing/tests/data/",
+ "numpy/typing/_char_codes.py",
"numpy/__config__.py",
"numpy/f2py",
)