Diffstat:
-rw-r--r--  .github/workflows/build_test.yml | 2
-rw-r--r--  .gitignore | 1
-rw-r--r--  .mailmap | 23
-rw-r--r--  benchmarks/benchmarks/bench_core.py | 2
-rw-r--r--  benchmarks/benchmarks/bench_function_base.py | 19
-rw-r--r--  benchmarks/benchmarks/bench_linalg.py | 12
-rw-r--r--  benchmarks/benchmarks/bench_ufunc.py | 40
-rw-r--r--  doc/release/upcoming_changes/16830.expired.rst | 6
-rw-r--r--  doc/release/upcoming_changes/20020.new_function.rst | 4
-rw-r--r--  doc/release/upcoming_changes/20993.improvement.rst | 5
-rw-r--r--  doc/release/upcoming_changes/21485.new_feature.rst | 5
-rw-r--r--  doc/source/_static/numpy.css | 41
-rw-r--r--  doc/source/_templates/layout.html | 10
-rw-r--r--  doc/source/conf.py | 5
-rw-r--r--  doc/source/index.rst | 29
-rw-r--r--  doc/source/reference/c-api/array.rst | 15
-rw-r--r--  doc/source/reference/routines.ma.rst | 1
-rw-r--r--  doc/source/user/misc.rst | 20
-rw-r--r--  environment.yml | 3
-rw-r--r--  numpy/__init__.pyi | 26
-rw-r--r--  numpy/core/_add_newdocs.py | 27
-rw-r--r--  numpy/core/_methods.py | 3
-rw-r--r--  numpy/core/feature_detection_locale.h | 1
-rw-r--r--  numpy/core/include/numpy/npy_3kcompat.h | 4
-rw-r--r--  numpy/core/multiarray.py | 2
-rw-r--r--  numpy/core/src/common/get_attr_string.h | 2
-rw-r--r--  numpy/core/src/common/lowlevel_strided_loops.h | 16
-rw-r--r--  numpy/core/src/common/ufunc_override.c | 6
-rw-r--r--  numpy/core/src/multiarray/alloc.c | 19
-rw-r--r--  numpy/core/src/multiarray/alloc.h | 3
-rw-r--r--  numpy/core/src/multiarray/array_method.c | 18
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c | 4
-rw-r--r--  numpy/core/src/multiarray/common.c | 17
-rw-r--r--  numpy/core/src/multiarray/common.h | 47
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 125
-rw-r--r--  numpy/core/src/multiarray/getset.c | 6
-rw-r--r--  numpy/core/src/multiarray/methods.c | 15
-rw-r--r--  numpy/core/src/multiarray/multiarraymodule.c | 2
-rw-r--r--  numpy/core/src/umath/fast_loop_macros.h | 13
-rw-r--r--  numpy/core/src/umath/scalarmath.c.src | 252
-rw-r--r--  numpy/core/src/umath/ufunc_object.c | 34
-rw-r--r--  numpy/core/tests/test_dtype.py | 6
-rw-r--r--  numpy/core/tests/test_numeric.py | 81
-rw-r--r--  numpy/core/tests/test_regression.py | 1
-rw-r--r--  numpy/core/tests/test_scalarmath.py | 52
-rw-r--r--  numpy/distutils/mingw32ccompiler.py | 3
-rw-r--r--  numpy/distutils/misc_util.py | 5
-rw-r--r--  numpy/lib/format.py | 3
-rw-r--r--  numpy/lib/function_base.py | 34
-rw-r--r--  numpy/lib/function_base.pyi | 8
-rw-r--r--  numpy/lib/npyio.py | 20
-rw-r--r--  numpy/lib/npyio.pyi | 3
-rw-r--r--  numpy/lib/tests/test_function_base.py | 28
-rw-r--r--  numpy/lib/tests/test_twodim_base.py | 8
-rw-r--r--  numpy/lib/twodim_base.py | 12
-rw-r--r--  numpy/ma/__init__.pyi | 1
-rw-r--r--  numpy/ma/extras.py | 119
-rw-r--r--  numpy/ma/extras.pyi | 3
-rw-r--r--  numpy/ma/tests/test_extras.py | 67
-rw-r--r--  numpy/random/bit_generator.pyx | 8
-rw-r--r--  numpy/testing/_private/utils.py | 3
-rw-r--r--  numpy/testing/tests/test_utils.py | 16
-rw-r--r--  numpy/typing/tests/data/reveal/arithmetic.pyi | 5
-rwxr-xr-x  setup.py | 4
-rw-r--r--  pyproject.toml | 2
-rw-r--r--  test_requirements.txt | 4
66 files changed, 1014 insertions(+), 367 deletions(-)
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index db7883ab9..1def3acae 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -253,7 +253,7 @@ jobs:
docker run --name the_container --interactive -v /:/host arm32v7/ubuntu:focal /bin/bash -c "
apt update &&
apt install -y git python3 python3-dev python3-pip &&
- pip3 install cython==0.29.28 setuptools\<49.2.0 hypothesis==6.23.3 pytest==6.2.5 &&
+ pip3 install cython==0.29.29 setuptools\<49.2.0 hypothesis==6.23.3 pytest==6.2.5 &&
ln -s /host/lib64 /lib64 &&
ln -s /host/lib/x86_64-linux-gnu /lib/x86_64-linux-gnu &&
ln -s /host/usr/arm-linux-gnueabihf /usr/arm-linux-gnueabihf &&
diff --git a/.gitignore b/.gitignore
index d905f638f..632f13674 100644
--- a/.gitignore
+++ b/.gitignore
@@ -224,6 +224,7 @@ numpy/core/src/umath/loops_trigonometric.dispatch.c
numpy/core/src/umath/loops_exponent_log.dispatch.c
numpy/core/src/umath/loops_umath_fp.dispatch.c
numpy/core/src/umath/loops_hyperbolic.dispatch.c
+numpy/core/src/umath/loops_modulo.dispatch.c
# npysort module
numpy/core/src/npysort/x86-qsort.dispatch.c
numpy/core/src/npysort/x86-qsort.dispatch.*.cpp
diff --git a/.mailmap b/.mailmap
index 73e3965a0..314966e09 100644
--- a/.mailmap
+++ b/.mailmap
@@ -11,12 +11,14 @@
@8bitmp3 <19637339+8bitmp3@users.noreply.github.com>
@DWesl <22566757+DWesl@users.noreply.github.com>
@Endolith <endolith@gmail.com>
+@GalaxySnail <ylc991@163.com>
@Illviljan <14371165+Illviljan@users.noreply.github.com>
@LSchroefl <65246829+LSchroefl@users.noreply.github.com>
@Lbogula <bogulala7@gmail.com>
@Lisa <34400837+lyzlisa@users.noreply.github.com>
@Patrick <39380924+xamm@users.noreply.github.com>
@Scian <65375075+hoony6134@users.noreply.github.com>
+@code-review-doctor <contact+django-doctor-test@richardtier.co.uk>
@h-vetinari <h.vetinari@gmx.com>
@h6197627 <44726212+h6197627@users.noreply.github.com>
@jbCodeHub <besselingcodehub@gmail.com>
@@ -45,6 +47,8 @@ Abel Aoun <aoun@cerfacs.fr>
Adam Ginsburg <adam.g.ginsburg@gmail.com> <keflavich@gmail.com>
Aerik Pawson <45904740+aerikpawson@users.noreply.github.com>
Ahmet Can Solak <asolak14@ku.edu.tr>
+Amrit Krishnan <amrit110@gmail.com>
+Amrit Krishnan <amrit110@gmail.com> <amritk@vectorinstitute.ai>
Albert Jornet Puig <albert.jornet@ic3.cat>
Alberto Rubiales <arubiales11@gmail.com>
Alex Rockhill <aprockhill206@gmail.com>
@@ -96,6 +100,8 @@ Ashutosh Singh <ashutoshsinghrkt@gmail.com> <55102089+Ashutosh619-sudo@users.nor
Åsmund Hjulstad <ahju@statoil.com> <asmund@hjulstad.com>
Auke Wiggers <wiggers.auke@gmail.com>
Badhri Narayanan Krishnakumar <badhrinarayanan.k@gmail.com>
+Bhavuk Kalra <bhavukkalra1786@gmail.com>
+Bhavuk Kalra <bhavukkalra1786@gmail.com> <ybhavukkalra1786@gmail.com>
Bangcheng Yang <bangchengyang@hotmail.com>
Bhargav V <12525622+brpy@users.noreply.github.com>
Bas van Beek <b.f.van.beek@vu.nl> <43369155+BvB93@users.noreply.github.com>
@@ -119,6 +125,7 @@ Bryan Van de Ven <bryanv@continuum.io> Bryan Van de Ven <bryan@Laptop-3.local>
Bryan Van de Ven <bryanv@continuum.io> Bryan Van de Ven <bryan@laptop.local>
Brénainn Woodsend <bwoodsend@gmail.com>
Bui Duc Minh <buiducminh287@gmail.com> <41239569+Mibu287@users.noreply.github.com>
+Caio Agiani <agianicaio@gmail.com>
Carl Kleffner <cmkleffner@gmail.com>
Carl Leake <leakec57@gmail.com>
Charles Stern <62192187+cisaacstern@users.noreply.github.com>
@@ -142,6 +149,7 @@ Chunlin Fang <fangchunlin@huawei.com> <qiyu8@foxmail.com>
Chunlin Fang <fangchunlin@huawei.com> <834352945@qq.com>
Colin Snyder <8csnyder@gmail.com> <47012605+colinsnyder@users.noreply.github.com>
Constanza Fierro <constanza.fierro94@gmail.com>
+Dahyun Kim <dahyun@kakao.com>
Daniel B Allan <daniel.b.allan@gmail.com>
Daniel da Silva <mail@danieldasilva.org> <daniel@meltingwax.net>
Daniel da Silva <mail@danieldasilva.org> <var.mail.daniel@gmail.com>
@@ -161,6 +169,7 @@ David M Cooke <cookedm@localhost>
David Nicholson <davidjn@google.com> <dnic12345@gmail.com>
David Ochoa <ochoadavid@gmail.com>
David Pitchford <david.t.pitchford@gmail.com> <david.t.pitchford@gmail.com>
+David Prosin <davidprosin@gmail.com>
Davide Dal Bosco <davidemcwood@gmail.com> <62077652+davidedalbosco@users.noreply.github.com>
Dawid Zych <dawid.zych@yandex.com>
Dennis Zollo <dzollo@swift-nav.com>
@@ -168,8 +177,12 @@ Derek Homeier <derek@astro.physik.uni-goettingen.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> <dhomeie@gwdg.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> <derek@astro.phsik.uni-goettingen.de>
Derrick Williams <myutat@gmail.com>
+Devin Shanahan <dshanahan88@gmail.com>
Dima Pasechnik <dima@pasechnik.info>
Dima Pasechnik <dima@pasechnik.info> <dimpase@gmail.com>
+Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com>
+Ding Liu <nimrodcowboy@gmail.com>
+Ding Liu <nimrodcowboy@gmail.com> <Nimrodcowboy@gmail.com>
Dmitriy Shalyga <zuko3d@gmail.com>
Dustan Levenstein <dlevenstein@gmail.com> <43019642+dustanlevenstein@users.noreply.github.com>
Dylan Cutler <dylancutler@google.com>
@@ -214,6 +227,7 @@ Guo Shuai <gs0801@foxmail.com>
Hameer Abbasi <einstein.edison@gmail.com> <hameerabbasi@yahoo.com>
Han Genuit <hangenuit@gmail.com>
Hanno Klemm <hanno.klemm@maerskoil.com> hklemm <hanno.klemm@maerskoil.com>
+Harsh Mishra <erbeusgriffincasper@gmail.com>
Helder Oliveira <heldercro@gmail.com>
Hemil Desai <desai38@purdue.edu>
Himanshu <addyjeridiq@gmail.com>
@@ -224,6 +238,7 @@ Imen Rajhi <imen.rajhi.ir@gmail.com>
Inessa Pawson <albuscode@gmail.com>
Irina Maria Mocan <28827042+IrinaMaria@users.noreply.github.com>
Irvin Probst <irvin.probst@ensta-bretagne.fr>
+Ivan Meleshko <vn.mlshk@gmail.com>
Isabela Presedo-Floyd <irpf.design@gmail.com> <ipresedo@calpoly.edu>
Gerhard Hobler <gerhard.hobler@tuwien.ac.at>
Giannis Zapantis <sdi1900059@di.uoa.gr>
@@ -249,6 +264,7 @@ Jeffrey Yancey <jeffrey@octane5.com> <3820914+jeffyancey@users.noreply.github.co
Jeremy Lay <jlay80@gmail.com>
Jérémie du Boisberranger <jeremie.du-boisberranger@inria.fr> jeremiedbb <34657725+jeremiedbb@users.noreply.github.com>
Jérome Eertmans <jeertmans@icloud.com>
+Jérôme Richard <jeromerichard111@msn.com> <ubuntu@ip-172-31-17-195.eu-west-3.compute.internal>
Jerome Kelleher <jerome.kelleher@ed.ac.uk>
Jessi J Zhao <35235453+jessijzhao@users.noreply.github.com>
Johannes Hampp <johannes.hampp@zeu.uni-giessen.de> <42553970+euronion@users.noreply.github.com>
@@ -271,6 +287,7 @@ Julian Taylor <juliantaylor108@gmail.com> <jtaylor.debian@googlemail.com>
Julian Taylor <juliantaylor108@gmail.com> <jtaylor108@googlemail.com>
Julien Lhermitte <jrmlhermitte@gmail.com> Julien Lhermitte <lhermitte@bnl.gov>
Julien Schueller <julien.schueller@gmail.com>
+Junyan Ou <junyan.ou189@gmail.com>
Justus Magin <keewis@posteo.de>
Justus Magin <keewis@posteo.de> <keewis@users.noreply.github.com>
Kai Striega <kaistriega@gmail.com>
@@ -299,6 +316,7 @@ Lars Grüter <lagru@mailbox.org>
Lars Grüter <lagru@mailbox.org> <lagru@users.noreply.github.com>
Leonardus Chen <leonardus.chen@gmail.com>
Licht Takeuchi <licht-t@outlook.jp> <licht-t@math.dis.titech.ac.jp>
+Lorenzo Mammana <mammanalorenzo@outlook.it> <lorenzom96@hotmail.it>
Luis Pedro Coelho <luis@luispedro.org> <lpc@cmu.edu>
Luke Zoltan Kelley <lkelley@cfa.harvard.edu>
Madhulika Jain Chambers <madhulikajain@gmail.com> <53166646+madhulikajc@users.noreply.github.com>
@@ -329,6 +347,7 @@ Matt Ord <Matthew.ord1@gmail.com>
Matt Ord <Matthew.ord1@gmail.com> <55235095+Matt-Ord@users.noreply.github.com>
Matt Hancock <not.matt.hancock@gmail.com> <mhancock743@gmail.com>
Martino Sorbaro <martino.sorbaro@ed.ac.uk>
+Márton Gunyhó <marci@gunyho.com> <marci.gunyho@gmail.com>
Mattheus Ueckermann <empeeu@yahoo.com>
Matthew Barber <quitesimplymatt@gmail.com>
Matthew Harrigan <harrigan.matthew@gmail.com>
@@ -372,6 +391,8 @@ Omid Rajaei <rajaei.net@gmail.com> <89868505+rajaeinet@users.noreply.github.com>
Ondřej Čertík <ondrej.certik@gmail.com>
Óscar Villellas Guillén <oscar.villellas@continuum.io>
Panos Mavrogiorgos <pmav99@users.noreply.github.com>
+Pantelis Antonoudiou <pantelis.antonoudiou@gmail.com>
+Pantelis Antonoudiou <pantelis.antonoudiou@gmail.com> <pantelis71@hotmail.com>
Pat Miller <patmiller@localhost> patmiller <patmiller@localhost>
Paul Ivanov <pivanov5@bloomberg.net> <pi@berkeley.edu>
Paul Ivanov <pivanov5@bloomberg.net> <paul.ivanov@local>
@@ -446,6 +467,7 @@ Stefan van der Walt <stefanv@berkeley.edu> <sjvdwalt@gmail.com>
Stefan van der Walt <stefanv@berkeley.edu> <stefan@sun.ac.za>
Stephan Hoyer <shoyer@gmail.com> <shoyer@climate.com>
Stephan Hoyer <shoyer@gmail.com> <shoyer@google.com>
+Stephen Worsley <stephen.worsley@metoffice.gov.uk> <49274989+stephenworsley@users.noreply.github.com>
Steve Stagg <stestagg@gmail.com> <ste@sta.gg>
Steven J Kern <kern.steven0@gmail.com>
Stuart Archibald <stuart.archibald@googlemail.com> <stuart@opengamma.com>
@@ -506,3 +528,4 @@ Zieji Pohz <poh.ziji@gmail.com>
Zieji Pohz <poh.ziji@gmail.com> <8103276+zjpoh@users.noreply.github.com>
Zolboo Erdenebaatar <erdenebz@dickinson.edu>
Zolisa Bleki <zolisa.bleki@gmail.com> <44142765+zoj613@users.noreply.github.com>
+陳仲肯 (Chen, Jhong-Ken) <kenny.kuo.fs@gmail.com>
diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py
index 30647f4b8..4fcd7ace5 100644
--- a/benchmarks/benchmarks/bench_core.py
+++ b/benchmarks/benchmarks/bench_core.py
@@ -207,7 +207,7 @@ class Indices(Benchmark):
np.indices((1000, 500))
class VarComplex(Benchmark):
- params = [10**n for n in range(1, 9)]
+ params = [10**n for n in range(0, 9)]
def setup(self, n):
self.arr = np.random.randn(n) + 1j * np.random.randn(n)
diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py
index d52d7692c..3e35f54f2 100644
--- a/benchmarks/benchmarks/bench_function_base.py
+++ b/benchmarks/benchmarks/bench_function_base.py
@@ -43,6 +43,20 @@ class Bincount(Benchmark):
np.bincount(self.d, weights=self.e)
+class Mean(Benchmark):
+ param_names = ['size']
+ params = [[1, 10, 100_000]]
+
+ def setup(self, size):
+ self.array = np.arange(2*size).reshape(2, size)
+
+ def time_mean(self, size):
+ np.mean(self.array)
+
+ def time_mean_axis(self, size):
+ np.mean(self.array, axis=1)
+
+
class Median(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
@@ -78,7 +92,7 @@ class Median(Benchmark):
class Percentile(Benchmark):
def setup(self):
self.e = np.arange(10000, dtype=np.float32)
- self.o = np.arange(10001, dtype=np.float32)
+ self.o = np.arange(21, dtype=np.float32)
def time_quartile(self):
np.percentile(self.e, [25, 75])
@@ -86,6 +100,9 @@ class Percentile(Benchmark):
def time_percentile(self):
np.percentile(self.e, [25, 35, 55, 65, 75])
+ def time_percentile_small(self):
+ np.percentile(self.o, [25, 75])
+
class Select(Benchmark):
def setup(self):
diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py
index 02e657668..a94ba1139 100644
--- a/benchmarks/benchmarks/bench_linalg.py
+++ b/benchmarks/benchmarks/bench_linalg.py
@@ -98,6 +98,18 @@ class Linalg(Benchmark):
self.func(self.a)
+class LinalgSmallArrays(Benchmark):
+ """ Test overhead of linalg methods for small arrays """
+ def setup(self):
+ self.array_5 = np.arange(5.)
+        self.array_5_5 = np.arange(25.).reshape(5, 5)
+
+ def time_norm_small_array(self):
+ np.linalg.norm(self.array_5)
+
+ def time_det_small_array(self):
+ np.linalg.det(self.array_5_5)
+
class Lstsq(Benchmark):
def setup(self):
self.a = get_squares_()['float64']
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index 5aff1f56d..cfa29017d 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -57,10 +57,47 @@ class UFunc(Benchmark):
def time_ufunc_types(self, ufuncname):
[self.f(*arg) for arg in self.args]
+class UFuncSmall(Benchmark):
+ """ Benchmark for a selection of ufuncs on a small arrays and scalars
+
+ Since the arrays and scalars are small, we are benchmarking the overhead
+ of the numpy ufunc functionality
+ """
+ params = ['abs', 'sqrt', 'cos']
+ param_names = ['ufunc']
+ timeout = 10
+
+ def setup(self, ufuncname):
+ np.seterr(all='ignore')
+ try:
+ self.f = getattr(np, ufuncname)
+ except AttributeError:
+ raise NotImplementedError()
+ self.array_5 = np.array([1., 2., 10., 3., 4.])
+ self.array_int_3 = np.array([1, 2, 3])
+ self.float64 = np.float64(1.1)
+ self.python_float = 1.1
+
+ def time_ufunc_small_array(self, ufuncname):
+ self.f(self.array_5)
+
+ def time_ufunc_small_array_inplace(self, ufuncname):
+        self.f(self.array_5, out=self.array_5)
+
+ def time_ufunc_small_int_array(self, ufuncname):
+ self.f(self.array_int_3)
+
+ def time_ufunc_numpy_scalar(self, ufuncname):
+ self.f(self.float64)
+
+ def time_ufunc_python_float(self, ufuncname):
+ self.f(self.python_float)
+
class Custom(Benchmark):
def setup(self):
self.b = np.ones(20000, dtype=bool)
+ self.b_small = np.ones(3, dtype=bool)
def time_nonzero(self):
np.nonzero(self.b)
@@ -74,6 +111,9 @@ class Custom(Benchmark):
def time_or_bool(self):
(self.b | self.b)
+ def time_and_bool_small(self):
+ (self.b_small & self.b_small)
+
class CustomInplace(Benchmark):
def setup(self):
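The ``UFuncSmall`` benchmarks above measure per-call dispatch overhead rather
than throughput. The same effect can be observed directly with ``timeit``; a
minimal standalone sketch (array sizes are illustrative, timings are
machine-dependent)::

    import timeit
    import numpy as np

    small = np.array([1., 2., 10., 3., 4.])
    big = np.linspace(0.0, 1.0, 100_000)

    # For the 5-element array the fixed per-call overhead dominates;
    # for the large array the per-element work dominates.
    print(timeit.timeit(lambda: np.sqrt(small), number=100_000))
    print(timeit.timeit(lambda: np.sqrt(big), number=1_000))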
diff --git a/doc/release/upcoming_changes/16830.expired.rst b/doc/release/upcoming_changes/16830.expired.rst
new file mode 100644
index 000000000..2994a7d9e
--- /dev/null
+++ b/doc/release/upcoming_changes/16830.expired.rst
@@ -0,0 +1,6 @@
+``NpzFile.iteritems()`` and ``NpzFile.iterkeys()`` are removed
+--------------------------------------------------------------
+
+As part of the continued removal of Python 2 compatibility, the
+``NpzFile.iteritems()`` and ``NpzFile.iterkeys()`` methods have been removed.
+This concludes the deprecation that began in 1.15.
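Code that used the removed methods can rely on the standard mapping API
instead; a minimal sketch (``archive.npz`` is a placeholder name)::

    >>> import numpy as np
    >>> np.savez("archive.npz", x=np.arange(3))
    >>> npz = np.load("archive.npz")
    >>> list(npz.keys())                  # replaces npz.iterkeys()
    ['x']
    >>> [(k, v) for k, v in npz.items()]  # replaces npz.iteritems()
    [('x', array([0, 1, 2]))]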
diff --git a/doc/release/upcoming_changes/20020.new_function.rst b/doc/release/upcoming_changes/20020.new_function.rst
new file mode 100644
index 000000000..0f310ceac
--- /dev/null
+++ b/doc/release/upcoming_changes/20020.new_function.rst
@@ -0,0 +1,4 @@
+`ndenumerate` specialization for masked arrays
+----------------------------------------------
+The masked array module now provides the `numpy.ma.ndenumerate` function,
+an alternative to `numpy.ndenumerate` that skips masked values by default.
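A doctest-style sketch of the behavior described above (entries under the
mask are skipped)::

    >>> import numpy as np
    >>> a = np.ma.arange(4).reshape(2, 2)
    >>> a[1, 1] = np.ma.masked
    >>> list(np.ma.ndenumerate(a))
    [((0, 0), 0), ((0, 1), 1), ((1, 0), 2)]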
diff --git a/doc/release/upcoming_changes/20993.improvement.rst b/doc/release/upcoming_changes/20993.improvement.rst
new file mode 100644
index 000000000..f0019c45e
--- /dev/null
+++ b/doc/release/upcoming_changes/20993.improvement.rst
@@ -0,0 +1,5 @@
+``np.fromiter`` now accepts objects and subarrays
+-------------------------------------------------
+The `~numpy.fromiter` function now supports object and
+subarray dtypes. Please see the function documentation for
+examples.
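For instance, object dtypes, which were previously rejected, now work; a
small sketch::

    >>> import numpy as np
    >>> np.fromiter((str(x) for x in range(3)), dtype=object)
    array(['0', '1', '2'], dtype=object)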
diff --git a/doc/release/upcoming_changes/21485.new_feature.rst b/doc/release/upcoming_changes/21485.new_feature.rst
new file mode 100644
index 000000000..99fd5e92d
--- /dev/null
+++ b/doc/release/upcoming_changes/21485.new_feature.rst
@@ -0,0 +1,5 @@
+``keepdims`` parameter for ``average``
+--------------------------------------
+The parameter ``keepdims`` was added to the functions `numpy.average`
+and `numpy.ma.average`. The parameter has the same meaning as it
+does in reduction functions such as `numpy.sum` or `numpy.mean`.
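As in those reductions, ``keepdims=True`` retains the reduced axis with size
one, so the result still broadcasts against the input::

    >>> import numpy as np
    >>> a = np.arange(6).reshape(2, 3)
    >>> np.average(a, axis=1, keepdims=True)
    array([[1.],
           [4.]])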
diff --git a/doc/source/_static/numpy.css b/doc/source/_static/numpy.css
index 53b610bf1..fc296e8ab 100644
--- a/doc/source/_static/numpy.css
+++ b/doc/source/_static/numpy.css
@@ -22,7 +22,6 @@ h1 {
color: #013243; /* warm black */
}
-
h2 {
color: #4d77cf; /* han blue */
letter-spacing: -.03em;
@@ -32,3 +31,43 @@ h3 {
color: #013243; /* warm black */
letter-spacing: -.03em;
}
+
+/* Main page overview cards */
+
+.intro-card {
+ background: #fff;
+ border-radius: 0;
+ padding: 30px 10px 20px 10px;
+ margin: 10px 0px;
+}
+
+.intro-card p.card-text {
+ margin: 0px;
+}
+
+.intro-card .card-img-top {
+ height: 52px;
+ width: 52px;
+ margin-left: auto;
+ margin-right: auto;
+}
+
+.intro-card .card-header {
+ border: none;
+ background-color: white;
+ color: #150458 !important;
+ font-size: var(--pst-font-size-h5);
+ font-weight: bold;
+ padding: 2.5rem 0rem 0.5rem 0rem;
+}
+
+.intro-card .card-footer {
+ border: none;
+ background-color: white;
+}
+
+.intro-card .card-footer p.card-text {
+ max-width: 220px;
+ margin-left: auto;
+ margin-right: auto;
+}
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
deleted file mode 100644
index e2812fdd5..000000000
--- a/doc/source/_templates/layout.html
+++ /dev/null
@@ -1,10 +0,0 @@
-{% extends "!layout.html" %}
-
-{%- block extrahead %}
-{{ super() }}
-<link rel="stylesheet" href="{{ pathto('_static/numpy.css', 1) }}" type="text/css" />
-
- <!-- PR #17220: This is added via javascript in versionwarning.js -->
- <!-- link rel="canonical" href="http://numpy.org/doc/stable/{{ pagename }}{{ file_suffix }}" / -->
-
-{% endblock %}
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 1a201a3c5..5c056b201 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -196,6 +196,11 @@ html_theme_options = {
html_title = "%s v%s Manual" % (project, version)
html_static_path = ['_static']
html_last_updated_fmt = '%b %d, %Y'
+html_css_files = ["numpy.css"]
+
+# Prevent sphinx-panels from loading bootstrap css, the pydata-sphinx-theme
+# already loads it
+panels_add_bootstrap_css = False
html_use_modindex = True
html_copy_source = False
diff --git a/doc/source/index.rst b/doc/source/index.rst
index a753a21f1..1576d0672 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -11,6 +11,7 @@ NumPy documentation
User Guide <user/index>
API reference <reference/index>
Development <dev/index>
+ release
**Version**: |version|
@@ -35,7 +36,7 @@ basic statistical operations, random simulation and much more.
.. panels::
:card: + intro-card text-center
- :column: col-lg-6 col-md-6 col-sm-6 col-xs-12 p-2
+ :column: col-lg-6 col-md-6 col-sm-6 col-xs-12 d-flex
---
:img-top: ../source/_static/index-images/getting_started.svg
@@ -46,10 +47,12 @@ basic statistical operations, random simulation and much more.
New to NumPy? Check out the Absolute Beginner's Guide. It contains an
introduction to NumPy's main concepts and links to additional tutorials.
+ +++
+
.. link-button:: user/absolute_beginners
:type: ref
- :text:
- :classes: stretched-link
+ :text: To the absolute beginner's guide
+ :classes: btn-block btn-secondary stretched-link
---
:img-top: ../source/_static/index-images/user_guide.svg
@@ -60,10 +63,12 @@ basic statistical operations, random simulation and much more.
The user guide provides in-depth information on the
key concepts of NumPy with useful background information and explanation.
+ +++
+
.. link-button:: user
:type: ref
- :text:
- :classes: stretched-link
+ :text: To the user guide
+ :classes: btn-block btn-secondary stretched-link
---
:img-top: ../source/_static/index-images/api.svg
@@ -76,25 +81,29 @@ basic statistical operations, random simulation and much more.
methods work and which parameters can be used. It assumes that you have an
understanding of the key concepts.
+ +++
+
.. link-button:: reference
:type: ref
- :text:
- :classes: stretched-link
+ :text: To the reference guide
+ :classes: btn-block btn-secondary stretched-link
---
:img-top: ../source/_static/index-images/contributor.svg
Contributor's Guide
- ^^^^^^^^^^^^^^^
+ ^^^^^^^^^^^^^^^^^^^
Want to add to the codebase? Can you help add a translation or a flowchart to the
documentation? The contributing guidelines will guide you through the
process of improving NumPy.
+ +++
+
.. link-button:: devindex
:type: ref
- :text:
- :classes: stretched-link
+ :text: To the contributor's guide
+ :classes: btn-block btn-secondary stretched-link
.. This is not really the index page, that is found in
_templates/indexcontent.html The toctree content here will be added to the
diff --git a/doc/source/reference/c-api/array.rst b/doc/source/reference/c-api/array.rst
index d2e873802..f22b41a85 100644
--- a/doc/source/reference/c-api/array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -2176,8 +2176,8 @@ Array Functions
^^^^^^^^^^^^^^^
.. c:function:: int PyArray_AsCArray( \
- PyObject** op, void* ptr, npy_intp* dims, int nd, int typenum, \
- int itemsize)
+ PyObject** op, void* ptr, npy_intp* dims, int nd, \
+ PyArray_Descr* typedescr)
Sometimes it is useful to access a multidimensional array as a
C-style multi-dimensional array so that algorithms can be
@@ -2207,14 +2207,11 @@ Array Functions
The dimensionality of the array (1, 2, or 3).
- :param typenum:
+ :param typedescr:
- The expected data type of the array.
-
- :param itemsize:
-
- This argument is only needed when *typenum* represents a
- flexible array. Otherwise it should be 0.
+ A :c:type:`PyArray_Descr` structure indicating the desired data-type
+ (including required byteorder). The call will steal a reference to
+ the parameter.
.. note::
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index 5404c43d8..1de5c1c02 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -190,6 +190,7 @@ Finding masked data
.. autosummary::
:toctree: generated/
+ ma.ndenumerate
ma.flatnotmasked_contiguous
ma.flatnotmasked_edges
ma.notmasked_contiguous
diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst
index ca8f078f6..9b6aa65e2 100644
--- a/doc/source/user/misc.rst
+++ b/doc/source/user/misc.rst
@@ -123,8 +123,6 @@ Only a survey of the choices. Little detail on how each works.
- getting it wrong leads to memory leaks, and worse, segfaults
- - API will change for Python 3.0!
-
2) Cython
- Plusses:
@@ -183,21 +181,7 @@ Only a survey of the choices. Little detail on how each works.
- doesn't necessarily avoid reference counting issues or needing to know
API's
-5) scipy.weave
-
- - Plusses:
-
- - can turn many numpy expressions into C code
- - dynamic compiling and loading of generated C code
- - can embed pure C code in Python module and have weave extract, generate
- interfaces and compile, etc.
-
- - Minuses:
-
- - Future very uncertain: it's the only part of Scipy not ported to Python 3
- and is effectively deprecated in favor of Cython.
-
-6) Psyco
+5) Psyco
- Plusses:
@@ -226,5 +210,3 @@ Interfacing to C++:
3) Boost.python
4) SWIG
5) SIP (used mainly in PyQT)
-
-
diff --git a/environment.yml b/environment.yml
index ded3b1a1d..e503e9990 100644
--- a/environment.yml
+++ b/environment.yml
@@ -19,7 +19,7 @@ dependencies:
- pytest-xdist
- hypothesis
# For type annotations
- - mypy=0.942
+ - mypy=0.950
# For building docs
- sphinx=4.5.0
- sphinx-panels
@@ -29,6 +29,7 @@ dependencies:
- pandas
- matplotlib
- pydata-sphinx-theme=0.8.1
+ - doxygen
# NOTE: breathe 4.33.0 collides with sphinx.ext.graphviz
- breathe!=4.33.0
# For linting
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index f22692c02..a1115776e 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1926,6 +1926,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __matmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
+ def __matmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __matmul__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __matmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@@ -1941,6 +1943,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __rmatmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
+ def __rmatmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __rmatmul__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rmatmul__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@@ -2008,6 +2012,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __add__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
+ def __add__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __add__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
@overload
def __add__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
@@ -2029,6 +2035,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __radd__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
+ def __radd__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __radd__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
@overload
def __radd__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ...
@@ -2050,6 +2058,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __sub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
+ def __sub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __sub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
@overload
def __sub__(self: NDArray[datetime64], other: _ArrayLikeTD64_co) -> NDArray[datetime64]: ...
@@ -2071,6 +2081,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __rsub__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
+ def __rsub__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ... # type: ignore[misc]
@overload
def __rsub__(self: _ArrayTD64_co, other: _ArrayLikeDT64_co) -> NDArray[datetime64]: ... # type: ignore[misc]
@@ -2092,6 +2104,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __mul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
+ def __mul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __mul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
@overload
def __mul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
@@ -2111,6 +2125,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __rmul__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
+ def __rmul__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __rmul__(self: _ArrayTD64_co, other: _ArrayLikeFloat_co) -> NDArray[timedelta64]: ...
@overload
def __rmul__(self: _ArrayFloat_co, other: _ArrayLikeTD64_co) -> NDArray[timedelta64]: ...
@@ -2168,6 +2184,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
+ def __pow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __pow__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@@ -2183,6 +2201,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
@overload
+ def __rpow__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __rpow__(self: NDArray[object_], other: Any) -> Any: ...
@overload
def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co) -> Any: ...
@@ -2194,6 +2214,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __truediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
+ def __truediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __truediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
@overload
def __truediv__(self: NDArray[timedelta64], other: _ArrayLikeBool_co) -> NoReturn: ...
@@ -2211,6 +2233,8 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
@overload
def __rtruediv__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ... # type: ignore[misc]
@overload
+ def __rtruediv__(self: NDArray[number[Any]], other: _ArrayLikeNumber_co) -> NDArray[number[Any]]: ...
+ @overload
def __rtruediv__(self: NDArray[timedelta64], other: _SupportsArray[_dtype[timedelta64]] | _NestedSequence[_SupportsArray[_dtype[timedelta64]]]) -> NDArray[float64]: ...
@overload
def __rtruediv__(self: NDArray[bool_], other: _ArrayLikeTD64_co) -> NoReturn: ...
@@ -4368,4 +4392,4 @@ class chararray(ndarray[_ShapeType, _CharDType]):
class _SupportsDLPack(Protocol[_T_contra]):
def __dlpack__(self, *, stream: None | _T_contra = ...) -> _PyCapsule: ...
-def from_dlpack(__obj: _SupportsDLPack[None]) -> NDArray[Any]: ...
+def from_dlpack(obj: _SupportsDLPack[None], /) -> NDArray[Any]: ...
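The practical effect of the new ``number[Any]`` overloads can be checked with
mypy's ``reveal_type``; a small sketch (the annotated variable is
illustrative, not part of this diff)::

    from typing import Any
    import numpy as np
    import numpy.typing as npt

    a: npt.NDArray[np.number[Any]] = np.array([1, 2.0])
    # With the overloads above, mypy reveals
    # ndarray[Any, dtype[number[Any]]] instead of falling back to Any.
    reveal_type(a + 1.0)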
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index ea7b96fd4..fb9c30d93 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1318,6 +1318,7 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
text, the binary mode of `fromstring` will first encode it into
bytes using either utf-8 (python 3) or the default encoding
(python 2), neither of which produce sane results.
+
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
@@ -1398,6 +1399,11 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
An iterable object providing data for the array.
dtype : data-type
The data-type of the returned array.
+
+ .. versionchanged:: 1.23
+ Object and subarray dtypes are now supported (note that the final
+ result is not 1-D for a subarray dtype).
+
count : int, optional
The number of items to read from *iterable*. The default is -1,
which means all data is read.
@@ -1421,6 +1427,18 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
>>> np.fromiter(iterable, float)
array([ 0., 1., 4., 9., 16.])
+ A carefully constructed subarray dtype will lead to higher dimensional
+ results:
+
+ >>> iterable = ((x+1, x+2) for x in range(5))
+ >>> np.fromiter(iterable, dtype=np.dtype((int, 2)))
+ array([[1, 2],
+ [2, 3],
+ [3, 4],
+ [4, 5],
+ [5, 6]])
+
+
""".replace(
"${ARRAY_FUNCTION_LIKE}",
array_function_like_doc,
@@ -4857,6 +4875,15 @@ add_newdoc('numpy.core.multiarray', 'get_handler_version',
its memory, in which case you can traverse ``a.base`` for a memory handler.
""")
+add_newdoc('numpy.core.multiarray', '_get_madvise_hugepage',
+ """
+ _get_madvise_hugepage() -> bool
+
+ Get use of ``madvise (2)`` MADV_HUGEPAGE support when
+ allocating the array data. Returns the currently set value.
+ See `global_state` for more information.
+ """)
+
add_newdoc('numpy.core.multiarray', '_set_madvise_hugepage',
"""
_set_madvise_hugepage(enabled: bool) -> bool
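Together with the existing setter, the new getter allows toggling the
behaviour temporarily; a minimal sketch using the private helpers added
above::

    from numpy.core.multiarray import (
        _get_madvise_hugepage, _set_madvise_hugepage)

    previous = _set_madvise_hugepage(False)  # returns the prior setting
    assert _get_madvise_hugepage() is False  # always False off Linux, too
    _set_madvise_hugepage(previous)          # restore the old value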
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index a239e2c87..eda00147d 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -71,9 +71,10 @@ def _count_reduce_items(arr, axis, keepdims=False, where=True):
axis = tuple(range(arr.ndim))
elif not isinstance(axis, tuple):
axis = (axis,)
- items = nt.intp(1)
+ items = 1
for ax in axis:
items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
+ items = nt.intp(items)
else:
# TODO: Optimize case when `where` is broadcast along a non-reduction
# axis and full sum is more excessive than needed.
diff --git a/numpy/core/feature_detection_locale.h b/numpy/core/feature_detection_locale.h
index 0af1d6e7e..59e78936d 100644
--- a/numpy/core/feature_detection_locale.h
+++ b/numpy/core/feature_detection_locale.h
@@ -1 +1,2 @@
+#pragma GCC diagnostic ignored "-Wnonnull"
long double strtold_l(const char*, char**, locale_t);
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 22c103e93..11cc47765 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -1,6 +1,8 @@
/*
* This is a convenience header file providing compatibility utilities
- * for supporting Python 2 and Python 3 in the same code base.
+ * for supporting different minor versions of Python 3.
+ * It was originally used to support the transition from Python 2,
+ * hence the "3k" naming.
*
* If you want to use this for your own projects, it's recommended to make a
* copy of it. Although the stuff below is unlikely to change, we don't provide
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 1a37ed3e1..ee88ce30b 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -16,7 +16,7 @@ from ._multiarray_umath import * # noqa: F403
from ._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, from_dlpack, _insert, _reconstruct,
_vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version,
- _set_madvise_hugepage,
+ _get_madvise_hugepage, _set_madvise_hugepage,
)
__all__ = [
diff --git a/numpy/core/src/common/get_attr_string.h b/numpy/core/src/common/get_attr_string.h
index a3e5d5ec8..90eca5ee6 100644
--- a/numpy/core/src/common/get_attr_string.h
+++ b/numpy/core/src/common/get_attr_string.h
@@ -42,7 +42,7 @@ _is_basic_python_type(PyTypeObject *tp)
* on the type object, rather than on the instance itself.
*
* Assumes that the special method is a numpy-specific one, so does not look
- * at builtin types, nor does it look at a base ndarray.
+ * at builtin types. It does check base ndarray and numpy scalar types.
*
* In future, could be made more like _Py_LookupSpecial
*/
diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index ad86c0489..118ce9cb1 100644
--- a/numpy/core/src/common/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -692,6 +692,19 @@ PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(PyArrayObject *arr1, PyArrayObject *arr
return 1;
}
+ size1 = PyArray_SIZE(arr1);
+ stride1 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size1, arr1);
+
+ /*
+ * arr1 == arr2 is common for in-place operations, so we fast-path it here.
+ * TODO: The stride1 != 0 check rejects broadcast arrays. This may affect
+ * self-overlapping arrays, but seems only necessary due to
+ * `try_trivial_single_output_loop` not rejecting broadcast outputs.
+ */
+ if (arr1 == arr2 && stride1 != 0) {
+ return 1;
+ }
+
if (solve_may_share_memory(arr1, arr2, 1) == 0) {
return 1;
}
@@ -701,10 +714,7 @@ PyArray_EQUIVALENTLY_ITERABLE_OVERLAP_OK(PyArrayObject *arr1, PyArrayObject *arr
* arrays stride ahead faster than output arrays.
*/
- size1 = PyArray_SIZE(arr1);
size2 = PyArray_SIZE(arr2);
-
- stride1 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size1, arr1);
stride2 = PyArray_TRIVIAL_PAIR_ITERATION_STRIDE(size2, arr2);
/*
diff --git a/numpy/core/src/common/ufunc_override.c b/numpy/core/src/common/ufunc_override.c
index 2c3dc5cb3..4fb4d4b3e 100644
--- a/numpy/core/src/common/ufunc_override.c
+++ b/numpy/core/src/common/ufunc_override.c
@@ -5,6 +5,7 @@
#include "get_attr_string.h"
#include "npy_import.h"
#include "ufunc_override.h"
+#include "scalartypes.h"
/*
* Check whether an object has __array_ufunc__ defined on its class and it
@@ -30,6 +31,11 @@ PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj)
if (PyArray_CheckExact(obj)) {
return NULL;
}
+ /* Fast return for numpy scalar types */
+ if (is_anyscalar_exact(obj)) {
+ return NULL;
+ }
+
/*
* Does the class define __array_ufunc__? (Note that LookupSpecial has fast
* return for basic python types, so no need to worry about those here)
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 759a02aeb..6f18054ff 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -39,6 +39,25 @@ static int _madvise_hugepage = 1;
/*
+ * This function tells whether NumPy attempts to call `madvise` with
+ * `MADV_HUGEPAGE`. `madvise` is only ever used on linux, so the value
+ * of `_madvise_hugepage` may be ignored.
+ *
+ * It is exposed to Python as `np.core.multiarray._get_madvise_hugepage`.
+ */
+NPY_NO_EXPORT PyObject *
+_get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args))
+{
+#ifdef NPY_OS_LINUX
+ if (_madvise_hugepage) {
+ Py_RETURN_TRUE;
+ }
+#endif
+ Py_RETURN_FALSE;
+}
+
+
+/*
* This function enables or disables the use of `MADV_HUGEPAGE` on Linux
* by modifying the global static `_madvise_hugepage`.
* It returns the previous value of `_madvise_hugepage`.
diff --git a/numpy/core/src/multiarray/alloc.h b/numpy/core/src/multiarray/alloc.h
index 13c828458..e82f2d947 100644
--- a/numpy/core/src/multiarray/alloc.h
+++ b/numpy/core/src/multiarray/alloc.h
@@ -8,6 +8,9 @@
#define NPY_TRACE_DOMAIN 389047
NPY_NO_EXPORT PyObject *
+_get_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args));
+
+NPY_NO_EXPORT PyObject *
_set_madvise_hugepage(PyObject *NPY_UNUSED(self), PyObject *enabled_obj);
NPY_NO_EXPORT void *
diff --git a/numpy/core/src/multiarray/array_method.c b/numpy/core/src/multiarray/array_method.c
index b8a190f83..3450273b1 100644
--- a/numpy/core/src/multiarray/array_method.c
+++ b/numpy/core/src/multiarray/array_method.c
@@ -863,15 +863,17 @@ generic_masked_strided_loop(PyArrayMethod_Context *context,
/* Process unmasked values */
mask = npy_memchr(mask, 0, mask_stride, N, &subloopsize, 0);
- int res = strided_loop(context,
- dataptrs, &subloopsize, strides, strided_loop_auxdata);
- if (res != 0) {
- return res;
- }
- for (int i = 0; i < nargs; i++) {
- dataptrs[i] += subloopsize * strides[i];
+ if (subloopsize > 0) {
+ int res = strided_loop(context,
+ dataptrs, &subloopsize, strides, strided_loop_auxdata);
+ if (res != 0) {
+ return res;
+ }
+ for (int i = 0; i < nargs; i++) {
+ dataptrs[i] += subloopsize * strides[i];
+ }
+ N -= subloopsize;
}
- N -= subloopsize;
} while (N > 0);
return 0;
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index f696d8b76..a1f0e2d5b 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -468,10 +468,6 @@ array_dealloc(PyArrayObject *self)
free(fa->data);
}
else {
- /*
- * In theory `PyArray_NBYTES_ALLOCATED`, but differs somewhere?
- * So instead just use the knowledge that 0 is impossible.
- */
size_t nbytes = PyArray_NBYTES(self);
if (nbytes == 0) {
nbytes = 1;
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 8264f83b2..aa612146c 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -127,23 +127,6 @@ PyArray_DTypeFromObject(PyObject *obj, int maxdims, PyArray_Descr **out_dtype)
return 0;
}
-NPY_NO_EXPORT char *
-index2ptr(PyArrayObject *mp, npy_intp i)
-{
- npy_intp dim0;
-
- if (PyArray_NDIM(mp) == 0) {
- PyErr_SetString(PyExc_IndexError, "0-d arrays can't be indexed");
- return NULL;
- }
- dim0 = PyArray_DIMS(mp)[0];
- if (check_and_adjust_index(&i, dim0, 0, NULL) < 0)
- return NULL;
- if (i == 0) {
- return PyArray_DATA(mp);
- }
- return PyArray_BYTES(mp)+i*PyArray_STRIDES(mp)[0];
-}
NPY_NO_EXPORT int
_zerofill(PyArrayObject *ret)
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 85fd3aab1..a6c117745 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -43,9 +43,6 @@ NPY_NO_EXPORT int
PyArray_DTypeFromObject(PyObject *obj, int maxdims,
PyArray_Descr **out_dtype);
-NPY_NO_EXPORT int
-PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
- PyArray_Descr **out_dtype, int string_status);
/*
* Returns NULL without setting an exception if no scalar is matched, a
@@ -54,12 +51,6 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
NPY_NO_EXPORT PyArray_Descr *
_array_find_python_scalar_type(PyObject *op);
-NPY_NO_EXPORT PyArray_Descr *
-_array_typedescr_fromstr(char const *str);
-
-NPY_NO_EXPORT char *
-index2ptr(PyArrayObject *mp, npy_intp i);
-
NPY_NO_EXPORT int
_zerofill(PyArrayObject *ret);
@@ -248,6 +239,15 @@ npy_uint_alignment(int itemsize)
* compared to memchr it returns one stride past end instead of NULL if needle
* is not found.
*/
+#ifdef __clang__
+ /*
+ * The code below currently makes use of !NPY_ALIGNMENT_REQUIRED, which
+ * should be OK but causes the clang sanitizer to warn. It may make
+ * sense to modify the code to avoid this "unaligned" access but
+ * it would be good to carefully check the performance changes.
+ */
+ __attribute__((no_sanitize("alignment")))
+#endif
static NPY_INLINE char *
npy_memchr(char * haystack, char needle,
npy_intp stride, npy_intp size, npy_intp * psubloopsize, int invert)
@@ -292,35 +292,6 @@ npy_memchr(char * haystack, char needle,
return p;
}
-/*
- * Helper to work around issues with the allocation strategy currently
- * allocating not 1 byte for empty arrays, but enough for an array where
- * all 0 dimensions are replaced with size 1 (if the itemsize is not 0).
- *
- * This means that we can fill in nice (nonzero) strides and still handle
- * slicing direct math without being in danger of leaving the allocated byte
- * bounds.
- * In practice, that probably does not matter, but in principle this would be
- * undefined behaviour in C. Another solution may be to force the strides
- * to 0 in these cases. See also gh-15788.
- *
- * Unlike the code in `PyArray_NewFromDescr` does no overflow checks.
- */
-static NPY_INLINE npy_intp
-PyArray_NBYTES_ALLOCATED(PyArrayObject *arr)
-{
- if (PyArray_ITEMSIZE(arr) == 0) {
- return 1;
- }
- npy_intp nbytes = PyArray_ITEMSIZE(arr);
- for (int i = 0; i < PyArray_NDIM(arr); i++) {
- if (PyArray_DIMS(arr)[i] != 0) {
- nbytes *= PyArray_DIMS(arr)[i];
- }
- }
- return nbytes;
-}
-
/*
* Simple helper to create a tuple from an array of items. The `make_null_none`
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index f72ba11cd..743d14558 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -758,19 +758,17 @@ PyArray_NewFromDescr_int(
/*
* Copy dimensions, check them, and find total array size `nbytes`
- *
- * Note that we ignore 0-length dimensions, to match this in the `free`
- * calls, `PyArray_NBYTES_ALLOCATED` is a private helper matching this
- * behaviour, but without overflow checking.
*/
+ int is_zero = 0;
for (int i = 0; i < nd; i++) {
fa->dimensions[i] = dims[i];
if (fa->dimensions[i] == 0) {
/*
- * Compare to PyArray_OverflowMultiplyList that
- * returns 0 in this case. See also `PyArray_NBYTES_ALLOCATED`.
+ * Continue calculating the max size "as if" this were 1
+ * to get the proper overflow error
*/
+ is_zero = 1;
continue;
}
@@ -791,6 +789,9 @@ PyArray_NewFromDescr_int(
goto fail;
}
}
+ if (is_zero) {
+ nbytes = 0;
+ }
/* Fill the strides (or copy them if they were passed in) */
if (strides == NULL) {
@@ -825,11 +826,13 @@ PyArray_NewFromDescr_int(
* Allocate something even for zero-space arrays
* e.g. shape=(0,) -- otherwise buffer exposure
* (a.data) doesn't work as it should.
- * Could probably just allocate a few bytes here. -- Chuck
- * Note: always sync this with calls to PyDataMem_UserFREE
*/
if (nbytes == 0) {
- nbytes = descr->elsize ? descr->elsize : 1;
+ nbytes = 1;
+ /* Make sure all the strides are 0 */
+ for (int i = 0; i < nd; i++) {
+ fa->strides[i] = 0;
+ }
}
/*
* It is bad to have uninitialized OBJECT pointers
@@ -3894,11 +3897,9 @@ PyArray_FromString(char *data, npy_intp slen, PyArray_Descr *dtype,
NPY_NO_EXPORT PyObject *
PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
{
- PyObject *value;
PyObject *iter = NULL;
PyArrayObject *ret = NULL;
npy_intp i, elsize, elcount;
- char *item, *new_data;
if (dtype == NULL) {
return NULL;
@@ -3910,6 +3911,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
}
if (PyDataType_ISUNSIZED(dtype)) {
+ /* If this error is removed, the `ret` allocation may need fixing */
PyErr_SetString(PyExc_ValueError,
"Must specify length when using variable-size data-type.");
goto done;
@@ -3927,38 +3929,43 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
elsize = dtype->elsize;
/*
- * We would need to alter the memory RENEW code to decrement any
- * reference counts before throwing away any memory.
+ * Note that PyArray_DESCR(ret) may not match dtype. There are exactly
+ * two cases where this can happen: empty strings/bytes/void (rejected
+ * above) and subarray dtypes (supported by sticking with `dtype`).
*/
- if (PyDataType_REFCHK(dtype)) {
- PyErr_SetString(PyExc_ValueError,
- "cannot create object arrays from iterator");
- goto done;
- }
-
+ Py_INCREF(dtype);
ret = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1,
&elcount, NULL,NULL, 0, NULL);
- dtype = NULL;
if (ret == NULL) {
goto done;
}
- for (i = 0; (i < count || count == -1) &&
- (value = PyIter_Next(iter)); i++) {
- if (i >= elcount && elsize != 0) {
+
+ char *item = PyArray_BYTES(ret);
+ for (i = 0; i < count || count == -1; i++, item += elsize) {
+ PyObject *value = PyIter_Next(iter);
+ if (value == NULL) {
+ if (PyErr_Occurred()) {
+                /* Fetching next item failed with a real error, not exhaustion */
+ goto done;
+ }
+ break;
+ }
+
+ if (NPY_UNLIKELY(i >= elcount) && elsize != 0) {
+ char *new_data = NULL;
npy_intp nbytes;
/*
Grow PyArray_DATA(ret):
        this is similar to the strategy for PyListObject, but we use
50% overallocation => 0, 4, 8, 14, 23, 36, 56, 86 ...
+ TODO: The loadtxt code now uses a `growth` helper that would
+ be suitable to reuse here.
*/
elcount = (i >> 1) + (i < 4 ? 4 : 2) + i;
if (!npy_mul_with_overflow_intp(&nbytes, elcount, elsize)) {
/* The handler is always valid */
- new_data = PyDataMem_UserRENEW(PyArray_DATA(ret), nbytes,
- PyArray_HANDLER(ret));
- }
- else {
- new_data = NULL;
+ new_data = PyDataMem_UserRENEW(
+ PyArray_BYTES(ret), nbytes, PyArray_HANDLER(ret));
}
if (new_data == NULL) {
PyErr_SetString(PyExc_MemoryError,
@@ -3967,44 +3974,66 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
goto done;
}
((PyArrayObject_fields *)ret)->data = new_data;
+ /* resize array for cleanup: */
+ PyArray_DIMS(ret)[0] = elcount;
+ /* Reset `item` pointer to point into realloc'd chunk */
+ item = new_data + i * elsize;
+ if (PyDataType_FLAGCHK(dtype, NPY_NEEDS_INIT)) {
+ /* Initialize new chunk: */
+ memset(item, 0, nbytes - i * elsize);
+ }
}
- PyArray_DIMS(ret)[0] = i + 1;
- if (((item = index2ptr(ret, i)) == NULL) ||
- PyArray_SETITEM(ret, item, value) == -1) {
+ if (PyArray_Pack(dtype, item, value) < 0) {
Py_DECREF(value);
goto done;
}
Py_DECREF(value);
}
-
- if (PyErr_Occurred()) {
- goto done;
- }
if (i < count) {
- PyErr_SetString(PyExc_ValueError,
- "iterator too short");
+ PyErr_Format(PyExc_ValueError,
+ "iterator too short: Expected %zd but iterator had only %zd "
+ "items.", (Py_ssize_t)count, (Py_ssize_t)i);
goto done;
}
/*
- * Realloc the data so that don't keep extra memory tied up
- * (assuming realloc is reasonably good about reusing space...)
+     * Realloc the data so that we don't keep extra memory tied up and fix
+     * the array's first dimension (there could be more than one).
*/
if (i == 0 || elsize == 0) {
/* The size cannot be zero for realloc. */
- goto done;
}
- /* The handler is always valid */
- new_data = PyDataMem_UserRENEW(PyArray_DATA(ret), i * elsize,
- PyArray_HANDLER(ret));
- if (new_data == NULL) {
- PyErr_SetString(PyExc_MemoryError,
- "cannot allocate array memory");
- goto done;
+ else {
+ /* Resize array to actual final size (it may be too large) */
+ /* The handler is always valid */
+ char *new_data = PyDataMem_UserRENEW(
+ PyArray_DATA(ret), i * elsize, PyArray_HANDLER(ret));
+
+ if (new_data == NULL) {
+ PyErr_SetString(PyExc_MemoryError,
+ "cannot allocate array memory");
+ goto done;
+ }
+ ((PyArrayObject_fields *)ret)->data = new_data;
+
+ if (count < 0 || NPY_RELAXED_STRIDES_DEBUG) {
+ /*
+ * If the count was smaller than zero or NPY_RELAXED_STRIDES_DEBUG
+ * was active, the strides may be all 0 or intentionally mangled
+             * (even in the later dimensions for `count < 0`)!
+ * Thus, fix all strides here again for C-contiguity.
+ */
+ int oflags;
+ _array_fill_strides(
+ PyArray_STRIDES(ret), PyArray_DIMS(ret), PyArray_NDIM(ret),
+ PyArray_ITEMSIZE(ret), NPY_ARRAY_C_CONTIGUOUS, &oflags);
+ PyArray_STRIDES(ret)[0] = elsize;
+ assert(oflags & NPY_ARRAY_C_CONTIGUOUS);
+ }
}
- ((PyArrayObject_fields *)ret)->data = new_data;
+ PyArray_DIMS(ret)[0] = i;
done:
Py_XDECREF(iter);
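The 50% overallocation rule quoted in the comment above can be transcribed
into Python to check the listed sequence; a sketch (``grow`` is a
hypothetical name, the formula is copied from the C code)::

    def grow(i):
        # mirrors: elcount = (i >> 1) + (i < 4 ? 4 : 2) + i
        return (i >> 1) + (4 if i < 4 else 2) + i

    sizes, n = [], 0
    while n < 100:
        n = grow(n)
        sizes.append(n)
    print(sizes)  # [4, 8, 14, 23, 36, 56, 86, 131]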
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index d6406845a..eb55e5e61 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -384,7 +384,10 @@ array_data_set(PyArrayObject *self, PyObject *op, void *NPY_UNUSED(ignored))
}
if (PyArray_FLAGS(self) & NPY_ARRAY_OWNDATA) {
PyArray_XDECREF(self);
- size_t nbytes = PyArray_NBYTES_ALLOCATED(self);
+ size_t nbytes = PyArray_NBYTES(self);
+ if (nbytes == 0) {
+ nbytes = 1;
+ }
PyObject *handler = PyArray_HANDLER(self);
if (handler == NULL) {
/* This can happen if someone arbitrarily sets NPY_ARRAY_OWNDATA */
@@ -513,6 +516,7 @@ array_descr_set(PyArrayObject *self, PyObject *arg, void *NPY_UNUSED(ignored))
/* resize on last axis only */
int axis = PyArray_NDIM(self) - 1;
if (PyArray_DIMS(self)[axis] != 1 &&
+ PyArray_SIZE(self) != 0 &&
PyArray_STRIDES(self)[axis] != PyArray_DESCR(self)->elsize) {
PyErr_SetString(PyExc_ValueError,
"To change to a dtype of a different size, the last axis "
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index bd6318206..b738c1d44 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -2023,7 +2023,10 @@ array_setstate(PyArrayObject *self, PyObject *args)
* since fa could be a 0-d or scalar, and then
* PyDataMem_UserFREE will be confused
*/
- size_t n_tofree = PyArray_NBYTES_ALLOCATED(self);
+ size_t n_tofree = PyArray_NBYTES(self);
+ if (n_tofree == 0) {
+ n_tofree = 1;
+ }
Py_XDECREF(PyArray_DESCR(self));
fa->descr = typecode;
Py_INCREF(typecode);
@@ -2160,7 +2163,10 @@ array_setstate(PyArrayObject *self, PyObject *args)
/* Bytes should always be considered immutable, but we just grab the
* pointer if they are large, to save memory. */
if (!IsAligned(self) || swap || (len <= 1000)) {
- npy_intp num = PyArray_NBYTES_ALLOCATED(self);
+ npy_intp num = PyArray_NBYTES(self);
+ if (num == 0) {
+ num = 1;
+ }
/* Store the handler in case the default is modified */
Py_XDECREF(fa->mem_handler);
fa->mem_handler = PyDataMem_GetHandler();
@@ -2223,7 +2229,10 @@ array_setstate(PyArrayObject *self, PyObject *args)
}
}
else {
- npy_intp num = PyArray_NBYTES_ALLOCATED(self);
+ npy_intp num = PyArray_NBYTES(self);
+ if (num == 0) {
+ num = 1;
+ }
/* Store the functions in case the default handler is modified */
Py_XDECREF(fa->mem_handler);
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 5e51bcaa6..ce47276db 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -4488,6 +4488,8 @@ static struct PyMethodDef array_module_methods[] = {
METH_VARARGS, NULL},
{"_get_sfloat_dtype",
get_sfloat_dtype, METH_NOARGS, NULL},
+ {"_get_madvise_hugepage", (PyCFunction)_get_madvise_hugepage,
+ METH_NOARGS, NULL},
{"_set_madvise_hugepage", (PyCFunction)_set_madvise_hugepage,
METH_O, NULL},
{"_reload_guard", (PyCFunction)_reload_guard,
diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h
index 6132d0d71..cbd1f04aa 100644
--- a/numpy/core/src/umath/fast_loop_macros.h
+++ b/numpy/core/src/umath/fast_loop_macros.h
@@ -10,6 +10,8 @@
#ifndef _NPY_UMATH_FAST_LOOP_MACROS_H_
#define _NPY_UMATH_FAST_LOOP_MACROS_H_
+#include <assert.h>
+
/*
* MAX_STEP_SIZE is used to determine if we need to use SIMD version of the ufunc.
* Very large step size can be as slow as processing it using scalar. The
@@ -99,12 +101,19 @@ abs_ptrdiff(char *a, char *b)
#define IS_OUTPUT_CONT(tout) (steps[1] == sizeof(tout))
-#define IS_BINARY_REDUCE ((args[0] == args[2])\
+/*
+ * Make sure dimensions[0] is non-zero with an assert, so that subsequent
+ * code can ignore the problem of accessing invalid memory
+ */
+
+#define IS_BINARY_REDUCE (assert(dimensions[0] != 0), \
+ (args[0] == args[2])\
&& (steps[0] == steps[2])\
&& (steps[0] == 0))
/* input contiguous (for binary reduces only) */
-#define IS_BINARY_REDUCE_INPUT_CONT(tin) (steps[1] == sizeof(tin))
+#define IS_BINARY_REDUCE_INPUT_CONT(tin) (assert(dimensions[0] != 0), \
+ steps[1] == sizeof(tin))
/* binary loop input and output contiguous */
#define IS_BINARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index f273cdd91..8fb219b63 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -747,13 +747,23 @@ static NPY_INLINE int
/*
* Enum used to describe the space of possibilities when converting the second
* argument to a binary operation.
+ * Any of these flags may be combined with the return flag of
+ * `may_need_deferring`, indicating that the other operand is some type of
+ * object which may e.g. define an `__array_priority__`.
*/
typedef enum {
/* An error occurred (should not really happen/be possible) */
CONVERSION_ERROR = -1,
/* A known NumPy scalar, but of higher precision: we defer */
DEFER_TO_OTHER_KNOWN_SCALAR,
- /* Conversion was successful (known scalar of less precision) */
+ /*
+ * Conversion was successful (known scalar of less precision). Note that
+ * the other value may still be a subclass of such a scalar so even here
+ * we may have to check for deferring.
+ * More specialized subclass handling, which defers based on whether the
+     * subclass has an implementation, is plausible but complicated.
+ * We do not do it, as even CPython does not do it for the builtin `int`.
+ */
CONVERSION_SUCCESS,
/*
* Other object is an unkown scalar or array-like, we (typically) use
@@ -764,11 +774,6 @@ typedef enum {
* Promotion necessary
*/
PROMOTION_REQUIRED,
- /*
- * The other object may be a subclass, conversion is successful. We do
- * not special case this as Python's `int` does not either
- */
- OTHER_IS_SUBCLASS,
} conversion_result;
/**begin repeat
@@ -817,7 +822,6 @@ typedef enum {
#define GET_VALUE_OR_DEFER(OTHER, Other, value) \
case NPY_##OTHER: \
if (IS_SAFE(NPY_##OTHER, NPY_@TYPE@)) { \
- assert(Py_TYPE(value) == &Py##Other##ArrType_Type); \
CONVERT_TO_RESULT(PyArrayScalar_VAL(value, Other)); \
ret = CONVERSION_SUCCESS; \
} \
@@ -877,12 +881,20 @@ typedef enum {
*
* @param value The value to convert (if compatible)
* @param result The result value (output)
+ * @param may_need_deferring Set to `NPY_TRUE` when the caller must check
+ * `BINOP_GIVE_UP_IF_NEEDED` (or similar) due to possible implementation
+ * of `__array_priority__` (or similar).
+ * This is set for unknown objects and all subclasses even when they
+ * can be handled.
* @result The result value indicating what we did with `value` or what type
* of object it is (see `conversion_result`).
*/
static NPY_INLINE conversion_result
-convert_to_@name@(PyObject *value, @type@ *result)
+convert_to_@name@(PyObject *value, @type@ *result, npy_bool *may_need_deferring)
{
+ PyArray_Descr *descr;
+ *may_need_deferring = NPY_FALSE;
+
if (Py_TYPE(value) == &Py@Name@ArrType_Type) {
*result = PyArrayScalar_VAL(value, @Name@);
return CONVERSION_SUCCESS;
@@ -892,9 +904,11 @@ convert_to_@name@(PyObject *value, @type@ *result)
*result = PyArrayScalar_VAL(value, @Name@);
/*
         * In principle special, asymmetric, handling could be possible for
- * subclasses. But in practice even we do not bother later.
+ * explicit subclasses.
+ * In practice, we just check the normal deferring logic.
*/
- return OTHER_IS_SUBCLASS;
+ *may_need_deferring = NPY_TRUE;
+ return CONVERSION_SUCCESS;
}
/*
@@ -906,12 +920,33 @@ convert_to_@name@(PyObject *value, @type@ *result)
return CONVERSION_SUCCESS;
}
- if (IS_SAFE(NPY_DOUBLE, NPY_@TYPE@) && PyFloat_CheckExact(value)) {
+ if (PyFloat_Check(value)) {
+ if (!PyFloat_CheckExact(value)) {
+ /* A NumPy double is a float subclass, but special. */
+ if (PyArray_IsScalar(value, Double)) {
+ descr = PyArray_DescrFromType(NPY_DOUBLE);
+ goto numpy_scalar;
+ }
+ *may_need_deferring = NPY_TRUE;
+ }
+ if (!IS_SAFE(NPY_DOUBLE, NPY_@TYPE@)) {
+ return PROMOTION_REQUIRED;
+ }
CONVERT_TO_RESULT(PyFloat_AS_DOUBLE(value));
return CONVERSION_SUCCESS;
}
- if (IS_SAFE(NPY_LONG, NPY_@TYPE@) && PyLong_CheckExact(value)) {
+ if (PyLong_Check(value)) {
+ if (!PyLong_CheckExact(value)) {
+ *may_need_deferring = NPY_TRUE;
+ }
+ if (!IS_SAFE(NPY_LONG, NPY_@TYPE@)) {
+ /*
+             * long -> (c)longdouble is safe, so `OTHER_IS_UNKNOWN_OBJECT` will
+ * be returned below for huge integers.
+ */
+ return PROMOTION_REQUIRED;
+ }
int overflow;
long val = PyLong_AsLongAndOverflow(value, &overflow);
if (overflow) {
@@ -924,8 +959,19 @@ convert_to_@name@(PyObject *value, @type@ *result)
return CONVERSION_SUCCESS;
}
+ if (PyComplex_Check(value)) {
+ if (!PyComplex_CheckExact(value)) {
+ /* A NumPy complex double is a float subclass, but special. */
+ if (PyArray_IsScalar(value, CDouble)) {
+ descr = PyArray_DescrFromType(NPY_CDOUBLE);
+ goto numpy_scalar;
+ }
+ *may_need_deferring = NPY_TRUE;
+ }
+ if (!IS_SAFE(NPY_CDOUBLE, NPY_@TYPE@)) {
+ return PROMOTION_REQUIRED;
+ }
#if defined(IS_CFLOAT) || defined(IS_CDOUBLE) || defined(IS_CLONGDOUBLE)
- if (IS_SAFE(NPY_CDOUBLE, NPY_@TYPE@) && PyComplex_CheckExact(value)) {
Py_complex val = PyComplex_AsCComplex(value);
if (error_converting(val.real)) {
return CONVERSION_ERROR; /* should not be possible */
@@ -933,16 +979,24 @@ convert_to_@name@(PyObject *value, @type@ *result)
result->real = val.real;
result->imag = val.imag;
return CONVERSION_SUCCESS;
- }
-#endif /* defined(IS_CFLOAT) || ... */
-
- PyObject *dtype = PyArray_DiscoverDTypeFromScalarType(Py_TYPE(value));
- if (dtype == Py_None) {
- Py_DECREF(dtype);
- /* Signal that this is an array or array-like: Defer to array logic */
+#else
+ /* unreachable, always unsafe cast above; return to avoid warning */
+ assert(0);
return OTHER_IS_UNKNOWN_OBJECT;
+#endif /* defined(IS_CFLOAT) || ... */
}
- else if (dtype == NULL) {
+
+ /*
+ * (seberg) It would be nice to use `PyArray_DiscoverDTypeFromScalarType`
+ * from array coercion here. OTOH, the array coercion code also falls
+ * back to this code. The issue is around how subclasses should work...
+ *
+ * It would be nice to try to fully align the paths again (they effectively
+ * are equivalent). Proper support for subclasses is in general tricky,
+ * and it would make more sense to just _refuse_ to support them.
+ * However, it is unclear that this is a viable option...
+ */
+ if (!PyArray_IsScalar(value, Generic)) {
/*
* The input is an unknown python object. This should probably defer
* but only does so for float128.
@@ -951,9 +1005,31 @@ convert_to_@name@(PyObject *value, @type@ *result)
* scalar to a Python scalar and then try again.
* The logic is that the ufunc casts the input to object, which does
* the conversion.
+ * If the object is an array, deferring will always kick in.
*/
+ *may_need_deferring = NPY_TRUE;
return OTHER_IS_UNKNOWN_OBJECT;
}
+
+ descr = PyArray_DescrFromScalar(value);
+ if (descr == NULL) {
+ if (PyErr_Occurred()) {
+ return CONVERSION_ERROR;
+ }
+ /* Should not happen, but may be possible with bad user subclasses */
+ *may_need_deferring = NPY_TRUE;
+ return OTHER_IS_UNKNOWN_OBJECT;
+ }
+
+ numpy_scalar:
+ if (descr->typeobj != Py_TYPE(value)) {
+ /*
+ * This is a subclass of a builtin type, we may continue normally,
+ * but should check whether we need to defer.
+ */
+ *may_need_deferring = NPY_TRUE;
+ }
+
/*
* Otherwise, we have a clear NumPy scalar, find if it is a compatible
* builtin scalar.
@@ -967,7 +1043,7 @@ convert_to_@name@(PyObject *value, @type@ *result)
* since it would disable `np.float64(1.) * [1, 2, 3, 4]`.
*/
int ret; /* set by the GET_VALUE_OR_DEFER macro */
- switch (((PyArray_DTypeMeta *)dtype)->type_num) {
+ switch (descr->type_num) {
GET_VALUE_OR_DEFER(BOOL, Bool, value);
/* UInts */
GET_VALUE_OR_DEFER(UBYTE, UByte, value);
@@ -984,9 +1060,8 @@ convert_to_@name@(PyObject *value, @type@ *result)
/* Floats */
case NPY_HALF:
if (IS_SAFE(NPY_HALF, NPY_@TYPE@)) {
- assert(Py_TYPE(value) == &PyHalfArrType_Type);
CONVERT_TO_RESULT(npy_half_to_float(PyArrayScalar_VAL(value, Half)));
- ret = 1;
+ ret = CONVERSION_SUCCESS;
}
else if (IS_SAFE(NPY_@TYPE@, NPY_HALF)) {
ret = DEFER_TO_OTHER_KNOWN_SCALAR;
@@ -1012,9 +1087,10 @@ convert_to_@name@(PyObject *value, @type@ *result)
* defer (which would be much faster potentially).
* TODO: We could add a DType flag to allow opting in to deferring!
*/
+ *may_need_deferring = NPY_TRUE;
ret = OTHER_IS_UNKNOWN_OBJECT;
}
- Py_DECREF(dtype);
+ Py_DECREF(descr);
return ret;
}
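The effect of the new `may_need_deferring` flag is easiest to see from Python: a
subclass of a known scalar type is still converted, but the operator now gives the
other side a chance to take over when it signals deferral via `__array_ufunc__` (or
`__array_priority__`). A sketch mirroring the new tests further down in this patch:

    import numpy as np

    class Quiet(float):              # plain subclass: handled normally
        pass

    class Deferring(float):          # asks NumPy to back off
        __array_ufunc__ = None
        def __radd__(self, other):
            return "deferred"

    assert np.float64(1) + Quiet(2) == 3.0
    assert np.float64(1) + Deferring(2) == "deferred"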
@@ -1079,12 +1155,21 @@ static PyObject *
/*
* Check if this operation may be considered forward. Note `is_forward`
- * does not imply that we can defer to a subclass `b`, we need to check
- * `BINOP_IS_FORWARD` for that (it takes into account that both may be
- * identicalclass).
+ * does not imply that we can defer to a subclass `b`. It just means that
+ * the first operand matches the method's scalar type.
*/
- int is_forward = (Py_TYPE(a)->tp_as_number != NULL
- && (void *)(Py_TYPE(a)->tp_as_number->nb_@oper@) == (void*)(@name@_@oper@));
+ int is_forward;
+ if (Py_TYPE(a) == &Py@Name@ArrType_Type) {
+ is_forward = 1;
+ }
+ else if (Py_TYPE(b) == &Py@Name@ArrType_Type) {
+ is_forward = 0;
+ }
+ else {
+ /* subclasses are involved */
+ is_forward = PyArray_IsScalar(a, @Name@);
+ assert(is_forward || PyArray_IsScalar(b, @Name@));
+ }
/*
* Extract the other value (if it is compatible). Otherwise, decide
@@ -1094,10 +1179,16 @@ static PyObject *
*/
PyObject *other = is_forward ? b : a;
- conversion_result res = convert_to_@name@(other, &other_val);
+ npy_bool may_need_deferring;
+ conversion_result res = convert_to_@name@(
+ other, &other_val, &may_need_deferring);
+ if (res == CONVERSION_ERROR) {
+ return NULL; /* an error occurred (should never happen) */
+ }
+ if (may_need_deferring) {
+ BINOP_GIVE_UP_IF_NEEDED(a, b, nb_@oper@, @name@_@oper@);
+ }
switch (res) {
- case CONVERSION_ERROR:
- return NULL; /* an error occurred (should never happen) */
case DEFER_TO_OTHER_KNOWN_SCALAR:
/*
* defer to other; This is normally a forward operation. However,
@@ -1109,26 +1200,30 @@ static PyObject *
case CONVERSION_SUCCESS:
break; /* successfully extracted value we can proceed */
case OTHER_IS_UNKNOWN_OBJECT:
+ /*
+ * Either an array-like, unknown scalar (any Python object, but
+ * also integers that are too large to convert to `long`), or
+ * even a subclass of a NumPy scalar (currently).
+ *
+ * Generally, we try dropping through to the array path here,
+ * but this can lead to infinite recursions for (c)longdouble.
+ */
#if defined(IS_longdouble) || defined(IS_clongdouble)
Py_RETURN_NOTIMPLEMENTED;
#endif
- BINOP_GIVE_UP_IF_NEEDED(a, b, nb_@oper@, @name@_@oper@);
case PROMOTION_REQUIRED:
/*
- * Either an array-like, unknown scalar or we need to promote.
+ * Python scalar that is larger than the current one, or two
+ * NumPy scalars that promote to a third (uint16 + int16 -> int32).
*
* TODO: We could special case the promotion case here for much
* better speed and to deal with integer overflow warnings
* correctly. (e.g. `uint8 * int8` cannot warn).
*/
return PyGenericArrType_Type.tp_as_number->nb_@oper@(a,b);
- case OTHER_IS_SUBCLASS:
- /*
- * Success converting. We _could_ in principle defer in cases
- * were the other subclass does not inherit the behavior. In
- * practice not even Python's `int` attempt this, so we also punt.
- */
- break;
+ default:
+ assert(0); /* error was checked already, impossible to reach */
+ return NULL;
}
#if @fperr@
@@ -1152,8 +1247,6 @@ static PyObject *
PyObject *obj;
#endif
-
-
/*
* here we do the actual calculation with arg1 and arg2
* as a function call.
@@ -1262,19 +1355,33 @@ static PyObject *
PyObject *ret;
@type@ arg1, arg2, other_val;
- int is_forward = (Py_TYPE(a)->tp_as_number != NULL
- && (void *)(Py_TYPE(a)->tp_as_number->nb_power) == (void*)(@name@_power));
-
+ int is_forward;
+ if (Py_TYPE(a) == &Py@Name@ArrType_Type) {
+ is_forward = 1;
+ }
+ else if (Py_TYPE(b) == &Py@Name@ArrType_Type) {
+ is_forward = 0;
+ }
+ else {
+ /* subclasses are involved */
+ is_forward = PyArray_IsScalar(a, @Name@);
+ assert(is_forward || PyArray_IsScalar(b, @Name@));
+ }
/*
* Extract the other value (if it is compatible). See the generic
* (non power) version above for detailed notes.
*/
PyObject *other = is_forward ? b : a;
- int res = convert_to_@name@(other, &other_val);
+ npy_bool may_need_deferring;
+ int res = convert_to_@name@(other, &other_val, &may_need_deferring);
+ if (res == CONVERSION_ERROR) {
+ return NULL; /* an error occurred (should never happen) */
+ }
+ if (may_need_deferring) {
+ BINOP_GIVE_UP_IF_NEEDED(a, b, nb_power, @name@_power);
+ }
switch (res) {
- case CONVERSION_ERROR:
- return NULL; /* an error occurred (should never happen) */
case DEFER_TO_OTHER_KNOWN_SCALAR:
Py_RETURN_NOTIMPLEMENTED;
case CONVERSION_SUCCESS:
@@ -1283,16 +1390,11 @@ static PyObject *
#if defined(IS_longdouble) || defined(IS_clongdouble)
Py_RETURN_NOTIMPLEMENTED;
#endif
- BINOP_GIVE_UP_IF_NEEDED(a, b, nb_power, @name@_power);
case PROMOTION_REQUIRED:
return PyGenericArrType_Type.tp_as_number->nb_power(a, b, modulo);
- case OTHER_IS_SUBCLASS:
- /*
- * Success converting. We _could_ in principle defer in cases
- * were the other subclass does not inherit the behavior. In
- * practice not even Python's `int` attempt this, so we also punt.
- */
- break;
+ default:
+ assert(0); /* error was checked already, impossible to reach */
+ return NULL;
}
#if !@isint@
@@ -1609,6 +1711,12 @@ static PyObject *
}
/**end repeat**/
+#if __GNUC__ < 10
+ /* At least GCC 9.2 issues spurious warnings for arg2 below. */
+ #pragma GCC diagnostic push /* matching pop after function and repeat */
+ #pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
/**begin repeat
* #oper = le, ge, lt, gt, eq, ne#
* #op = <=, >=, <, >, ==, !=#
@@ -1644,10 +1752,15 @@ static PyObject*
/*
* Extract the other value (if it is compatible).
*/
- int res = convert_to_@name@(other, &arg2);
+ npy_bool may_need_deferring;
+ int res = convert_to_@name@(other, &arg2, &may_need_deferring);
+ if (res == CONVERSION_ERROR) {
+ return NULL; /* an error occurred (should never happen) */
+ }
+ if (may_need_deferring) {
+ RICHCMP_GIVE_UP_IF_NEEDED(self, other);
+ }
switch (res) {
- case CONVERSION_ERROR:
- return NULL; /* an error occurred (should never happen) */
case DEFER_TO_OTHER_KNOWN_SCALAR:
Py_RETURN_NOTIMPLEMENTED;
case CONVERSION_SUCCESS:
@@ -1656,17 +1769,11 @@ static PyObject*
#if defined(IS_longdouble) || defined(IS_clongdouble)
Py_RETURN_NOTIMPLEMENTED;
#endif
- RICHCMP_GIVE_UP_IF_NEEDED(self, other);
case PROMOTION_REQUIRED:
return PyGenericArrType_Type.tp_richcompare(self, other, cmp_op);
- case OTHER_IS_SUBCLASS:
- /*
- * Success converting. We _could_ in principle defer in cases
- * were the other subclass does not inherit the behavior. In
- * practice not even Python's `int` attempt this, so we also punt.
- * (This is also even trickier for richcompare, though.)
- */
- break;
+ default:
+ assert(0); /* error was checked already, impossible to reach */
+ return NULL;
}
arg1 = PyArrayScalar_VAL(self, @Name@);
@@ -1704,6 +1811,11 @@ static PyObject*
#undef IS_@name@
/**end repeat**/
+#if __GNUC__ < 10
+ #pragma GCC diagnostic pop
+#endif
+
+
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong,
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 007f1bc53..290ed24a6 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -1206,6 +1206,7 @@ prepare_ufunc_output(PyUFuncObject *ufunc,
* cannot broadcast any other array (as it requires a single stride).
* The function accepts all 1-D arrays, and N-D arrays that are either all
* C- or all F-contiguous.
+ * NOTE: Broadcast outputs are implicitly rejected in the overlap detection.
*
* Returns -2 if a trivial loop is not possible, 0 on success and -1 on error.
*/
@@ -1321,6 +1322,10 @@ try_trivial_single_output_loop(PyArrayMethod_Context *context,
*/
char *data[NPY_MAXARGS];
npy_intp count = PyArray_MultiplyList(operation_shape, operation_ndim);
+ if (count == 0) {
+ /* Nothing to do */
+ return 0;
+ }
NPY_BEGIN_THREADS_DEF;
PyArrayMethod_StridedLoop *strided_loop;
@@ -2819,7 +2824,7 @@ reduce_loop(PyArrayMethod_Context *context,
npy_intp const *countptr, NpyIter_IterNextFunc *iternext,
int needs_api, npy_intp skip_first_count)
{
- int retval;
+ int retval = 0;
char *dataptrs_copy[4];
npy_intp strides_copy[4];
npy_bool masked;
@@ -2849,19 +2854,20 @@ reduce_loop(PyArrayMethod_Context *context,
count = 0;
}
}
-
- /* Turn the two items into three for the inner loop */
- dataptrs_copy[0] = dataptrs[0];
- dataptrs_copy[1] = dataptrs[1];
- dataptrs_copy[2] = dataptrs[0];
- strides_copy[0] = strides[0];
- strides_copy[1] = strides[1];
- strides_copy[2] = strides[0];
-
- retval = strided_loop(context,
- dataptrs_copy, &count, strides_copy, auxdata);
- if (retval < 0) {
- goto finish_loop;
+ if (count > 0) {
+ /* Turn the two items into three for the inner loop */
+ dataptrs_copy[0] = dataptrs[0];
+ dataptrs_copy[1] = dataptrs[1];
+ dataptrs_copy[2] = dataptrs[0];
+ strides_copy[0] = strides[0];
+ strides_copy[1] = strides[1];
+ strides_copy[2] = strides[0];
+
+ retval = strided_loop(context,
+ dataptrs_copy, &count, strides_copy, auxdata);
+ if (retval < 0) {
+ goto finish_loop;
+ }
}
/* Advance loop, and abort on error (or finish) */
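Both hunks skip the inner strided loop when there is nothing to iterate, so
zero-size operands no longer reach it. Illustrated from Python with plain existing
behavior:

    import numpy as np

    out = np.add(np.ones((0, 3)), np.ones((0, 3)))   # trivial loop, count == 0
    assert out.shape == (0, 3)

    res = np.add.reduce(np.ones((4, 0)), axis=1)     # reduce loop, count == 0
    assert res.shape == (4,) and (res == 0).all()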
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 2c7f0826e..356b53df9 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1064,6 +1064,12 @@ class TestDtypeAttributes:
pass
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
+ def test_zero_stride(self):
+ arr = np.ones(1, dtype="i8")
+ arr = np.broadcast_to(arr, 10)
+ assert arr.strides == (0,)
+ with pytest.raises(ValueError):
+ arr.dtype = "i1"
class TestDTypeMakeCanonical:
def check_canonical(self, dtype, canonical):
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 173927810..0b03c6576 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -1200,19 +1200,76 @@ class TestFromiter:
raise NIterError('error at index %s' % eindex)
yield e
- def test_2592(self):
- # Test iteration exceptions are correctly raised.
- count, eindex = 10, 5
- assert_raises(NIterError, np.fromiter,
- self.load_data(count, eindex), dtype=int, count=count)
-
- def test_2592_edge(self):
- # Test iter. exceptions, edge case (exception at end of iterator).
- count = 10
- eindex = count-1
- assert_raises(NIterError, np.fromiter,
- self.load_data(count, eindex), dtype=int, count=count)
+ @pytest.mark.parametrize("dtype", [int, object])
+ @pytest.mark.parametrize(["count", "error_index"], [(10, 5), (10, 9)])
+ def test_2592(self, count, error_index, dtype):
+ # Test iteration exceptions are correctly raised. The data/generator
+ # has `count` elements but errors at `error_index`
+ iterable = self.load_data(count, error_index)
+ with pytest.raises(NIterError):
+ np.fromiter(iterable, dtype=dtype, count=count)
+
+ @pytest.mark.parametrize("dtype", ["S", "S0", "V0", "U0"])
+ def test_empty_not_structured(self, dtype):
+        # Note, "S0" could be allowed at some point, as long as "S" (without
+ # any length) is rejected.
+ with pytest.raises(ValueError, match="Must specify length"):
+ np.fromiter([], dtype=dtype)
+
+ @pytest.mark.parametrize(["dtype", "data"],
+ [("d", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ ("O", [1, 2, 3, 4, 5, 6, 7, 8, 9]),
+ ("i,O", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
+ # subarray dtypes (important because their dimensions end up
+         # in the result array's dimensions):
+ ("2i", [(1, 2), (5, 4), (2, 3), (9, 8), (6, 7)]),
+ (np.dtype(("O", (2, 3))),
+ [((1, 2, 3), (3, 4, 5)), ((3, 2, 1), (5, 4, 3))])])
+ @pytest.mark.parametrize("length_hint", [0, 1])
+ def test_growth_and_complicated_dtypes(self, dtype, data, length_hint):
+ dtype = np.dtype(dtype)
+
+ data = data * 100 # make sure we realloc a bit
+
+ class MyIter:
+ # Class/example from gh-15789
+ def __length_hint__(self):
+ # only required to be an estimate, this is legal
+ return length_hint # 0 or 1
+
+ def __iter__(self):
+ return iter(data)
+
+ res = np.fromiter(MyIter(), dtype=dtype)
+ expected = np.array(data, dtype=dtype)
+
+ assert_array_equal(res, expected)
+
+ def test_empty_result(self):
+ class MyIter:
+ def __length_hint__(self):
+ return 10
+
+ def __iter__(self):
+ return iter([]) # actual iterator is empty.
+
+ res = np.fromiter(MyIter(), dtype="d")
+ assert res.shape == (0,)
+ assert res.dtype == "d"
+
+ def test_too_few_items(self):
+ msg = "iterator too short: Expected 10 but iterator had only 3 items."
+ with pytest.raises(ValueError, match=msg):
+ np.fromiter([1, 2, 3], count=10, dtype=int)
+
+ def test_failed_itemsetting(self):
+ with pytest.raises(TypeError):
+ np.fromiter([1, None, 3], dtype=int)
+ # The following manages to hit somewhat trickier code paths:
+ iterable = ((2, 3, 4) for i in range(5))
+ with pytest.raises(ValueError):
+ np.fromiter(iterable, dtype=np.dtype((int, 2)))
class TestNonzero:
def test_nonzero_trivial(self):
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 0ebdfec61..e578491b3 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -430,7 +430,6 @@ class TestRegression:
def test_lexsort_zerolen_custom_strides(self):
# Ticket #14228
xs = np.array([], dtype='i8')
- assert xs.strides == (8,)
assert np.lexsort((xs,)).shape[0] == 0 # Works
xs.strides = (16,)
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index a2b801760..b7fe5183e 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -967,9 +967,6 @@ def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
class myf_simple2(sctype):
pass
- def defer(self, other):
- return NotImplemented
-
def op_func(self, other):
return __op__
@@ -989,18 +986,55 @@ def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
assert op(myf_simple1(1), myf_op(2)) == op(1, 2) # inherited
+def test_longdouble_complex():
+ # Simple test to check longdouble and complex combinations, since these
+    # need to go through promotion, where longdouble must be handled carefully.
+ x = np.longdouble(1)
+ assert x + 1j == 1+1j
+ assert 1j + x == 1+1j
+
+
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
-@pytest.mark.parametrize("pytype", [float, int, complex])
-def test_pyscalar_subclasses(pytype, __op__, __rop__, op, cmp):
+@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
+def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
def op_func(self, other):
return __op__
def rop_func(self, other):
return __rop__
- myf = type("myf", (pytype,),
- {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
+ # Check that deferring is indicated using `__array_ufunc__`:
+ myt = type("myt", (subtype,),
+ {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})
# Just like normally, we should never presume we can modify the float.
- assert op(myf(1), np.float64(2)) == __op__
- assert op(np.float64(1), myf(2)) == __rop__
+ assert op(myt(1), np.float64(2)) == __op__
+ assert op(np.float64(1), myt(2)) == __rop__
+
+ if op in {operator.mod, operator.floordiv} and subtype == complex:
+        return  # modulo is not supported for complex. Do not test.
+
+ if __rop__ == __op__:
+ return
+
+ # When no deferring is indicated, subclasses are handled normally.
+ myt = type("myt", (subtype,), {__rop__: rop_func})
+
+    # Check with float32, since float64 (a float subclass itself) may differ.
+ res = op(myt(1), np.float16(2))
+ expected = op(subtype(1), np.float16(2))
+ assert res == expected
+ assert type(res) == type(expected)
+ res = op(np.float32(2), myt(1))
+ expected = op(np.float32(2), subtype(1))
+ assert res == expected
+ assert type(res) == type(expected)
+
+ # Same check for longdouble:
+ res = op(myt(1), np.longdouble(2))
+ expected = op(subtype(1), np.longdouble(2))
+ assert res == expected
+ assert type(res) == type(expected)
+    res = op(np.longdouble(2), myt(1))
+ expected = op(np.longdouble(2), subtype(1))
+ assert res == expected
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index d48e27966..3349a56e8 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -37,9 +37,6 @@ def get_msvcr_replacement():
msvcr = msvc_runtime_library()
return [] if msvcr is None else [msvcr]
-# monkey-patch cygwinccompiler with our updated version from misc_util
-# to avoid getting an exception raised on Python 3.5
-distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 513be75db..0bee5a8ec 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -699,10 +699,7 @@ def get_shared_lib_extension(is_python_ext=False):
"""
confvars = distutils.sysconfig.get_config_vars()
- # SO is deprecated in 3.3.1, use EXT_SUFFIX instead
- so_ext = confvars.get('EXT_SUFFIX', None)
- if so_ext is None:
- so_ext = confvars.get('SO', '')
+ so_ext = confvars.get('EXT_SUFFIX', '')
if not is_python_ext:
# hardcode known values, config vars (including SHLIB_SUFFIX) are
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 264fff8d6..625768b62 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -370,8 +370,7 @@ def _wrap_header(header, version):
import struct
assert version is not None
fmt, encoding = _header_size_info[version]
- if not isinstance(header, bytes): # always true on python 3
- header = header.encode(encoding)
+ header = header.encode(encoding)
hlen = len(header) + 1
padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
try:
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index d611dd225..b8ae9a470 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -388,12 +388,14 @@ def iterable(y):
return True
-def _average_dispatcher(a, axis=None, weights=None, returned=None):
+def _average_dispatcher(a, axis=None, weights=None, returned=None, *,
+ keepdims=None):
return (a, weights)
@array_function_dispatch(_average_dispatcher)
-def average(a, axis=None, weights=None, returned=False):
+def average(a, axis=None, weights=None, returned=False, *,
+ keepdims=np._NoValue):
"""
Compute the weighted average along the specified axis.
@@ -428,6 +430,14 @@ def average(a, axis=None, weights=None, returned=False):
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+ *Note:* `keepdims` will not work with instances of `numpy.matrix`
+ or other classes whose methods do not support `keepdims`.
+
+ .. versionadded:: 1.23.0
Returns
-------
@@ -471,7 +481,7 @@ def average(a, axis=None, weights=None, returned=False):
>>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
4.0
- >>> data = np.arange(6).reshape((3,2))
+ >>> data = np.arange(6).reshape((3, 2))
>>> data
array([[0, 1],
[2, 3],
@@ -488,11 +498,24 @@ def average(a, axis=None, weights=None, returned=False):
>>> avg = np.average(a, weights=w)
>>> print(avg.dtype)
complex256
+
+ With ``keepdims=True``, the following result has shape (3, 1).
+
+ >>> np.average(data, axis=1, keepdims=True)
+ array([[0.5],
+ [2.5],
+ [4.5]])
"""
a = np.asanyarray(a)
+ if keepdims is np._NoValue:
+ # Don't pass on the keepdims argument if one wasn't given.
+ keepdims_kw = {}
+ else:
+ keepdims_kw = {'keepdims': keepdims}
+
if weights is None:
- avg = a.mean(axis)
+ avg = a.mean(axis, **keepdims_kw)
scl = avg.dtype.type(a.size/avg.size)
else:
wgt = np.asanyarray(weights)
@@ -524,7 +547,8 @@ def average(a, axis=None, weights=None, returned=False):
raise ZeroDivisionError(
"Weights sum to zero, can't be normalized")
- avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
+ avg = np.multiply(a, wgt,
+ dtype=result_dtype).sum(axis, **keepdims_kw) / scl
if returned:
if scl.shape != avg.shape:
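A short doctest-style sketch of the weighted case the hunk above enables (the
unweighted case is already shown in the docstring):

    >>> import numpy as np
    >>> data = np.arange(6).reshape((3, 2))
    >>> np.average(data, axis=1, weights=[1, 3], keepdims=True)
    array([[0.75],
           [2.75],
           [4.75]])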
diff --git a/numpy/lib/function_base.pyi b/numpy/lib/function_base.pyi
index f8dbd8a8b..6c00d26b4 100644
--- a/numpy/lib/function_base.pyi
+++ b/numpy/lib/function_base.pyi
@@ -110,6 +110,7 @@ def average(
axis: None = ...,
weights: None | _ArrayLikeFloat_co= ...,
returned: L[False] = ...,
+ keepdims: L[False] = ...,
) -> floating[Any]: ...
@overload
def average(
@@ -117,6 +118,7 @@ def average(
axis: None = ...,
weights: None | _ArrayLikeComplex_co = ...,
returned: L[False] = ...,
+ keepdims: L[False] = ...,
) -> complexfloating[Any, Any]: ...
@overload
def average(
@@ -124,6 +126,7 @@ def average(
axis: None = ...,
weights: None | Any = ...,
returned: L[False] = ...,
+ keepdims: L[False] = ...,
) -> Any: ...
@overload
def average(
@@ -131,6 +134,7 @@ def average(
axis: None = ...,
weights: None | _ArrayLikeFloat_co= ...,
returned: L[True] = ...,
+ keepdims: L[False] = ...,
) -> _2Tuple[floating[Any]]: ...
@overload
def average(
@@ -138,6 +142,7 @@ def average(
axis: None = ...,
weights: None | _ArrayLikeComplex_co = ...,
returned: L[True] = ...,
+ keepdims: L[False] = ...,
) -> _2Tuple[complexfloating[Any, Any]]: ...
@overload
def average(
@@ -145,6 +150,7 @@ def average(
axis: None = ...,
weights: None | Any = ...,
returned: L[True] = ...,
+ keepdims: L[False] = ...,
) -> _2Tuple[Any]: ...
@overload
def average(
@@ -152,6 +158,7 @@ def average(
axis: None | _ShapeLike = ...,
weights: None | Any = ...,
returned: L[False] = ...,
+ keepdims: bool = ...,
) -> Any: ...
@overload
def average(
@@ -159,6 +166,7 @@ def average(
axis: None | _ShapeLike = ...,
weights: None | Any = ...,
returned: L[True] = ...,
+ keepdims: bool = ...,
) -> _2Tuple[Any]: ...
@overload
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 33b49127c..210c0ea94 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -250,26 +250,6 @@ class NpzFile(Mapping):
else:
raise KeyError("%s is not a file in the archive" % key)
- # deprecate the python 2 dict apis that we supported by accident in
- # python 3. We forgot to implement itervalues() at all in earlier
- # versions of numpy, so no need to deprecated it here.
-
- def iteritems(self):
- # Numpy 1.15, 2018-02-20
- warnings.warn(
- "NpzFile.iteritems is deprecated in python 3, to match the "
- "removal of dict.itertems. Use .items() instead.",
- DeprecationWarning, stacklevel=2)
- return self.items()
-
- def iterkeys(self):
- # Numpy 1.15, 2018-02-20
- warnings.warn(
- "NpzFile.iterkeys is deprecated in python 3, to match the "
- "removal of dict.iterkeys. Use .keys() instead.",
- DeprecationWarning, stacklevel=2)
- return self.keys()
-
@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi
index 231ed7584..8007b2dc7 100644
--- a/numpy/lib/npyio.pyi
+++ b/numpy/lib/npyio.pyi
@@ -143,6 +143,7 @@ def loadtxt(
encoding: None | str = ...,
max_rows: None | int = ...,
*,
+ quotechar: None | str = ...,
like: None | _SupportsArrayFunc = ...
) -> NDArray[float64]: ...
@overload
@@ -159,6 +160,7 @@ def loadtxt(
encoding: None | str = ...,
max_rows: None | int = ...,
*,
+ quotechar: None | str = ...,
like: None | _SupportsArrayFunc = ...
) -> NDArray[_SCT]: ...
@overload
@@ -175,6 +177,7 @@ def loadtxt(
encoding: None | str = ...,
max_rows: None | int = ...,
*,
+ quotechar: None | str = ...,
like: None | _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
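The stub additions above track the new `quotechar` keyword of `np.loadtxt`; a hedged
usage sketch (the field contents are made up for illustration):

    from io import StringIO
    import numpy as np

    # With quotechar set, delimiters inside quoted fields are not split on.
    data = StringIO('1,"alpha, beta"\n2,"gamma, delta"')
    arr = np.loadtxt(data, delimiter=",", quotechar='"', dtype=str)
    assert arr[0, 1] == "alpha, beta"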
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 874754a64..bdcbef91d 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -305,6 +305,29 @@ class TestAverage:
assert_almost_equal(y5.mean(0), average(y5, 0))
assert_almost_equal(y5.mean(1), average(y5, 1))
+ @pytest.mark.parametrize(
+ 'x, axis, expected_avg, weights, expected_wavg, expected_wsum',
+ [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]),
+ ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]],
+ [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])],
+ )
+ def test_basic_keepdims(self, x, axis, expected_avg,
+ weights, expected_wavg, expected_wsum):
+ avg = np.average(x, axis=axis, keepdims=True)
+ assert avg.shape == np.shape(expected_avg)
+ assert_array_equal(avg, expected_avg)
+
+ wavg = np.average(x, axis=axis, weights=weights, keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+
+ wavg, wsum = np.average(x, axis=axis, weights=weights, returned=True,
+ keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+ assert wsum.shape == np.shape(expected_wsum)
+ assert_array_equal(wsum, expected_wsum)
+
def test_weights(self):
y = np.arange(10)
w = np.arange(10)
@@ -1242,11 +1265,11 @@ class TestTrimZeros:
res = trim_zeros(arr)
assert_array_equal(arr, res)
-
def test_list_to_list(self):
res = trim_zeros(self.a.tolist())
assert isinstance(res, list)
+
class TestExtins:
def test_basic(self):
@@ -1759,6 +1782,7 @@ class TestLeaks:
finally:
gc.enable()
+
class TestDigitize:
def test_forward(self):
@@ -2339,6 +2363,7 @@ class Test_I0:
with pytest.raises(TypeError, match="i0 not supported for complex values"):
res = i0(a)
+
class TestKaiser:
def test_simple(self):
@@ -3474,6 +3499,7 @@ class TestQuantile:
assert np.isscalar(actual)
assert_equal(np.quantile(a, 0.5), np.nan)
+
class TestLerp:
@hypothesis.given(t0=st.floats(allow_nan=False, allow_infinity=False,
min_value=0, max_value=1),
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index c1c5a1615..141f508fd 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -44,6 +44,12 @@ class TestEye:
assert_equal(eye(3) == 1,
eye(3, dtype=bool))
+ def test_uint64(self):
+ # Regression test for gh-9982
+ assert_equal(eye(np.uint64(2), dtype=int), array([[1, 0], [0, 1]]))
+ assert_equal(eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1)),
+ array([[0, 1, 0, 0], [0, 0, 1, 0]]))
+
def test_diag(self):
assert_equal(eye(4, k=1),
array([[0, 1, 0, 0],
@@ -382,7 +388,7 @@ def test_tril_triu_dtype():
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
- arr = np.zeros((3,3), dtype='f4,f4')
+ arr = np.zeros((3, 3), dtype='f4,f4')
assert_equal(np.triu(arr).dtype, arr.dtype)
assert_equal(np.tril(arr).dtype, arr.dtype)
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 3e5ad31ff..3d47abbfb 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -2,6 +2,7 @@
"""
import functools
+import operator
from numpy.core.numeric import (
asanyarray, arange, zeros, greater_equal, multiply, ones,
@@ -214,6 +215,11 @@ def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
m = zeros((N, M), dtype=dtype, order=order)
if k >= M:
return m
+ # Ensure M and k are integers, so we don't get any surprise casting
+ # results in the expressions `M-k` and `M+1` used below. This avoids
+ # a problem with inputs with type (for example) np.uint64.
+ M = operator.index(M)
+ k = operator.index(k)
if k >= 0:
i = k
else:
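The `operator.index` calls guard against NumPy 1.x value-based promotion, where
mixing an unsigned scalar with a signed Python int silently produces a float. A
small illustration (the promotion detail is 1.x behavior; the `eye` call matches the
regression test above):

    import numpy as np

    # The "surprise" cast: uint64 mixed with a signed Python int -> float64.
    assert (np.uint64(4) - 1).dtype == np.float64

    # After converting M and k via operator.index, unsigned sizes work:
    np.eye(np.uint64(2), M=np.uint64(4), k=np.uint64(1))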
@@ -494,8 +500,8 @@ def triu(m, k=0):
Upper triangle of an array.
Return a copy of an array with the elements below the `k`-th diagonal
- zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the final
- two axes.
+ zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the
+ final two axes.
Please refer to the documentation for `tril` for further details.
@@ -804,7 +810,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
>>> plt.show()
"""
from numpy import histogramdd
-
+
if len(x) != len(y):
raise ValueError('x and y must have the same length.')
diff --git a/numpy/ma/__init__.pyi b/numpy/ma/__init__.pyi
index 04368b6c4..7f5cb56a8 100644
--- a/numpy/ma/__init__.pyi
+++ b/numpy/ma/__init__.pyi
@@ -216,6 +216,7 @@ from numpy.ma.extras import (
masked_all_like as masked_all_like,
median as median,
mr_ as mr_,
+ ndenumerate as ndenumerate,
notmasked_contiguous as notmasked_contiguous,
notmasked_edges as notmasked_edges,
polyfit as polyfit,
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 048d94bb7..641f4746f 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -10,12 +10,12 @@ A collection of utilities for `numpy.ma`.
"""
__all__ = [
'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
- 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked',
- 'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols',
- 'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot',
- 'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges',
- 'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols',
- 'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_',
+ 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack',
+ 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows',
+ 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d',
+ 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack',
+ 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows',
+ 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate',
'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',
]
@@ -475,6 +475,7 @@ def apply_over_axes(func, a, axes):
"an array of the correct shape")
return val
+
if apply_over_axes.__doc__ is not None:
apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
:np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
@@ -524,7 +525,8 @@ if apply_over_axes.__doc__ is not None:
"""
-def average(a, axis=None, weights=None, returned=False):
+def average(a, axis=None, weights=None, returned=False, *,
+ keepdims=np._NoValue):
"""
Return the weighted average of array over the given axis.
@@ -550,6 +552,14 @@ def average(a, axis=None, weights=None, returned=False):
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
Default is False.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the original `a`.
+ *Note:* `keepdims` will not work with instances of `numpy.matrix`
+ or other classes whose methods do not support `keepdims`.
+
+ .. versionadded:: 1.23.0
Returns
-------
@@ -582,14 +592,29 @@ def average(a, axis=None, weights=None, returned=False):
mask=[False, False],
fill_value=1e+20)
+ With ``keepdims=True``, the following result has shape (3, 1).
+
+ >>> np.ma.average(x, axis=1, keepdims=True)
+ masked_array(
+ data=[[0.5],
+ [2.5],
+ [4.5]],
+ mask=False,
+ fill_value=1e+20)
"""
a = asarray(a)
m = getmask(a)
# inspired by 'average' in numpy/lib/function_base.py
+ if keepdims is np._NoValue:
+ # Don't pass on the keepdims argument if one wasn't given.
+ keepdims_kw = {}
+ else:
+ keepdims_kw = {'keepdims': keepdims}
+
if weights is None:
- avg = a.mean(axis)
+ avg = a.mean(axis, **keepdims_kw)
scl = avg.dtype.type(a.count(axis))
else:
wgt = asarray(weights)
@@ -621,7 +646,8 @@ def average(a, axis=None, weights=None, returned=False):
wgt.mask |= a.mask
scl = wgt.sum(axis=axis, dtype=result_dtype)
- avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
+ avg = np.multiply(a, wgt,
+ dtype=result_dtype).sum(axis, **keepdims_kw) / scl
if returned:
if scl.shape != avg.shape:
@@ -713,6 +739,7 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
else:
return r
+
def _median(a, axis=None, out=None, overwrite_input=False):
# when an unmasked NaN is present return it, so we need to sort the NaN
# values behind the mask
@@ -840,6 +867,7 @@ def compress_nd(x, axis=None):
data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
return data
+
def compress_rowcols(x, axis=None):
"""
Suppress the rows and/or columns of a 2-D array that contain
@@ -912,6 +940,7 @@ def compress_rows(a):
raise NotImplementedError("compress_rows works for 2D arrays only.")
return compress_rowcols(a, 0)
+
def compress_cols(a):
"""
Suppress whole columns of a 2-D array that contain masked values.
@@ -929,6 +958,7 @@ def compress_cols(a):
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
+
def mask_rows(a, axis=np._NoValue):
"""
Mask rows of a 2D array that contain masked values.
@@ -979,6 +1009,7 @@ def mask_rows(a, axis=np._NoValue):
"will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 0)
+
def mask_cols(a, axis=np._NoValue):
"""
Mask columns of a 2D array that contain masked values.
@@ -1516,10 +1547,79 @@ class mr_class(MAxisConcatenator):
mr_ = mr_class()
+
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
+def ndenumerate(a, compressed=True):
+ """
+ Multidimensional index iterator.
+
+ Return an iterator yielding pairs of array coordinates and values,
+ skipping elements that are masked. With `compressed=False`,
+ `ma.masked` is yielded as the value of masked elements. This
+ behavior differs from that of `numpy.ndenumerate`, which yields the
+ value of the underlying data array.
+
+ Notes
+ -----
+ .. versionadded:: 1.23.0
+
+ Parameters
+ ----------
+ a : array_like
+ An array with (possibly) masked elements.
+ compressed : bool, optional
+ If True (default), masked elements are skipped.
+
+ See Also
+ --------
+ numpy.ndenumerate : Equivalent function ignoring any mask.
+
+ Examples
+ --------
+ >>> a = np.ma.arange(9).reshape((3, 3))
+ >>> a[1, 0] = np.ma.masked
+ >>> a[1, 2] = np.ma.masked
+ >>> a[2, 1] = np.ma.masked
+ >>> a
+ masked_array(
+ data=[[0, 1, 2],
+ [--, 4, --],
+ [6, --, 8]],
+ mask=[[False, False, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> for index, x in np.ma.ndenumerate(a):
+ ... print(index, x)
+ (0, 0) 0
+ (0, 1) 1
+ (0, 2) 2
+ (1, 1) 4
+ (2, 0) 6
+ (2, 2) 8
+
+ >>> for index, x in np.ma.ndenumerate(a, compressed=False):
+ ... print(index, x)
+ (0, 0) 0
+ (0, 1) 1
+ (0, 2) 2
+ (1, 0) --
+ (1, 1) 4
+ (1, 2) --
+ (2, 0) 6
+ (2, 1) --
+ (2, 2) 8
+ """
+ for it, mask in zip(np.ndenumerate(a), getmaskarray(a).flat):
+ if not mask:
+ yield it
+ elif not compressed:
+ yield it[0], masked
+
+
def flatnotmasked_edges(a):
"""
Find the indices of the first and last unmasked values.
@@ -1682,6 +1782,7 @@ def flatnotmasked_contiguous(a):
i += n
return result
+
def notmasked_contiguous(a, axis=None):
"""
Find contiguous unmasked data in a masked array along the given axis.
diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi
index e66d7cb63..56228b927 100644
--- a/numpy/ma/extras.pyi
+++ b/numpy/ma/extras.pyi
@@ -44,7 +44,7 @@ diagflat: _fromnxfunction_single
def apply_along_axis(func1d, axis, arr, *args, **kwargs): ...
def apply_over_axes(func, a, axes): ...
-def average(a, axis=..., weights=..., returned=...): ...
+def average(a, axis=..., weights=..., returned=..., keepdims=...): ...
def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ...
def compress_nd(x, axis=...): ...
def compress_rowcols(x, axis=...): ...
@@ -74,6 +74,7 @@ class mr_class(MAxisConcatenator):
mr_: mr_class
+def ndenumerate(a, compressed=...): ...
def flatnotmasked_edges(a): ...
def notmasked_edges(a, axis=...): ...
def flatnotmasked_contiguous(a): ...
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index d30dfd92f..1827edd1f 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -28,7 +28,7 @@ from numpy.ma.extras import (
ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous,
notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin,
- diagflat, stack, vstack
+ diagflat, ndenumerate, stack, vstack
)
@@ -75,7 +75,7 @@ class TestGeneric:
assert_equal(len(masked_arr['b']['c']), 1)
assert_equal(masked_arr['b']['c'].shape, (1, 1))
assert_equal(masked_arr['b']['c']._fill_value.shape, ())
-
+
def test_masked_all_with_object(self):
# same as above except that the array is not nested
my_dtype = np.dtype([('b', (object, (1,)))])
@@ -292,6 +292,29 @@ class TestAverage:
assert_almost_equal(wav1.real, expected1.real)
assert_almost_equal(wav1.imag, expected1.imag)
+ @pytest.mark.parametrize(
+ 'x, axis, expected_avg, weights, expected_wavg, expected_wsum',
+ [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]),
+ ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]],
+ [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])],
+ )
+ def test_basic_keepdims(self, x, axis, expected_avg,
+ weights, expected_wavg, expected_wsum):
+ avg = np.ma.average(x, axis=axis, keepdims=True)
+ assert avg.shape == np.shape(expected_avg)
+ assert_array_equal(avg, expected_avg)
+
+ wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+
+ wavg, wsum = np.ma.average(x, axis=axis, weights=weights,
+ returned=True, keepdims=True)
+ assert wavg.shape == np.shape(expected_wavg)
+ assert_array_equal(wavg, expected_wavg)
+ assert wsum.shape == np.shape(expected_wsum)
+ assert_array_equal(wsum, expected_wsum)
+
def test_masked_weights(self):
# Test with masked weights.
# (Regression test for https://github.com/numpy/numpy/issues/10438)
@@ -335,6 +358,7 @@ class TestAverage:
assert_almost_equal(avg_masked, avg_expected)
assert_equal(avg_masked.mask, avg_expected.mask)
+
class TestConcatenator:
# Tests for mr_, the equivalent of r_ for masked arrays.
@@ -1642,12 +1666,49 @@ class TestShapeBase:
assert_equal(a.mask.shape, a.shape)
assert_equal(a.data.shape, a.shape)
-
b = diagflat(1.0)
assert_equal(b.shape, (1, 1))
assert_equal(b.mask.shape, b.data.shape)
+class TestNDEnumerate:
+
+ def test_ndenumerate_nomasked(self):
+ ordinary = np.arange(6.).reshape((1, 3, 2))
+ empty_mask = np.zeros_like(ordinary, dtype=bool)
+ with_mask = masked_array(ordinary, mask=empty_mask)
+ assert_equal(list(np.ndenumerate(ordinary)),
+ list(ndenumerate(ordinary)))
+ assert_equal(list(ndenumerate(ordinary)),
+ list(ndenumerate(with_mask)))
+ assert_equal(list(ndenumerate(with_mask)),
+ list(ndenumerate(with_mask, compressed=False)))
+
+ def test_ndenumerate_allmasked(self):
+ a = masked_all(())
+ b = masked_all((100,))
+ c = masked_all((2, 3, 4))
+ assert_equal(list(ndenumerate(a)), [])
+ assert_equal(list(ndenumerate(b)), [])
+ assert_equal(list(ndenumerate(b, compressed=False)),
+ list(zip(np.ndindex((100,)), 100 * [masked])))
+ assert_equal(list(ndenumerate(c)), [])
+ assert_equal(list(ndenumerate(c, compressed=False)),
+ list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked])))
+
+ def test_ndenumerate_mixedmasked(self):
+ a = masked_array(np.arange(12).reshape((3, 4)),
+ mask=[[1, 1, 1, 1],
+ [1, 1, 0, 1],
+ [0, 0, 0, 0]])
+ items = [((1, 2), 6),
+ ((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)]
+ assert_equal(list(ndenumerate(a)), items)
+ assert_equal(len(list(ndenumerate(a, compressed=False))), a.size)
+ for coordinate, value in ndenumerate(a, compressed=False):
+ assert_equal(a[coordinate], value)
+
+
class TestStack:
def test_stack_1d(self):
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
index fe45f85b0..2c50dbf70 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/bit_generator.pyx
@@ -35,13 +35,7 @@ import abc
import sys
from itertools import cycle
import re
-
-try:
- from secrets import randbits
-except ImportError:
- # secrets unavailable on python 3.5 and before
- from random import SystemRandom
- randbits = SystemRandom().getrandbits
+from secrets import randbits
from threading import Lock
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 80a6fdd10..4a8f42e06 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -1342,9 +1342,6 @@ def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
Alternatively, can be used as a context manager like `assert_raises`.
- Name of this function adheres to Python 3.2+ reference, but should work in
- all versions down to 2.6.
-
Notes
-----
.. versionadded:: 1.9.0
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index bbc04124f..4026a7a14 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -211,7 +211,7 @@ class TestArrayEqual(_GenericTest):
with pytest.raises(AssertionError):
with np.errstate(all="raise"):
np.testing.assert_array_equal(
- np.array([1, 2, 3], np.float32),
+ np.array([1, 2, 3], np.float32),
np.array([1, 1e-40, 3], np.float32))
@@ -1223,7 +1223,7 @@ class TestStringEqual:
lambda: assert_string_equal("aaa", "a+b"))
-def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
+def assert_warn_len_equal(mod, n_in_context, py37=None):
try:
mod_warns = mod.__warningregistry__
except AttributeError:
@@ -1237,10 +1237,7 @@ def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
mod_warns = {}
num_warns = len(mod_warns)
- # Python 3.4 appears to clear any pre-existing warnings of the same type,
- # when raising warnings inside a catch_warnings block. So, there is a
- # warning generated by the tests within the context manager, but no
- # previous warnings.
+
if 'version' in mod_warns:
# Python 3 adds a 'version' entry to the registry,
# do not count it.
@@ -1252,9 +1249,7 @@ def assert_warn_len_equal(mod, n_in_context, py34=None, py37=None):
if sys.version_info[:2] >= (3, 7):
if py37 is not None:
n_in_context = py37
- else:
- if py34 is not None:
- n_in_context = py34
+
assert_equal(num_warns, n_in_context)
def test_warn_len_equal_call_scenarios():
@@ -1317,12 +1312,11 @@ def test_clear_and_catch_warnings():
warnings.warn('Another warning')
assert_warn_len_equal(my_mod, 1, py37=0)
     # Another warning; with no module spec it does add to the warnings dict,
- # Python 3.4 (see comments in `assert_warn_len_equal`)
     # except that on Python 3.7 catch_warnings makes no entry for 'ignore'.
with clear_and_catch_warnings():
warnings.simplefilter('ignore')
warnings.warn('Another warning')
- assert_warn_len_equal(my_mod, 2, py34=1, py37=0)
+ assert_warn_len_equal(my_mod, 2, py37=0)
def test_suppress_warnings_module():
diff --git a/numpy/typing/tests/data/reveal/arithmetic.pyi b/numpy/typing/tests/data/reveal/arithmetic.pyi
index a7077fcce..0ca5e9772 100644
--- a/numpy/typing/tests/data/reveal/arithmetic.pyi
+++ b/numpy/typing/tests/data/reveal/arithmetic.pyi
@@ -1,7 +1,7 @@
from typing import Any
import numpy as np
-from numpy._typing import _128Bit
+from numpy._typing import NDArray, _128Bit
# Can't directly import `np.float128` as it is not available on all platforms
f16: np.floating[_128Bit]
@@ -34,6 +34,7 @@ AR_c: np.ndarray[Any, np.dtype[np.complex128]]
AR_m: np.ndarray[Any, np.dtype[np.timedelta64]]
AR_M: np.ndarray[Any, np.dtype[np.datetime64]]
AR_O: np.ndarray[Any, np.dtype[np.object_]]
+AR_number: NDArray[np.number[Any]]
AR_LIKE_b: list[bool]
AR_LIKE_u: list[np.uint32]
@@ -46,6 +47,8 @@ AR_LIKE_O: list[np.object_]
# Array subtraction
+reveal_type(AR_number - AR_number) # E: ndarray[Any, dtype[number[Any]]]
+
reveal_type(AR_b - AR_LIKE_u) # E: ndarray[Any, dtype[unsignedinteger[Any]]]
reveal_type(AR_b - AR_LIKE_i) # E: ndarray[Any, dtype[signedinteger[Any]]]
reveal_type(AR_b - AR_LIKE_f) # E: ndarray[Any, dtype[floating[Any]]]
diff --git a/pyproject.toml b/pyproject.toml
index 5c909aebc..b821ec73c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ requires = [
"packaging==20.5; platform_machine=='arm64'", # macos M1
"setuptools==59.2.0",
"wheel==0.37.0",
- "Cython>=0.29.28,<3.0", # Note: keep in sync with tools/cythonize.py
+ "Cython>=0.29.29,<3.0",
]
diff --git a/setup.py b/setup.py
index e22349329..e5800fbea 100755
--- a/setup.py
+++ b/setup.py
@@ -258,8 +258,8 @@ def generate_cython():
raise OSError(msg) from e
else:
# Note: keep in sync with that in pyproject.toml
- # Update for Python 3.10
- required_version = '0.29.24'
+ # Update for Python 3.11
+ required_version = '0.29.29'
if _pep440.parse(cython_version) < _pep440.Version(required_version):
cython_path = Cython.__file__
diff --git a/test_requirements.txt b/test_requirements.txt
index b40f438be..cfef4afdb 100644
--- a/test_requirements.txt
+++ b/test_requirements.txt
@@ -1,4 +1,4 @@
-cython>=0.29.28,<3.0
+cython>=0.29.29,<3.0
wheel==0.37.0
setuptools==59.2.0
hypothesis==6.24.1
@@ -10,4 +10,4 @@ cffi; python_version < '3.10'
# For testing types. Notes on the restrictions:
# - Mypy relies on C API features not present in PyPy
# NOTE: Keep mypy in sync with environment.yml
-mypy==0.942; platform_python_implementation != "PyPy"
+mypy==0.950; platform_python_implementation != "PyPy"