summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.circleci/config.yml61
-rw-r--r--.mailmap5
-rw-r--r--.travis.yml2
-rw-r--r--doc/cdoc/Doxyfile2
-rw-r--r--doc/changelog/1.12.0-changelog.rst2
-rw-r--r--doc/changelog/1.13.0-changelog.rst4
-rw-r--r--doc/changelog/1.14.0-changelog.rst494
-rw-r--r--doc/release/1.11.0-notes.rst2
-rw-r--r--doc/release/1.12.0-notes.rst2
-rw-r--r--doc/release/1.14.0-notes.rst18
-rw-r--r--doc/release/1.14.1-notes.rst21
-rw-r--r--doc/release/1.15.0-notes.rst13
-rw-r--r--doc/source/reference/c-api.array.rst7
-rw-r--r--doc/source/user/numpy-for-matlab-users.rst4
-rw-r--r--numpy/_build_utils/common.py138
-rw-r--r--numpy/_globals.py24
-rw-r--r--numpy/add_newdocs.py13
-rw-r--r--numpy/core/_internal.py4
-rw-r--r--numpy/core/arrayprint.py142
-rw-r--r--numpy/core/code_generators/generate_umath.py32
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py2
-rw-r--r--numpy/core/einsumfunc.py40
-rw-r--r--numpy/core/fromnumeric.py2
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h37
-rw-r--r--numpy/core/include/numpy/npy_3kcompat.h4
-rw-r--r--numpy/core/include/numpy/utils.h2
-rw-r--r--numpy/core/numerictypes.py133
-rw-r--r--numpy/core/records.py17
-rw-r--r--numpy/core/setup.py2
-rw-r--r--numpy/core/setup_common.py2
-rw-r--r--numpy/core/src/multiarray/array_assign_scalar.c2
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src166
-rw-r--r--numpy/core/src/multiarray/buffer.c16
-rw-r--r--numpy/core/src/multiarray/common.c2
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c138
-rw-r--r--numpy/core/src/multiarray/ctors.c24
-rw-r--r--numpy/core/src/multiarray/datetime.c58
-rw-r--r--numpy/core/src/multiarray/datetime_strings.c36
-rw-r--r--numpy/core/src/multiarray/descriptor.c30
-rw-r--r--numpy/core/src/multiarray/dragon4.c49
-rw-r--r--numpy/core/src/multiarray/einsum.c.src2
-rw-r--r--numpy/core/src/multiarray/item_selection.c18
-rw-r--r--numpy/core/src/multiarray/mapping.c14
-rw-r--r--numpy/core/src/multiarray/multiarray_tests.c.src4
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c14
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.h2
-rw-r--r--numpy/core/src/multiarray/refcount.c4
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src6
-rw-r--r--numpy/core/src/multiarray/shape.c19
-rw-r--r--numpy/core/src/multiarray/temp_elide.c6
-rw-r--r--numpy/core/src/multiarray/typeinfo.c114
-rw-r--r--numpy/core/src/multiarray/typeinfo.h19
-rw-r--r--numpy/core/src/multiarray/vdot.c1
-rw-r--r--numpy/core/src/npymath/halffloat.c4
-rw-r--r--numpy/core/src/npymath/npy_math_complex.c.src12
-rw-r--r--numpy/core/src/npysort/quicksort.c.src2
-rw-r--r--numpy/core/src/private/npy_binsearch.h.src26
-rw-r--r--numpy/core/src/private/npy_partition.h.src15
-rw-r--r--numpy/core/src/umath/scalarmath.c.src6
-rw-r--r--numpy/core/src/umath/test_rational.c.src12
-rw-r--r--numpy/core/src/umath/ufunc_object.c111
-rw-r--r--numpy/core/src/umath/umathmodule.c8
-rw-r--r--numpy/core/tests/test_arrayprint.py71
-rw-r--r--numpy/core/tests/test_dtype.py2
-rw-r--r--numpy/core/tests/test_einsum.py26
-rw-r--r--numpy/core/tests/test_mem_overlap.py2
-rw-r--r--numpy/core/tests/test_multiarray.py30
-rw-r--r--numpy/core/tests/test_print.py12
-rw-r--r--numpy/core/tests/test_records.py52
-rw-r--r--numpy/core/tests/test_scalarmath.py4
-rw-r--r--numpy/core/tests/test_umath.py70
-rw-r--r--numpy/distutils/command/config.py2
-rw-r--r--numpy/distutils/conv_template.py6
-rw-r--r--numpy/distutils/from_template.py6
-rw-r--r--numpy/distutils/misc_util.py3
-rw-r--r--numpy/doc/subclassing.py2
-rw-r--r--numpy/f2py/tests/test_array_from_pyobj.py23
-rw-r--r--numpy/fft/helper.py43
-rw-r--r--numpy/fft/tests/test_helper.py110
-rw-r--r--numpy/lib/arraypad.py2
-rw-r--r--numpy/lib/arraysetops.py89
-rw-r--r--numpy/lib/format.py7
-rw-r--r--numpy/lib/function_base.py5
-rw-r--r--numpy/lib/nanfunctions.py28
-rw-r--r--numpy/lib/npyio.py54
-rw-r--r--numpy/lib/polynomial.py2
-rw-r--r--numpy/lib/tests/test_arraysetops.py12
-rw-r--r--numpy/lib/tests/test_format.py12
-rw-r--r--numpy/lib/tests/test_index_tricks.py2
-rw-r--r--numpy/lib/tests/test_io.py15
-rw-r--r--numpy/lib/tests/test_polynomial.py8
-rw-r--r--numpy/lib/tests/test_type_check.py18
-rw-r--r--numpy/lib/type_check.py23
-rw-r--r--numpy/lib/utils.py2
-rw-r--r--numpy/linalg/linalg.py11
-rw-r--r--numpy/linalg/umath_linalg.c.src49
-rw-r--r--numpy/ma/core.py16
-rw-r--r--numpy/ma/extras.py2
-rw-r--r--numpy/ma/tests/test_core.py25
-rw-r--r--numpy/ma/tests/test_extras.py8
-rw-r--r--numpy/polynomial/polynomial.py2
-rw-r--r--numpy/random/tests/test_random.py2
-rw-r--r--numpy/testing/pytest_tools/utils.py11
-rw-r--r--numpy/tests/test_reloading.py8
-rwxr-xr-xsetup.py10
-rwxr-xr-xtools/changelog.py (renamed from tools/announce.py)8
-rw-r--r--tools/swig/numpy.i19
-rwxr-xr-xtools/travis-test.sh7
108 files changed, 2056 insertions, 1036 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 000000000..e055739e5
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,61 @@
+# Python CircleCI 2.0 configuration file
+#
+# Check https://circleci.com/docs/2.0/language-python/ for more details
+#
+version: 2
+jobs:
+ build:
+ docker:
+ # CircleCI maintains a library of pre-built images
+ # documented at https://circleci.com/docs/2.0/circleci-images/
+ - image: circleci/python:3.6.1
+
+ working_directory: ~/repo
+
+ steps:
+ - checkout
+
+ - run:
+ name: install dependencies
+ command: |
+ python3 -m venv venv
+ . venv/bin/activate
+ pip install cython sphinx matplotlib
+
+ - run:
+ name: build numpy
+ command: |
+ . venv/bin/activate
+ pip install --upgrade pip setuptools
+ pip install cython
+ pip install .
+
+ - run:
+ name: build devdocs
+ command: |
+ . venv/bin/activate
+ cd doc
+ git submodule update --init
+ make html
+
+ - run:
+ name: build neps
+ command: |
+ . venv/bin/activate
+ cd doc/neps
+ make html
+
+ # - store_artifacts:
+ # path: doc/build/html/
+ # destination: devdocs
+
+
+ # - store_artifacts:
+ # path: doc/neps/_build/html/
+ # destination: neps
+
+ - deploy:
+ command: |
+ if [ "${CIRCLE_BRANCH}" == "master" ]; then
+ echo "Deploying on master"
+ fi
diff --git a/.mailmap b/.mailmap
index 92bc79b7b..b4e67747b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -84,6 +84,7 @@ Han Genuit <hangenuit@gmail.com> 87 <hangenuit@gmail.com>
Han Genuit <hangenuit@gmail.com> hangenuit@gmail.com <hangenuit@gmail.com>
Han Genuit <hangenuit@gmail.com> Han <hangenuit@gmail.com>
Hanno Klemm <hanno.klemm@maerskoil.com> hklemm <hanno.klemm@maerskoil.com>
+Hemil Desai <desai38@purdue.edu> hemildesai <desai38@purdue.edu>
Irvin Probst <irvin.probst@ensta-bretagne.fr> I--P <irvin.probst@ensta-bretagne.fr>
Jaime Fernandez <jaime.frio@gmail.com> Jaime Fernandez <jaime.fernandez@hp.com>
Jaime Fernandez <jaime.frio@gmail.com> jaimefrio <jaime.frio@gmail.com>
@@ -103,6 +104,7 @@ Julian Taylor <juliantaylor108@gmail.com> Julian Taylor <juliantaylor108@googlem
Julien Lhermitte <jrmlhermitte@gmail.com> Julien Lhermitte <lhermitte@bnl.gov>
Julien Schueller <julien.schueller@gmail.com> jschueller <julien.schueller@gmail.com>
Khaled Ben Abdallah Okuda <khaled.ben.okuda@gmail.com> KhaledTo <khaled.ben.okuda@gmail.com>
+Konrad Kapp <k_kapp@yahoo.com> k_kapp@yahoo.com <k_kapp@yahoo.com>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <l.buitinck@esciencecenter.nl>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <L.J.Buitinck@uva.nl>
Luis Pedro Coelho <luis@luispedro.org> Luis Pedro Coelho <lpc@cmu.edu>
@@ -123,9 +125,11 @@ Michael Behrisch <oss@behrisch.de> behrisch <behrisch@users.sourceforge.net>
Michael Droettboom <mdboom@gmail.com> mdroe <mdroe@localhost>
Michael K. Tran <trankmichael@gmail.com> mtran <trankmichael@gmail.com>
Michael Martin <mmartin4242@gmail.com> mmartin <mmartin4242@gmail.com>
+Michael Schnaitter <schnaitterm@knights.ucf.edu> schnaitterm <schnaitterm@users.noreply.github.com>
Nathaniel J. Smith <njs@pobox.com> njsmith <njs@pobox.com>
Naveen Arunachalam <notatroll.troll@gmail.com> naveenarun <notatroll.troll@gmail.com>
Nicolas Scheffer <nicolas.scheffer@sri.com> Nicolas Scheffer <scheffer@speech.sri.com>
+Nicholas A. Del Grosso <delgrosso@bio.lmu.de> nickdg <delgrosso@bio.lmu.de>
Ondřej Čertík <ondrej.certik@gmail.com> Ondrej Certik <ondrej.certik@gmail.com>
Óscar Villellas Guillén <oscar.villellas@continuum.io> ovillellas <oscar.villellas@continuum.io>
Pat Miller <patmiller@localhost> patmiller <patmiller@localhost>
@@ -171,3 +175,4 @@ Wendell Smith <wendellwsmith@gmail.com> Wendell Smith <wackywendell@gmail.com>
William Spotz <wfspotz@sandia.gov@localhost> wfspotz@sandia.gov <wfspotz@sandia.gov@localhost>
Wojtek Ruszczewski <git@wr.waw.pl> wrwrwr <git@wr.waw.pl>
Zixu Zhao <zixu.zhao.tireless@gmail.com> ZZhaoTireless <zixu.zhao.tireless@gmail.com>
+Ziyan Zhou <ziyan.zhou@mujin.co.jp> Ziyan <ziyan.zhou@mujin.co.jp>
diff --git a/.travis.yml b/.travis.yml
index 2046ce975..fca0c632d 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -38,7 +38,7 @@ python:
matrix:
include:
- python: 2.7
- env: USE_CHROOT=1 ARCH=i386 DIST=zesty PYTHON=2.7
+ env: USE_CHROOT=1 ARCH=i386 DIST=artful PYTHON=2.7
sudo: true
dist: trusty
addons:
diff --git a/doc/cdoc/Doxyfile b/doc/cdoc/Doxyfile
index 9f702724d..d80e98558 100644
--- a/doc/cdoc/Doxyfile
+++ b/doc/cdoc/Doxyfile
@@ -289,7 +289,7 @@ TYPEDEF_HIDES_STRUCT = NO
# For small to medium size projects (<1000 input files) the default value is
# probably good enough. For larger projects a too small cache size can cause
# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penality.
+# causing a significant performance penalty.
# If the system has enough physical memory increasing the cache will improve the
# performance by keeping more symbols in memory. Note that the value works on
# a logarithmic scale so increasing the size by one will roughly double the
diff --git a/doc/changelog/1.12.0-changelog.rst b/doc/changelog/1.12.0-changelog.rst
index b607f70fc..7618820dc 100644
--- a/doc/changelog/1.12.0-changelog.rst
+++ b/doc/changelog/1.12.0-changelog.rst
@@ -251,7 +251,7 @@ A total of 418 pull requests were merged for this release.
* `#7292 <https://github.com/numpy/numpy/pull/7292>`__: Clarify error on repr failure in assert_equal.
* `#7294 <https://github.com/numpy/numpy/pull/7294>`__: ENH: add support for BLIS to numpy.distutils
* `#7295 <https://github.com/numpy/numpy/pull/7295>`__: DOC: understanding code and getting started section to dev doc
-* `#7296 <https://github.com/numpy/numpy/pull/7296>`__: Revert part of #3907 which incorrectly propogated MaskedArray...
+* `#7296 <https://github.com/numpy/numpy/pull/7296>`__: Revert part of #3907 which incorrectly propagated MaskedArray...
* `#7299 <https://github.com/numpy/numpy/pull/7299>`__: DOC: Fix mismatched variable names in docstrings.
* `#7300 <https://github.com/numpy/numpy/pull/7300>`__: DOC: dev: stop recommending keeping local master updated with...
* `#7301 <https://github.com/numpy/numpy/pull/7301>`__: DOC: Update release notes
diff --git a/doc/changelog/1.13.0-changelog.rst b/doc/changelog/1.13.0-changelog.rst
index 2ea0177b4..6deb8f2b7 100644
--- a/doc/changelog/1.13.0-changelog.rst
+++ b/doc/changelog/1.13.0-changelog.rst
@@ -364,7 +364,7 @@ A total of 309 pull requests were merged for this release.
* `#8928 <https://github.com/numpy/numpy/pull/8928>`__: BUG: runtests --bench fails on windows
* `#8929 <https://github.com/numpy/numpy/pull/8929>`__: BENCH: Masked array benchmarks
* `#8939 <https://github.com/numpy/numpy/pull/8939>`__: DEP: Deprecate `np.ma.MaskedArray.mini`
-* `#8942 <https://github.com/numpy/numpy/pull/8942>`__: DOC: stop refering to 'S' dtype as string
+* `#8942 <https://github.com/numpy/numpy/pull/8942>`__: DOC: stop referring to 'S' dtype as string
* `#8948 <https://github.com/numpy/numpy/pull/8948>`__: DEP: Deprecate NPY_CHAR
* `#8949 <https://github.com/numpy/numpy/pull/8949>`__: REL: add `python_requires` to setup.py
* `#8951 <https://github.com/numpy/numpy/pull/8951>`__: ENH: Add ufunc.identity for hypot and logical_xor
@@ -396,7 +396,7 @@ A total of 309 pull requests were merged for this release.
* `#9027 <https://github.com/numpy/numpy/pull/9027>`__: DOC: update binary-op / ufunc interactions and recommendations...
* `#9038 <https://github.com/numpy/numpy/pull/9038>`__: BUG: check compiler flags to determine the need for a rebuild
* `#9039 <https://github.com/numpy/numpy/pull/9039>`__: DOC: actually produce docs for as_strided
-* `#9050 <https://github.com/numpy/numpy/pull/9050>`__: BUG: distutils, add compatiblity python parallelization
+* `#9050 <https://github.com/numpy/numpy/pull/9050>`__: BUG: distutils, add compatibility python parallelization
* `#9054 <https://github.com/numpy/numpy/pull/9054>`__: BUG: Various fixes to _dtype_from_pep3118
* `#9058 <https://github.com/numpy/numpy/pull/9058>`__: MAINT: Update FutureWarning message.
* `#9060 <https://github.com/numpy/numpy/pull/9060>`__: DEP: deprecate ndarray.conjugate's no-op fall through for non-numeric...
diff --git a/doc/changelog/1.14.0-changelog.rst b/doc/changelog/1.14.0-changelog.rst
new file mode 100644
index 000000000..87b7beb8d
--- /dev/null
+++ b/doc/changelog/1.14.0-changelog.rst
@@ -0,0 +1,494 @@
+
+Contributors
+============
+
+A total of 100 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexey Brodkin +
+* Allan Haldane
+* Andras Deak +
+* Andrew Lawson +
+* Anna Chiara +
+* Antoine Pitrou
+* Bernhard M. Wiedemann +
+* Bob Eldering +
+* Brandon Carter
+* CJ Carey
+* Charles Harris
+* Chris Lamb
+* Christoph Boeddeker +
+* Christoph Gohlke
+* Daniel Hrisca +
+* Daniel Smith
+* Danny Hermes
+* David Freese
+* David Hagen
+* David Linke +
+* David Schaefer +
+* Dillon Niederhut +
+* Egor Panfilov +
+* Emilien Kofman
+* Eric Wieser
+* Erik Bray +
+* Erik Quaeghebeur +
+* Garry Polley +
+* Gunjan +
+* Han Shen +
+* Henke Adolfsson +
+* Hidehiro NAGAOKA +
+* Hemil Desai +
+* Hong Xu +
+* Iryna Shcherbina +
+* Jaime Fernandez
+* James Bourbeau +
+* Jamie Townsend +
+* Jarrod Millman
+* Jean Helie +
+* Jeroen Demeyer +
+* John Goetz +
+* John Kirkham
+* John Zwinck
+* Jonathan Helmus
+* Joseph Fox-Rabinovitz
+* Joseph Paul Cohen +
+* Joshua Leahy +
+* Julian Taylor
+* Jörg Döpfert +
+* Keno Goertz +
+* Kevin Sheppard +
+* Kexuan Sun +
+* Konrad Kapp +
+* Kristofor Maynard +
+* Licht Takeuchi +
+* Loïc Estève
+* Lukas Mericle +
+* Marten van Kerkwijk
+* Matheus Portela +
+* Matthew Brett
+* Matti Picus
+* Michael Lamparski +
+* Michael Odintsov +
+* Michael Schnaitter +
+* Michael Seifert
+* Mike Nolta
+* Nathaniel J. Smith
+* Nelle Varoquaux +
+* Nicholas Del Grosso +
+* Nico Schlömer +
+* Oleg Zabluda +
+* Oleksandr Pavlyk
+* Pauli Virtanen
+* Pim de Haan +
+* Ralf Gommers
+* Robert T. McGibbon +
+* Roland Kaufmann
+* Sebastian Berg
+* Serhiy Storchaka +
+* Shitian Ni +
+* Spencer Hill +
+* Srinivas Reddy Thatiparthy +
+* Stefan Winkler +
+* Stephan Hoyer
+* Steven Maude +
+* SuperBo +
+* Thomas Köppe +
+* Toon Verstraelen
+* Vedant Misra +
+* Warren Weckesser
+* Wirawan Purwanto +
+* Yang Li +
+* Ziyan Zhou +
+* chaoyu3 +
+* orbit-stabilizer +
+* solarjoe
+* wufangjie +
+* xoviat +
+* Élie Gouzien +
+
+Pull requests merged
+====================
+
+A total of 381 pull requests were merged for this release.
+
+* `#5580 <https://github.com/numpy/numpy/pull/5580>`__: BUG, DEP: Fix masked arrays to properly edit views. ( #5558 )
+* `#6053 <https://github.com/numpy/numpy/pull/6053>`__: MAINT: struct assignment "by field position", multi-field indices...
+* `#7994 <https://github.com/numpy/numpy/pull/7994>`__: BUG: Allow 'shape': () in __array_interface__ regardless of the...
+* `#8187 <https://github.com/numpy/numpy/pull/8187>`__: MAINT: Remove the unused keepdim argument from np.ufunc.accumulate
+* `#8278 <https://github.com/numpy/numpy/pull/8278>`__: MAINT: Make the refactor suggested in prepare_index
+* `#8557 <https://github.com/numpy/numpy/pull/8557>`__: ENH: add hermitian=False kwarg to np.linalg.matrix_rank
+* `#8722 <https://github.com/numpy/numpy/pull/8722>`__: DOC: Clarifying the meaning of small values for `suppress` print...
+* `#8827 <https://github.com/numpy/numpy/pull/8827>`__: BUG: Fix pinv for stacked matrices
+* `#8920 <https://github.com/numpy/numpy/pull/8920>`__: ENH: use caching memory allocator in more places
+* `#8934 <https://github.com/numpy/numpy/pull/8934>`__: MAINT: Use np.concatenate instead of np.vstack
+* `#8977 <https://github.com/numpy/numpy/pull/8977>`__: BUG: Fix all kinds of problems when itemsize == 0
+* `#8981 <https://github.com/numpy/numpy/pull/8981>`__: ENH: implement voidtype_repr and voidtype_str
+* `#8983 <https://github.com/numpy/numpy/pull/8983>`__: ENH: fix str/repr for 0d-arrays and int* scalars
+* `#9020 <https://github.com/numpy/numpy/pull/9020>`__: BUG: don't silence warnings in ufunc.reduce
+* `#9025 <https://github.com/numpy/numpy/pull/9025>`__: ENH: np.save() to align data at 64 bytes
+* `#9056 <https://github.com/numpy/numpy/pull/9056>`__: DOC: update structured array docs to reflect #6053
+* `#9065 <https://github.com/numpy/numpy/pull/9065>`__: DEP: 0 should be passed to bincount, not None
+* `#9083 <https://github.com/numpy/numpy/pull/9083>`__: MAINT: Improve error message from sorting with duplicate key
+* `#9089 <https://github.com/numpy/numpy/pull/9089>`__: MAINT: refine error message for __array_ufunc__ not implemented
+* `#9090 <https://github.com/numpy/numpy/pull/9090>`__: MAINT: Update master branch for 1.14.0 development.
+* `#9092 <https://github.com/numpy/numpy/pull/9092>`__: BUG remove memory leak in array ufunc override.
+* `#9096 <https://github.com/numpy/numpy/pull/9096>`__: ENH: Allow inplace also as keyword parameter for ndarray.byteswap
+* `#9099 <https://github.com/numpy/numpy/pull/9099>`__: TST: fix test_basic failure on Windows
+* `#9106 <https://github.com/numpy/numpy/pull/9106>`__: BUG: Array ufunc reduce out tuple
+* `#9110 <https://github.com/numpy/numpy/pull/9110>`__: BUG: Do not elide complex abs()
+* `#9112 <https://github.com/numpy/numpy/pull/9112>`__: BUG: ndarray.__pow__ does not check result of fast_scalar_power
+* `#9113 <https://github.com/numpy/numpy/pull/9113>`__: BUG: delay calls of array repr in getlimits
+* `#9115 <https://github.com/numpy/numpy/pull/9115>`__: BUG: Compilation crashes in MSVC when LIB or INCLUDE is not set
+* `#9116 <https://github.com/numpy/numpy/pull/9116>`__: DOC: link to stack from column_stack
+* `#9118 <https://github.com/numpy/numpy/pull/9118>`__: BUG: Fix reference count error of types when init multiarraymodule
+* `#9119 <https://github.com/numpy/numpy/pull/9119>`__: BUG: Fix error handling on PyCapsule when initializing multiarraymodule
+* `#9122 <https://github.com/numpy/numpy/pull/9122>`__: DOC: update 1.13 release note for MaskedArray, masked constants...
+* `#9132 <https://github.com/numpy/numpy/pull/9132>`__: DEP: Deprecate incorrect behavior of expand_dims.
+* `#9138 <https://github.com/numpy/numpy/pull/9138>`__: MAINT: Update .mailmap
+* `#9139 <https://github.com/numpy/numpy/pull/9139>`__: ENH: remove unneeded spaces in float/bool reprs, fixes 0d str
+* `#9141 <https://github.com/numpy/numpy/pull/9141>`__: DOC: Update ufunc documentation
+* `#9142 <https://github.com/numpy/numpy/pull/9142>`__: BUG: set default type for empty index array to `numpy.intp`
+* `#9149 <https://github.com/numpy/numpy/pull/9149>`__: DOC: Fix incorrect function signature in UFunc documentation.
+* `#9151 <https://github.com/numpy/numpy/pull/9151>`__: DOC: better link display text for Developer Zone.
+* `#9152 <https://github.com/numpy/numpy/pull/9152>`__: DOC: Fix some very minor spelling/grammar mistakes in docs
+* `#9155 <https://github.com/numpy/numpy/pull/9155>`__: MAINT: Take out code that will never be executed
+* `#9157 <https://github.com/numpy/numpy/pull/9157>`__: DOC: Fixed broken link to scipy developer zone
+* `#9164 <https://github.com/numpy/numpy/pull/9164>`__: BUG: have as_strided() keep custom dtypes
+* `#9167 <https://github.com/numpy/numpy/pull/9167>`__: BUG: ensure structured ndarray.__eq__,__ne__ defer when appropriate.
+* `#9168 <https://github.com/numpy/numpy/pull/9168>`__: MAINT: Simplify if statement
+* `#9174 <https://github.com/numpy/numpy/pull/9174>`__: BUG: allow pickling generic datetime
+* `#9176 <https://github.com/numpy/numpy/pull/9176>`__: DOC: Update protocols in git development document.
+* `#9181 <https://github.com/numpy/numpy/pull/9181>`__: COMPAT: PyPy calls clongdouble_int which raises a warning
+* `#9195 <https://github.com/numpy/numpy/pull/9195>`__: BUG: pull request 9087 modifies a tuple after use
+* `#9200 <https://github.com/numpy/numpy/pull/9200>`__: DOC: Update bincount docs to reflect gh-8348
+* `#9201 <https://github.com/numpy/numpy/pull/9201>`__: BUG: Fix unicode(unicode_array_0d) on python 2.7
+* `#9202 <https://github.com/numpy/numpy/pull/9202>`__: MAINT: Move ndarray.__str__ and ndarray.__repr__ to their own...
+* `#9205 <https://github.com/numpy/numpy/pull/9205>`__: DOC: Remove all references to bigndarray in documentation.
+* `#9209 <https://github.com/numpy/numpy/pull/9209>`__: ENH: Add an out argument to concatenate
+* `#9212 <https://github.com/numpy/numpy/pull/9212>`__: MAINT: Combine similar branches
+* `#9214 <https://github.com/numpy/numpy/pull/9214>`__: MAINT: Don't internally use the one-argument where
+* `#9215 <https://github.com/numpy/numpy/pull/9215>`__: BUG: Avoid bare except clauses
+* `#9217 <https://github.com/numpy/numpy/pull/9217>`__: BUG: handle resize of 0d array
+* `#9218 <https://github.com/numpy/numpy/pull/9218>`__: BUG: Only propagate TypeError from where we throw it
+* `#9219 <https://github.com/numpy/numpy/pull/9219>`__: DOC: Link to ufunc.outer from np.outer
+* `#9220 <https://github.com/numpy/numpy/pull/9220>`__: MAINT: Factor out code duplicated by nanmedian and nanpercentile
+* `#9226 <https://github.com/numpy/numpy/pull/9226>`__: DOC, ENH: Add 1.13.0-changelog.rst
+* `#9238 <https://github.com/numpy/numpy/pull/9238>`__: DOC: BLD: fix lots of Sphinx warnings/errors.
+* `#9241 <https://github.com/numpy/numpy/pull/9241>`__: MAINT: Fixup release notes, changelogs after #9238 merge.
+* `#9242 <https://github.com/numpy/numpy/pull/9242>`__: BUG: Make 0-length dim handling of tensordot consistent with...
+* `#9246 <https://github.com/numpy/numpy/pull/9246>`__: ENH: Release the GIL in einsum() special-cased loops
+* `#9247 <https://github.com/numpy/numpy/pull/9247>`__: BUG: fix missing keyword rename for common block in numpy.f2py
+* `#9253 <https://github.com/numpy/numpy/pull/9253>`__: DOC: Add isnat/positive ufunc to documentation.
+* `#9259 <https://github.com/numpy/numpy/pull/9259>`__: MAINT: Use XOR for bool arrays in `np.diff`
+* `#9260 <https://github.com/numpy/numpy/pull/9260>`__: BUG: don't elide into readonly and updateifcopy temporaries
+* `#9264 <https://github.com/numpy/numpy/pull/9264>`__: DOC: some doc build maintenance and f2py doc updates
+* `#9266 <https://github.com/numpy/numpy/pull/9266>`__: BUG: Fix unused variable in ufunc_object.c,
+* `#9268 <https://github.com/numpy/numpy/pull/9268>`__: ENH: testing: load available nose plugins that are external to...
+* `#9271 <https://github.com/numpy/numpy/pull/9271>`__: BUG: fix issue when using ``python setup.py somecommand --force``.
+* `#9280 <https://github.com/numpy/numpy/pull/9280>`__: BUG: Make extensions compilable with MinGW on Py2.7
+* `#9281 <https://github.com/numpy/numpy/pull/9281>`__: DOC: add @ operator in array vs. matrix comparison doc
+* `#9285 <https://github.com/numpy/numpy/pull/9285>`__: BUG: Fix Intel compilation on Unix.
+* `#9292 <https://github.com/numpy/numpy/pull/9292>`__: MAINT: Fix lgtm alerts
+* `#9294 <https://github.com/numpy/numpy/pull/9294>`__: BUG: Fixes histogram monotonicity check for unsigned bin values
+* `#9300 <https://github.com/numpy/numpy/pull/9300>`__: BUG: PyArray_CountNonzero does not check for exceptions
+* `#9302 <https://github.com/numpy/numpy/pull/9302>`__: BUG: Fix fillvalue
+* `#9306 <https://github.com/numpy/numpy/pull/9306>`__: BUG: f2py: Convert some error messages printed to stderr to exceptions.
+* `#9310 <https://github.com/numpy/numpy/pull/9310>`__: BUG: fix wrong ndim used in empty where check
+* `#9316 <https://github.com/numpy/numpy/pull/9316>`__: BUG: `runtest -t` should recognize development mode
+* `#9320 <https://github.com/numpy/numpy/pull/9320>`__: DOC: Use x1 and x2 in the heaviside docstring.
+* `#9322 <https://github.com/numpy/numpy/pull/9322>`__: BUG: np.ma.astype fails on structured types
+* `#9323 <https://github.com/numpy/numpy/pull/9323>`__: DOC: Add $PARAMS to the isnat docstring
+* `#9324 <https://github.com/numpy/numpy/pull/9324>`__: DOC: Fix missing asterisks in git development_setup doc page
+* `#9325 <https://github.com/numpy/numpy/pull/9325>`__: DOC: add a NumFOCUS badge to README.md
+* `#9332 <https://github.com/numpy/numpy/pull/9332>`__: ENH: fix 0d array printing using `str` or `formatter`.
+* `#9335 <https://github.com/numpy/numpy/pull/9335>`__: BUG: umath: un-break ufunc where= when no out= is given
+* `#9336 <https://github.com/numpy/numpy/pull/9336>`__: BUG: Fix various problems with the np.ma.masked constant
+* `#9337 <https://github.com/numpy/numpy/pull/9337>`__: BUG: Prevent crash if ufunc doc string is null
+* `#9341 <https://github.com/numpy/numpy/pull/9341>`__: BUG: np.resize discards empty shapes
+* `#9343 <https://github.com/numpy/numpy/pull/9343>`__: BUG: recfunctions fail in a bunch of ways due to using .descr
+* `#9344 <https://github.com/numpy/numpy/pull/9344>`__: DOC: fixes issue #9326, by removing the statement.
+* `#9346 <https://github.com/numpy/numpy/pull/9346>`__: BUG: void masked fillvalue cannot be cast to void in python 3
+* `#9354 <https://github.com/numpy/numpy/pull/9354>`__: BUG: Prevent hang traversing ufunc userloop linked list
+* `#9357 <https://github.com/numpy/numpy/pull/9357>`__: DOC: Add examples for complex dtypes
+* `#9361 <https://github.com/numpy/numpy/pull/9361>`__: DOC: isscalar add example for str
+* `#9362 <https://github.com/numpy/numpy/pull/9362>`__: ENH: Rearrange testing module to isolate nose dependency.
+* `#9364 <https://github.com/numpy/numpy/pull/9364>`__: BUG: ')' is printed at the end pointer of the buffer in numpy.f2py.
+* `#9369 <https://github.com/numpy/numpy/pull/9369>`__: BUG: fix error in fromstring function from numpy.core.records
+* `#9375 <https://github.com/numpy/numpy/pull/9375>`__: DOC: Document the internal workings of PY_ARRAY_UNIQUE_SYMBOL
+* `#9380 <https://github.com/numpy/numpy/pull/9380>`__: DOC: Forward port 1.13.1 notes and changelog.
+* `#9381 <https://github.com/numpy/numpy/pull/9381>`__: TST: test doc string of COMMON block arrays for numpy.f2py.
+* `#9387 <https://github.com/numpy/numpy/pull/9387>`__: MAINT: Simplify code using PyArray_ISBYTESWAPPED macro.
+* `#9388 <https://github.com/numpy/numpy/pull/9388>`__: MAINT: Use PyArray_ISBYTESWAPPED instead of !PyArray_ISNOTSWAPPED.
+* `#9389 <https://github.com/numpy/numpy/pull/9389>`__: DOC: Fix reference, PyArray_DescrNew -> PyArray_NewFromDescr
+* `#9392 <https://github.com/numpy/numpy/pull/9392>`__: DOC: UPDATEIFCOPY raises an error if not an array.
+* `#9399 <https://github.com/numpy/numpy/pull/9399>`__: DOC: document how to free memory from PyArray_IntpConverter.
+* `#9400 <https://github.com/numpy/numpy/pull/9400>`__: MAINT: Further unify handling of unnamed ufuncs
+* `#9403 <https://github.com/numpy/numpy/pull/9403>`__: MAINT: Replace tab escapes with four spaces
+* `#9407 <https://github.com/numpy/numpy/pull/9407>`__: DOC: add ``suppress_warnings`` to the testing routine listing.
+* `#9408 <https://github.com/numpy/numpy/pull/9408>`__: BUG: various fixes to np.gradient
+* `#9411 <https://github.com/numpy/numpy/pull/9411>`__: MAINT/BUG: improve gradient dtype handling
+* `#9412 <https://github.com/numpy/numpy/pull/9412>`__: BUG: Check for exception in sort functions
+* `#9422 <https://github.com/numpy/numpy/pull/9422>`__: DOC: correct formatting of basic.types.html
+* `#9423 <https://github.com/numpy/numpy/pull/9423>`__: MAINT: change http to https for numfocus.org link in README
+* `#9425 <https://github.com/numpy/numpy/pull/9425>`__: ENH: Einsum calls BLAS if it advantageous to do so
+* `#9426 <https://github.com/numpy/numpy/pull/9426>`__: DOC: Add a link to einsum_path
+* `#9431 <https://github.com/numpy/numpy/pull/9431>`__: ENH: distutils: make msvc + mingw-gfortran work
+* `#9432 <https://github.com/numpy/numpy/pull/9432>`__: BUG: Fix loss of masks in masked 0d methods
+* `#9433 <https://github.com/numpy/numpy/pull/9433>`__: BUG: make np.transpose return a view of the mask
+* `#9434 <https://github.com/numpy/numpy/pull/9434>`__: MAINT: Remove unittest dependencies
+* `#9437 <https://github.com/numpy/numpy/pull/9437>`__: DOC: Update 1.14.0 release notes.
+* `#9446 <https://github.com/numpy/numpy/pull/9446>`__: BUG: Inlined functions must be defined somewhere.
+* `#9447 <https://github.com/numpy/numpy/pull/9447>`__: API: Make ``a.flat.__array__`` return a copy when ``a`` non-contiguous.
+* `#9452 <https://github.com/numpy/numpy/pull/9452>`__: MAINT: Use new-style classes on 2.7
+* `#9454 <https://github.com/numpy/numpy/pull/9454>`__: MAINT: Remove branch in __array__ where if and else were the...
+* `#9457 <https://github.com/numpy/numpy/pull/9457>`__: MAINT: Add a common subclass to all the masked ufunc wrappers
+* `#9458 <https://github.com/numpy/numpy/pull/9458>`__: MAINT: Improve performance of np.copyto(where=scalar)
+* `#9469 <https://github.com/numpy/numpy/pull/9469>`__: BUG: Fix true_divide when dtype=np.float64 specified.
+* `#9470 <https://github.com/numpy/numpy/pull/9470>`__: MAINT: Make `setxor1d` a bit clearer and speed it up
+* `#9471 <https://github.com/numpy/numpy/pull/9471>`__: BLD: remove -xhost flag from IntelFCompiler.
+* `#9475 <https://github.com/numpy/numpy/pull/9475>`__: DEP: deprecate rollaxis
+* `#9482 <https://github.com/numpy/numpy/pull/9482>`__: MAINT: Make diff iterative instead of recursive
+* `#9487 <https://github.com/numpy/numpy/pull/9487>`__: DEP: Letting fromstring pretend to be frombuffer is a bad idea
+* `#9490 <https://github.com/numpy/numpy/pull/9490>`__: DOC: Replace xrange by range in quickstart docs
+* `#9491 <https://github.com/numpy/numpy/pull/9491>`__: TST: Add filter for new Py3K warning in python 2
+* `#9492 <https://github.com/numpy/numpy/pull/9492>`__: ENH: Add np.polynomial.chebyshev.chebinterpolate function.
+* `#9498 <https://github.com/numpy/numpy/pull/9498>`__: DOC: fix versionadded in docstring for moveaxis
+* `#9499 <https://github.com/numpy/numpy/pull/9499>`__: MAINT/BUG: Improve error messages for dtype reassigment, fix...
+* `#9503 <https://github.com/numpy/numpy/pull/9503>`__: MAINT: Move variables into deepest relevant scope, for clarity
+* `#9505 <https://github.com/numpy/numpy/pull/9505>`__: BUG: issubdtype is inconsistent on types and dtypes
+* `#9517 <https://github.com/numpy/numpy/pull/9517>`__: MAINT/DOC: Use builtin when np.{x} is builtins.{x}.
+* `#9519 <https://github.com/numpy/numpy/pull/9519>`__: MAINT: Remove `level=` keyword from test arguments.
+* `#9520 <https://github.com/numpy/numpy/pull/9520>`__: MAINT: types.TypeType does not ever need to be used
+* `#9521 <https://github.com/numpy/numpy/pull/9521>`__: BUG: Make issubclass(np.number, numbers.Number) return true
+* `#9522 <https://github.com/numpy/numpy/pull/9522>`__: BUG: Fix problems with obj2sctype
+* `#9524 <https://github.com/numpy/numpy/pull/9524>`__: TST, MAINT: Add `__init__.py` files to tests directories.
+* `#9527 <https://github.com/numpy/numpy/pull/9527>`__: BUG: Fix scalar methods to receive keyword arguments
+* `#9529 <https://github.com/numpy/numpy/pull/9529>`__: BUG: The NAT deprecation warning should not be given for every...
+* `#9536 <https://github.com/numpy/numpy/pull/9536>`__: ENH: Show domain and window as kwargs in repr
+* `#9540 <https://github.com/numpy/numpy/pull/9540>`__: BUG: MaskedArray _optinfo dictionary is not updated when calling...
+* `#9543 <https://github.com/numpy/numpy/pull/9543>`__: DOC: Adding backslash between double-backtick and s.
+* `#9544 <https://github.com/numpy/numpy/pull/9544>`__: MAINT: Use the error_converting macro where possible
+* `#9545 <https://github.com/numpy/numpy/pull/9545>`__: DEP: Deprecate the event argument to datetime types, which is...
+* `#9550 <https://github.com/numpy/numpy/pull/9550>`__: DOC: removes broken docstring example (source code, png, pdf)...
+* `#9552 <https://github.com/numpy/numpy/pull/9552>`__: DOC, BUG: Fix Python 3.6 invalid escape sequence.
+* `#9554 <https://github.com/numpy/numpy/pull/9554>`__: BUG: fix regression in 1.13.x in distutils.mingw32ccompiler.
+* `#9564 <https://github.com/numpy/numpy/pull/9564>`__: BUG: fix distutils/cpuinfo.py:getoutput()
+* `#9574 <https://github.com/numpy/numpy/pull/9574>`__: BUG: deal with broken hypot() for MSVC on win32
+* `#9575 <https://github.com/numpy/numpy/pull/9575>`__: BUG: deal with broken cabs*() for MSVC on win32
+* `#9577 <https://github.com/numpy/numpy/pull/9577>`__: BUG: Missing dirichlet input validation
+* `#9581 <https://github.com/numpy/numpy/pull/9581>`__: DOC: Fix link in numpy.ndarray.copy method (missing backticks)
+* `#9582 <https://github.com/numpy/numpy/pull/9582>`__: ENH: Warn to change lstsq default for rcond
+* `#9586 <https://github.com/numpy/numpy/pull/9586>`__: DOC: update example in np.nonzero docstring
+* `#9588 <https://github.com/numpy/numpy/pull/9588>`__: MAINT: Remove direct access to flatiter attributes
+* `#9590 <https://github.com/numpy/numpy/pull/9590>`__: ENH: Remove unnecessary restriction in noncen-f
+* `#9591 <https://github.com/numpy/numpy/pull/9591>`__: MAINT: Remove unnecessary imports
+* `#9599 <https://github.com/numpy/numpy/pull/9599>`__: BUG: fix infinite loop when creating np.pad on an empty array
+* `#9601 <https://github.com/numpy/numpy/pull/9601>`__: DOC: rot90 wrongly positioned versionadded directive.
+* `#9604 <https://github.com/numpy/numpy/pull/9604>`__: MAINT: Refactor the code used to compute sha256, md5 hashes
+* `#9606 <https://github.com/numpy/numpy/pull/9606>`__: MAINT: Remove global statement in linalg.py
+* `#9609 <https://github.com/numpy/numpy/pull/9609>`__: BUG: Add `__ne__` method to dummy_ctype class.
+* `#9610 <https://github.com/numpy/numpy/pull/9610>`__: BUG: core: fix wrong method flags for scalartypes.c.src:gentype_copy
+* `#9611 <https://github.com/numpy/numpy/pull/9611>`__: MAINT: remove try..except clause.
+* `#9613 <https://github.com/numpy/numpy/pull/9613>`__: DOC: Update release notes for noncentral_f changes.
+* `#9614 <https://github.com/numpy/numpy/pull/9614>`__: MAINT: Fix a comment regarding the formula for arange length
+* `#9618 <https://github.com/numpy/numpy/pull/9618>`__: DOC: Fix type definitions in mtrand
+* `#9619 <https://github.com/numpy/numpy/pull/9619>`__: ENH: Allow Fortran arrays of dimension 0
+* `#9624 <https://github.com/numpy/numpy/pull/9624>`__: BUG: memory leak in np.dot of size 0
+* `#9626 <https://github.com/numpy/numpy/pull/9626>`__: BUG: Fix broken runtests '-t' option.
+* `#9629 <https://github.com/numpy/numpy/pull/9629>`__: BUG: test, fix issue #9620 __radd__ in char scalars
+* `#9630 <https://github.com/numpy/numpy/pull/9630>`__: DOC: Updates order of parameters in save docstring
+* `#9636 <https://github.com/numpy/numpy/pull/9636>`__: MAINT: Fix compiler warnings and update travis jobs
+* `#9638 <https://github.com/numpy/numpy/pull/9638>`__: BUG: ensure consistent result dtype of count_nonzero
+* `#9639 <https://github.com/numpy/numpy/pull/9639>`__: MAINT: Refactor updateifcopy
+* `#9640 <https://github.com/numpy/numpy/pull/9640>`__: BUG: fix padding an empty array in reflect mode.
+* `#9643 <https://github.com/numpy/numpy/pull/9643>`__: DOC: add new steering council members.
+* `#9645 <https://github.com/numpy/numpy/pull/9645>`__: ENH: enable OpenBLAS on windows.
+* `#9648 <https://github.com/numpy/numpy/pull/9648>`__: DOC: Correct the signature in pad doc for callable mode.
+* `#9649 <https://github.com/numpy/numpy/pull/9649>`__: DOC: Fixed doc example of apply along axis with 3D return
+* `#9652 <https://github.com/numpy/numpy/pull/9652>`__: BUG: Make system_info output reproducible
+* `#9658 <https://github.com/numpy/numpy/pull/9658>`__: BUG: Fix usage of keyword "from" as argument name for "can_cast".
+* `#9667 <https://github.com/numpy/numpy/pull/9667>`__: MAINT: Simplify block implementation
+* `#9668 <https://github.com/numpy/numpy/pull/9668>`__: DOC: clarify wording in tutorial
+* `#9672 <https://github.com/numpy/numpy/pull/9672>`__: BUG: dot/matmul 'out' arg should accept any ndarray subclass
+* `#9681 <https://github.com/numpy/numpy/pull/9681>`__: MAINT: Add block benchmarks
+* `#9682 <https://github.com/numpy/numpy/pull/9682>`__: DOC: Add whitespace after "versionadded::" directive so it actually...
+* `#9683 <https://github.com/numpy/numpy/pull/9683>`__: DOC: Add polyutils subpackage to reference documentation
+* `#9685 <https://github.com/numpy/numpy/pull/9685>`__: BUG: Fixes #7395, operator.index now fails on numpy.bool_
+* `#9688 <https://github.com/numpy/numpy/pull/9688>`__: MAINT: rework recursive guard to keep array2string signature
+* `#9691 <https://github.com/numpy/numpy/pull/9691>`__: PEP 3141 numbers should be considered scalars
+* `#9692 <https://github.com/numpy/numpy/pull/9692>`__: ENH: Add support of ARC architecture
+* `#9695 <https://github.com/numpy/numpy/pull/9695>`__: DOC: `start` is not needed even when `step` is given.
+* `#9700 <https://github.com/numpy/numpy/pull/9700>`__: DOC: Add mandatory memo argument to __deepcopy__ method documentation
+* `#9701 <https://github.com/numpy/numpy/pull/9701>`__: DOC: Add keepdims argument for ndarray.max documentation
+* `#9702 <https://github.com/numpy/numpy/pull/9702>`__: DOC: Warn about the difference between np.remainder and math.remainder
+* `#9703 <https://github.com/numpy/numpy/pull/9703>`__: DOC: Fix mistaken word in nanprod docstring
+* `#9707 <https://github.com/numpy/numpy/pull/9707>`__: MAINT: When linspace's step is a NumPy scalar, do multiplication in-place
+* `#9709 <https://github.com/numpy/numpy/pull/9709>`__: DOC: allclose doesn't require matching shapes
+* `#9711 <https://github.com/numpy/numpy/pull/9711>`__: BUG: Make scalar function elision check if writeable.
+* `#9715 <https://github.com/numpy/numpy/pull/9715>`__: MAINT: Fix typo "Porland" -> "Portland" in `building` doc.
+* `#9718 <https://github.com/numpy/numpy/pull/9718>`__: DEP: Deprecate truth testing on empty arrays
+* `#9720 <https://github.com/numpy/numpy/pull/9720>`__: MAINT: Remove unnecessary special-casing of scalars in isclose
+* `#9724 <https://github.com/numpy/numpy/pull/9724>`__: BUG: adjust gfortran version search regex
+* `#9725 <https://github.com/numpy/numpy/pull/9725>`__: MAINT: cleanup circular import b/w arrayprint.py,numeric.py
+* `#9726 <https://github.com/numpy/numpy/pull/9726>`__: ENH: Better error message for savetxt when X.ndim > 2 or X.ndim...
+* `#9737 <https://github.com/numpy/numpy/pull/9737>`__: MAINT: Use zip, not enumerate
+* `#9740 <https://github.com/numpy/numpy/pull/9740>`__: BUG: Ensure `_npy_scaled_cexp{,f,l}` is defined when needed.
+* `#9741 <https://github.com/numpy/numpy/pull/9741>`__: BUG: core: use npy_cabs for abs() for np.complex* scalar types
+* `#9743 <https://github.com/numpy/numpy/pull/9743>`__: MAINT: Use PyArray_CHKFLAGS in more places.
+* `#9749 <https://github.com/numpy/numpy/pull/9749>`__: BUG: Fix loss of precision for large values in long double divmod
+* `#9752 <https://github.com/numpy/numpy/pull/9752>`__: BUG: Errors thrown by 0d arrays in setitem are silenced and replaced
+* `#9753 <https://github.com/numpy/numpy/pull/9753>`__: DOC: Fix ndarray.__setstate__ documentation, it only takes one...
+* `#9755 <https://github.com/numpy/numpy/pull/9755>`__: BUG: Cython 0.27 breaks NumPy on Python 3.
+* `#9756 <https://github.com/numpy/numpy/pull/9756>`__: BUG/TST: Check if precision is lost in longcomplex
+* `#9762 <https://github.com/numpy/numpy/pull/9762>`__: MAINT: Use the PyArray_(GET|SET)_ITEM functions where possible
+* `#9768 <https://github.com/numpy/numpy/pull/9768>`__: MAINT: Cleanup `ma.array.__str__`
+* `#9770 <https://github.com/numpy/numpy/pull/9770>`__: MAINT,BUG: Fix mtrand for Cython 0.27.
+* `#9773 <https://github.com/numpy/numpy/pull/9773>`__: BUG: Fixes optimal einsum path for multi-term intermediates
+* `#9778 <https://github.com/numpy/numpy/pull/9778>`__: BUG: can_cast(127, np.int8) is False
+* `#9779 <https://github.com/numpy/numpy/pull/9779>`__: BUG: np.ma.trace gives the wrong result on ND arrays
+* `#9780 <https://github.com/numpy/numpy/pull/9780>`__: MAINT: Make f2py generated file not contain the (local) date.
+* `#9782 <https://github.com/numpy/numpy/pull/9782>`__: DOC: Update after NumPy 1.13.2 release.
+* `#9784 <https://github.com/numpy/numpy/pull/9784>`__: BUG: remove voidtype-repr recursion in scalartypes.c/arrayprint.py
+* `#9785 <https://github.com/numpy/numpy/pull/9785>`__: BUG: Fix size-checking in masked_where, and structured shrink_mask
+* `#9792 <https://github.com/numpy/numpy/pull/9792>`__: ENH: Various improvements to Maskedarray repr
+* `#9796 <https://github.com/numpy/numpy/pull/9796>`__: TST: linalg: add basic smoketest for cholesky
+* `#9800 <https://github.com/numpy/numpy/pull/9800>`__: DOC: Clean up README
+* `#9803 <https://github.com/numpy/numpy/pull/9803>`__: DOC: add missing underscore in set_printoptions
+* `#9805 <https://github.com/numpy/numpy/pull/9805>`__: CI: set correct test mode for appveyor
+* `#9806 <https://github.com/numpy/numpy/pull/9806>`__: MAINT: Add appveyor badge to README
+* `#9807 <https://github.com/numpy/numpy/pull/9807>`__: MAINT: Make appveyor config a dot-file
+* `#9810 <https://github.com/numpy/numpy/pull/9810>`__: DOC: Improve ndarray.shape documentation.
+* `#9812 <https://github.com/numpy/numpy/pull/9812>`__: DOC: update scipy.integrate recommendation
+* `#9814 <https://github.com/numpy/numpy/pull/9814>`__: BUG: Fix datetime->string conversion
+* `#9815 <https://github.com/numpy/numpy/pull/9815>`__: BUG: fix stray comma in _array2string
+* `#9817 <https://github.com/numpy/numpy/pull/9817>`__: BUG: Added exception for casting numpy.ma.masked to long
+* `#9822 <https://github.com/numpy/numpy/pull/9822>`__: BUG: Allow subclasses of MaskedConstant to behave as unique singletons
+* `#9824 <https://github.com/numpy/numpy/pull/9824>`__: BUG: Fixes for np.random.zipf
+* `#9826 <https://github.com/numpy/numpy/pull/9826>`__: DOC: Add unravel_index examples to np.arg(min|max|sort)
+* `#9828 <https://github.com/numpy/numpy/pull/9828>`__: DOC: Improve documentation of axis parameter in numpy.unpackbits()
+* `#9835 <https://github.com/numpy/numpy/pull/9835>`__: BENCH: Added missing ufunc benchmarks
+* `#9840 <https://github.com/numpy/numpy/pull/9840>`__: DOC: ndarray.__copy__ takes no arguments
+* `#9842 <https://github.com/numpy/numpy/pull/9842>`__: BUG: Prevent invalid array shapes in seed
+* `#9845 <https://github.com/numpy/numpy/pull/9845>`__: DOC: Refine SVD documentation
+* `#9849 <https://github.com/numpy/numpy/pull/9849>`__: MAINT: Fix all special-casing of dtypes in `count_nonzero`
+* `#9854 <https://github.com/numpy/numpy/pull/9854>`__: BLD: distutils: auto-find vcpkg include and library directories
+* `#9856 <https://github.com/numpy/numpy/pull/9856>`__: BUG: Make bool(void_scalar) and void_scalar.astype(bool) consistent
+* `#9858 <https://github.com/numpy/numpy/pull/9858>`__: DOC: Some minor fixes regarding import_array
+* `#9862 <https://github.com/numpy/numpy/pull/9862>`__: BUG: Restore the environment variables when import multiarray...
+* `#9863 <https://github.com/numpy/numpy/pull/9863>`__: ENH: Save to ZIP files without using temporary files.
+* `#9865 <https://github.com/numpy/numpy/pull/9865>`__: DOC: Replace PyFITS reference with Astropy and PyTables with...
+* `#9866 <https://github.com/numpy/numpy/pull/9866>`__: BUG: Fix runtests --benchmark-compare in python 3
+* `#9868 <https://github.com/numpy/numpy/pull/9868>`__: DOC: Update arraypad to use np.pad in examples
+* `#9869 <https://github.com/numpy/numpy/pull/9869>`__: DOC: Make qr options render correctly as list.
+* `#9881 <https://github.com/numpy/numpy/pull/9881>`__: BUG: count_nonzero treats empty axis tuples strangely
+* `#9883 <https://github.com/numpy/numpy/pull/9883>`__: ENH: Implement ndarray.__format__ for 0d arrays
+* `#9884 <https://github.com/numpy/numpy/pull/9884>`__: BUG: Allow `unravel_index(0, ())` to return ()
+* `#9887 <https://github.com/numpy/numpy/pull/9887>`__: BUG: add.reduce gives wrong results for arrays with funny strides
+* `#9888 <https://github.com/numpy/numpy/pull/9888>`__: MAINT: Remove workarounds for gh-9527
+* `#9889 <https://github.com/numpy/numpy/pull/9889>`__: MAINT: Tidy np.histogram, and improve error messages
+* `#9893 <https://github.com/numpy/numpy/pull/9893>`__: ENH: Added compatibility for the NAG Fortran compiler, nagfor
+* `#9896 <https://github.com/numpy/numpy/pull/9896>`__: DOC: Unindent enumeration in savetxt docstring
+* `#9899 <https://github.com/numpy/numpy/pull/9899>`__: Remove unused isscalar imports, and incorrect documentation using...
+* `#9900 <https://github.com/numpy/numpy/pull/9900>`__: MAINT/BUG: Remove special-casing for 0d arrays, now that indexing...
+* `#9904 <https://github.com/numpy/numpy/pull/9904>`__: MAINT: Make warnings for nanmin and nanmax consistent
+* `#9911 <https://github.com/numpy/numpy/pull/9911>`__: CI: travis: switch to container
+* `#9912 <https://github.com/numpy/numpy/pull/9912>`__: BENCH: histogramming benchmarks
+* `#9913 <https://github.com/numpy/numpy/pull/9913>`__: MAINT: Tidy up Maskedarray repr
+* `#9916 <https://github.com/numpy/numpy/pull/9916>`__: DOC: Clarify behavior of genfromtxt names field
+* `#9920 <https://github.com/numpy/numpy/pull/9920>`__: DOC: dot: Add explanation in case `b` has only 1 dimension.
+* `#9925 <https://github.com/numpy/numpy/pull/9925>`__: DOC: ndarray.reshape allows shape as int arguments or tuple
+* `#9930 <https://github.com/numpy/numpy/pull/9930>`__: MAINT: Add parameter checks to polynomial integration functions.
+* `#9936 <https://github.com/numpy/numpy/pull/9936>`__: DOC: Clarify docstring for numpy.array_split
+* `#9941 <https://github.com/numpy/numpy/pull/9941>`__: ENH: Use Dragon4 algorithm to print floating values
+* `#9942 <https://github.com/numpy/numpy/pull/9942>`__: ENH: Add PGI flang compiler support for Windows
+* `#9944 <https://github.com/numpy/numpy/pull/9944>`__: MAINT/BUG: Don't squash useful error messages in favor of generic...
+* `#9945 <https://github.com/numpy/numpy/pull/9945>`__: DOC: fix operation plural in along axis glossary
+* `#9946 <https://github.com/numpy/numpy/pull/9946>`__: DOC: describe the expansion of take and apply_along_axis in detail
+* `#9947 <https://github.com/numpy/numpy/pull/9947>`__: MAINT/TST: Tidy dtype indexing
+* `#9950 <https://github.com/numpy/numpy/pull/9950>`__: BUG: Passing an incorrect type to dtype.__getitem__ should raise...
+* `#9952 <https://github.com/numpy/numpy/pull/9952>`__: ENH: add Decimal support to numpy.lib.financial
+* `#9953 <https://github.com/numpy/numpy/pull/9953>`__: MAINT: Add a PyDataType_ISUNSIZED macro
+* `#9957 <https://github.com/numpy/numpy/pull/9957>`__: DOC: update asv url
+* `#9961 <https://github.com/numpy/numpy/pull/9961>`__: BUG: Allow float64('1e10000') to overflow
+* `#9962 <https://github.com/numpy/numpy/pull/9962>`__: MAINT: Rename formatters to match scalar type names
+* `#9965 <https://github.com/numpy/numpy/pull/9965>`__: BLD: Disable npymath whole program opt (LTCG) on win32
+* `#9966 <https://github.com/numpy/numpy/pull/9966>`__: BUG: str(np.float) should print with the same number of digits...
+* `#9967 <https://github.com/numpy/numpy/pull/9967>`__: MAINT: Separate correct `longdouble.__float__` from incorrect...
+* `#9971 <https://github.com/numpy/numpy/pull/9971>`__: BUG: Fix casting from longdouble to long
+* `#9973 <https://github.com/numpy/numpy/pull/9973>`__: TST: Fix error in test on PyPy, add comment explaining known...
+* `#9976 <https://github.com/numpy/numpy/pull/9976>`__: BUG: Ensure lstsq can handle RHS with all sizes.
+* `#9977 <https://github.com/numpy/numpy/pull/9977>`__: MAINT: distutils: trivial cleanups
+* `#9978 <https://github.com/numpy/numpy/pull/9978>`__: BUG: cast to str_ should not convert to pure-python intermediate
+* `#9983 <https://github.com/numpy/numpy/pull/9983>`__: ENH: let f2py discover location of libgfortran
+* `#9985 <https://github.com/numpy/numpy/pull/9985>`__: ENH: skip NPY_ALLOW_C_API for UFUNC_ERR_IGNORE
+* `#9986 <https://github.com/numpy/numpy/pull/9986>`__: MAINT: Remove similar branches from linalg.lstsq
+* `#9991 <https://github.com/numpy/numpy/pull/9991>`__: MAINT: small robustness change for mingw support on Windows.
+* `#9994 <https://github.com/numpy/numpy/pull/9994>`__: BUG: test was not using 'mode'
+* `#9996 <https://github.com/numpy/numpy/pull/9996>`__: ENH: Adding `order=` keyword to `np.eye()`.
+* `#9997 <https://github.com/numpy/numpy/pull/9997>`__: BUG: prototypes for [cz]dot[uc] are incorrect
+* `#9999 <https://github.com/numpy/numpy/pull/9999>`__: ENH: Make `np.in1d()` work for unorderable object arrays
+* `#10000 <https://github.com/numpy/numpy/pull/10000>`__: MAINT: Fix test_int_from_huge_longdouble on Darwin.
+* `#10005 <https://github.com/numpy/numpy/pull/10005>`__: DOC: reword PyArray_DiscardWritebackIfCopy description
+* `#10006 <https://github.com/numpy/numpy/pull/10006>`__: NEP: Drop Python2 support.
+* `#10007 <https://github.com/numpy/numpy/pull/10007>`__: MAINT: simplify logic from #9983
+* `#10008 <https://github.com/numpy/numpy/pull/10008>`__: MAINT: Backcompat fixes for dragon4 changes
+* `#10011 <https://github.com/numpy/numpy/pull/10011>`__: TST: Group together all the nested_iter tests
+* `#10017 <https://github.com/numpy/numpy/pull/10017>`__: REV: Undo bad rebase in 7fdfdd6a52fc0761c0d45931247c5ed2480224eb...
+* `#10021 <https://github.com/numpy/numpy/pull/10021>`__: ENH: Don't show the boolean dtype in array_repr
+* `#10022 <https://github.com/numpy/numpy/pull/10022>`__: MAINT: Update c-api version and hash for NumPy 1.14.
+* `#10030 <https://github.com/numpy/numpy/pull/10030>`__: MAINT: Legacy mode specified as string, fix all-zeros legacy...
+* `#10031 <https://github.com/numpy/numpy/pull/10031>`__: BUG: Fix f2py string variables in callbacks.
+* `#10032 <https://github.com/numpy/numpy/pull/10032>`__: MAINT: Remove newline before dtype in repr of arrays
+* `#10034 <https://github.com/numpy/numpy/pull/10034>`__: MAINT: legacy-printing-mode preserves 1.13 float & complex str
+* `#10042 <https://github.com/numpy/numpy/pull/10042>`__: BUG: Allow `int` to be called on nested object arrays, fix `np.str_.__int__`
+* `#10044 <https://github.com/numpy/numpy/pull/10044>`__: DEP: FutureWarning for void.item(): Will return bytes
+* `#10049 <https://github.com/numpy/numpy/pull/10049>`__: DOC: Add copy of deprecated defindex.html template.
+* `#10052 <https://github.com/numpy/numpy/pull/10052>`__: BUG: Fix legacy printing mode check.
+* `#10053 <https://github.com/numpy/numpy/pull/10053>`__: STY: C style whitespace fixups
+* `#10054 <https://github.com/numpy/numpy/pull/10054>`__: ENH: Add encoding option to numpy text IO.
+* `#10055 <https://github.com/numpy/numpy/pull/10055>`__: BUG: Changed dump(a, F) so it would close file
+* `#10057 <https://github.com/numpy/numpy/pull/10057>`__: DOC: v/h/dstack docstr shouldn't imply deprecation
+* `#10065 <https://github.com/numpy/numpy/pull/10065>`__: DOC, BLD: Update site.cfg.example on the MKL part.
+* `#10067 <https://github.com/numpy/numpy/pull/10067>`__: MAINT: Replace sphinx extension sphinx.ext.pngmath by sphinx.ext.imgmath.
+* `#10068 <https://github.com/numpy/numpy/pull/10068>`__: BUG: Fix memory leak for subclass slicing
+* `#10072 <https://github.com/numpy/numpy/pull/10072>`__: MAINT: Fix minor typos in numpy/core/fromnumeric.py
+* `#10079 <https://github.com/numpy/numpy/pull/10079>`__: DOC: mention generalized ufuncs, document signature attribute
+* `#10096 <https://github.com/numpy/numpy/pull/10096>`__: BUG: Fix assert_equal on time-like objects
+* `#10097 <https://github.com/numpy/numpy/pull/10097>`__: BUG: Fix crash for 0d timedelta repr
+* `#10101 <https://github.com/numpy/numpy/pull/10101>`__: BUG: Fix out-of-bounds access when handling rank-zero ndarrays.
+* `#10105 <https://github.com/numpy/numpy/pull/10105>`__: DOC: Update license documentation.
+* `#10108 <https://github.com/numpy/numpy/pull/10108>`__: DOC: Add documentation for datetime_data
+* `#10109 <https://github.com/numpy/numpy/pull/10109>`__: DOC: fix the lack of np.
+* `#10111 <https://github.com/numpy/numpy/pull/10111>`__: ENH: Improve alignment of datetime64 arrays containing NaT
+* `#10112 <https://github.com/numpy/numpy/pull/10112>`__: MAINT: Simplify IntegerFormatter
+* `#10113 <https://github.com/numpy/numpy/pull/10113>`__: BUG: Fix further out-of-bounds accesses when handling 0d ndarrays
+* `#10114 <https://github.com/numpy/numpy/pull/10114>`__: MAINT: Remove duplicate cond check from assert_array_compare
+* `#10116 <https://github.com/numpy/numpy/pull/10116>`__: BLD: [ipo] compilation error with intel compiler
+* `#10120 <https://github.com/numpy/numpy/pull/10120>`__: BUG: stray comma should be preserved for legacy printing
+* `#10121 <https://github.com/numpy/numpy/pull/10121>`__: DOC: Summarize printing changes in release notes
+* `#10125 <https://github.com/numpy/numpy/pull/10125>`__: BLD: Add license file to NumPy wheels.
+* `#10129 <https://github.com/numpy/numpy/pull/10129>`__: ENH: Strip trailing spaces from continuation in multiline arrayprint
+* `#10130 <https://github.com/numpy/numpy/pull/10130>`__: MAINT: Simplify _leading_trailing
+* `#10131 <https://github.com/numpy/numpy/pull/10131>`__: BUG: Fix downcasting in _array2string
+* `#10136 <https://github.com/numpy/numpy/pull/10136>`__: BUG: edgeitems kwarg is ignored
+* `#10143 <https://github.com/numpy/numpy/pull/10143>`__: MAINT: Combine legacy sections of _formatArray
+* `#10159 <https://github.com/numpy/numpy/pull/10159>`__: DOC: Update 1.14 notes
+* `#10160 <https://github.com/numpy/numpy/pull/10160>`__: BUG: test, fix problems from PR #9639
+* `#10164 <https://github.com/numpy/numpy/pull/10164>`__: MAINT/BUG: Simplify _formatArray, fixing array_repr(matrix) in...
+* `#10166 <https://github.com/numpy/numpy/pull/10166>`__: DOC: document PyArray_ResolveWritebackIfCopy
+* `#10168 <https://github.com/numpy/numpy/pull/10168>`__: DOC: continuation of PyArray_ResolveIfCopy fixes
+* `#10172 <https://github.com/numpy/numpy/pull/10172>`__: BUG: The last line of formatArray is not always wrapped correctly
+* `#10175 <https://github.com/numpy/numpy/pull/10175>`__: BUG: linewidth was not respected for arrays other than 1d
+* `#10176 <https://github.com/numpy/numpy/pull/10176>`__: ENH: add suffix option to array2str, wraps properly
+* `#10177 <https://github.com/numpy/numpy/pull/10177>`__: MAINT, BUG: Final 1.14 formatting fixes
+* `#10182 <https://github.com/numpy/numpy/pull/10182>`__: BUG: Extra space is inserted on first line for long elements
+* `#10190 <https://github.com/numpy/numpy/pull/10190>`__: BUG: Fix regression in np.ma.load in gh-10055
+* `#10200 <https://github.com/numpy/numpy/pull/10200>`__: BUG: Ufunc reduce reference leak (backport)
+* `#10202 <https://github.com/numpy/numpy/pull/10202>`__: BUG: Fix bugs found by testing in release mode.
+* `#10272 <https://github.com/numpy/numpy/pull/10272>`__: BUG: Align extra-dll folder name with auditwheel
+* `#10275 <https://github.com/numpy/numpy/pull/10275>`__: BUG: fix duplicate message print
+* `#10276 <https://github.com/numpy/numpy/pull/10276>`__: MAINT: Workaround for new travis sdist failures.
+* `#10311 <https://github.com/numpy/numpy/pull/10311>`__: BUG: Make sure einsum default value of `optimize` is True.
+* `#10312 <https://github.com/numpy/numpy/pull/10312>`__: BUG: Handle NaNs correctly in arange
+* `#10313 <https://github.com/numpy/numpy/pull/10313>`__: BUG: Don't reimplement isclose in np.ma
+* `#10315 <https://github.com/numpy/numpy/pull/10315>`__: DOC: NumPy 1.14.0 release prep.
diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index b1dd24145..166502ac5 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -320,7 +320,7 @@ raise a ``TypeError``.
The ``linalg.norm`` function now does all its computations in floating point
and returns floating results. This change fixes bugs due to integer overflow
and the failure of abs with signed integers of minimum value, e.g., int8(-128).
-For consistancy, floats are used even where an integer might work.
+For consistency, floats are used even where an integer might work.
Deprecations
diff --git a/doc/release/1.12.0-notes.rst b/doc/release/1.12.0-notes.rst
index 229593ed9..711055d16 100644
--- a/doc/release/1.12.0-notes.rst
+++ b/doc/release/1.12.0-notes.rst
@@ -190,7 +190,7 @@ ma.median warns and returns nan when unmasked invalid values are encountered
Similar to unmasked median the masked median `ma.median` now emits a Runtime
warning and returns `NaN` in slices where an unmasked `NaN` is present.
-Greater consistancy in ``assert_almost_equal``
+Greater consistency in ``assert_almost_equal``
----------------------------------------------
The precision check for scalars has been changed to match that for arrays. It
is now::
diff --git a/doc/release/1.14.0-notes.rst b/doc/release/1.14.0-notes.rst
index 03b9263bc..0f14f7703 100644
--- a/doc/release/1.14.0-notes.rst
+++ b/doc/release/1.14.0-notes.rst
@@ -179,10 +179,10 @@ functions, and if used would likely correspond to a typo.
Previously, this would promote to ``float64`` when arbitrary orders were
passed, despite not doing so under the simple cases::
- >>> f32 = np.float32([1, 2])
- >>> np.linalg.norm(f32, 2.0).dtype
+ >>> f32 = np.float32([[1, 2]])
+ >>> np.linalg.norm(f32, 2.0, axis=-1).dtype
dtype('float32')
- >>> np.linalg.norm(f32, 2.0001).dtype
+ >>> np.linalg.norm(f32, 2.0001, axis=-1).dtype
dtype('float64') # numpy 1.13
dtype('float32') # numpy 1.14
@@ -307,20 +307,16 @@ In summary, the major changes are:
* User-defined ``dtypes`` (subclasses of ``np.generic``) now need to
implement ``__str__`` and ``__repr__``.
-You may want to do something like::
+Some of these changes are described in more detail below. If you need to retain
+the previous behavior for doctests or other reasons, you may want to do
+something like::
- # FIXME: Set numpy array str/repr to legacy behaviour on numpy > 1.13
+ # FIXME: We need the str/repr formatting used in Numpy < 1.14.
try:
np.set_printoptions(legacy='1.13')
except TypeError:
pass
-after ::
-
- import numpy as np
-
-Some of these changes are described in more detail below.
-
C API changes
=============
diff --git a/doc/release/1.14.1-notes.rst b/doc/release/1.14.1-notes.rst
new file mode 100644
index 000000000..d0512f9b1
--- /dev/null
+++ b/doc/release/1.14.1-notes.rst
@@ -0,0 +1,21 @@
+==========================
+NumPy 1.14.1 Release Notes
+==========================
+
+This is a bugfix release for some problems found since 1.14.0. This release
+includes fixes to the spacing in the str and repr of complex values.
+
+The Python versions supported are 2.7 and 3.4 - 3.6. The Python 3.6 wheels
+available from PIP are built with Python 3.6.2 and should be compatible with
+all previous versions of Python 3.6. It was cythonized with Cython 0.26.1,
+which should be free of the bugs found in 0.27 while also being compatible with
+Python 3.7-dev.
+
+Contributors
+============
+
+A total of xx people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+Pull requests merged
+====================
diff --git a/doc/release/1.15.0-notes.rst b/doc/release/1.15.0-notes.rst
index 16a44113c..0b1408d1f 100644
--- a/doc/release/1.15.0-notes.rst
+++ b/doc/release/1.15.0-notes.rst
@@ -15,6 +15,13 @@ New functions
* `np.ma.stack`, the `np.stack` array-joining function generalized to masked
arrays.
+* `np.printoptions`, the context manager which sets print options temporarily
+ for the scope of the ``with`` block::
+
+ >>> with np.printoptions(precision=2):
+ ... print(np.array([2.0]) / 3)
+ [0.67]
+
Deprecations
============
@@ -101,5 +108,11 @@ Change to simd.inc.src to use AVX2 or AVX512 at compile time. Solving the gap
that arose when numpy was compiled for avx2 (or 512) with -march=native: the SSE
code was still used for the simd functions even though the rest of the code got AVX2.
+``nan_to_num`` always returns scalars when receiving scalar or 0d inputs
+------------------------------------------------------------------------
+Previously an array was returned for integer scalar inputs, which is
+inconsistent with the behavior for float inputs, and that of ufuncs in general.
+For all types of scalar or 0d input, the result is now a scalar.
+
Changes
=======
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst
index 82cac676e..ad7c725a8 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api.array.rst
@@ -382,10 +382,11 @@ From other objects
sequence, or object that exposes the array interface, *op*. The
parameters allow specification of the required *dtype*, the
minimum (*min_depth*) and maximum (*max_depth*) number of
- dimensions acceptable, and other *requirements* for the array. The
- *dtype* argument needs to be a :c:type:`PyArray_Descr` structure
+ dimensions acceptable, and other *requirements* for the array. This
+ function **steals a reference** to the dtype argument, which needs
+ to be a :c:type:`PyArray_Descr` structure
indicating the desired data-type (including required
- byteorder). The *dtype* argument may be NULL, indicating that any
+ byteorder). The *dtype* argument may be ``NULL``, indicating that any
data-type (and byteorder) is acceptable. Unless
:c:data:`NPY_ARRAY_FORCECAST` is present in ``flags``,
this call will generate an error if the data
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index 00a627ac4..ae379624e 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -310,7 +310,7 @@ Linear Algebra Equivalents
* - ``[ a b; c d ]``
- ``vstack([hstack([a,b]), hstack([c,d])])`` or
- ``bmat('a b; c d').A``
      ``block([[a, b], [c, d]])``
- construct a matrix from blocks ``a``, ``b``, ``c``, and ``d``
* - ``a(end)``
@@ -369,7 +369,7 @@ Linear Algebra Equivalents
- conjugate transpose of ``a``
* - ``a * b``
- - ``a.dot(b)``
+ - ``a.dot(b)`` or ``a@b`` (Python 3.5 or newer)
- matrix multiply
* - ``a .* b``
diff --git a/numpy/_build_utils/common.py b/numpy/_build_utils/common.py
deleted file mode 100644
index 8435c462c..000000000
--- a/numpy/_build_utils/common.py
+++ /dev/null
@@ -1,138 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import sys
-import copy
-import binascii
-
-LONG_DOUBLE_REPRESENTATION_SRC = r"""
-/* "before" is 16 bytes to ensure there's no padding between it and "x".
- * We're not expecting any "long double" bigger than 16 bytes or with
- * alignment requirements stricter than 16 bytes. */
-typedef %(type)s test_type;
-
-struct {
- char before[16];
- test_type x;
- char after[8];
-} foo = {
- { '\0', '\0', '\0', '\0', '\0', '\0', '\0', '\0',
- '\001', '\043', '\105', '\147', '\211', '\253', '\315', '\357' },
- -123456789.0,
- { '\376', '\334', '\272', '\230', '\166', '\124', '\062', '\020' }
-};
-"""
-
-def pyod(filename):
- """Python implementation of the od UNIX utility (od -b, more exactly).
-
- Parameters
- ----------
- filename : str
- name of the file to get the dump from.
-
- Returns
- -------
- out : seq
- list of lines of od output
-
- Note
- ----
- We only implement enough to get the necessary information for long double
- representation, this is not intended as a compatible replacement for od.
- """
- def _pyod2():
- out = []
-
- fid = open(filename, 'r')
- try:
- yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
- for i in range(0, len(yo), 16):
- line = ['%07d' % int(oct(i))]
- line.extend(['%03d' % c for c in yo[i:i+16]])
- out.append(" ".join(line))
- return out
- finally:
- fid.close()
-
- def _pyod3():
- out = []
-
- fid = open(filename, 'rb')
- try:
- yo2 = [oct(o)[2:] for o in fid.read()]
- for i in range(0, len(yo2), 16):
- line = ['%07d' % int(oct(i)[2:])]
- line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
- out.append(" ".join(line))
- return out
- finally:
- fid.close()
-
- if sys.version_info[0] < 3:
- return _pyod2()
- else:
- return _pyod3()
-
-_BEFORE_SEQ = ['000', '000', '000', '000', '000', '000', '000', '000',
- '001', '043', '105', '147', '211', '253', '315', '357']
-_AFTER_SEQ = ['376', '334', '272', '230', '166', '124', '062', '020']
-
-_IEEE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000']
-_IEEE_DOUBLE_LE = _IEEE_DOUBLE_BE[::-1]
-_INTEL_EXTENDED_12B = ['000', '000', '000', '000', '240', '242', '171', '353',
- '031', '300', '000', '000']
-_INTEL_EXTENDED_16B = ['000', '000', '000', '000', '240', '242', '171', '353',
- '031', '300', '000', '000', '000', '000', '000', '000']
-_IEEE_QUAD_PREC_BE = ['300', '031', '326', '363', '105', '100', '000', '000',
- '000', '000', '000', '000', '000', '000', '000', '000']
-_IEEE_QUAD_PREC_LE = _IEEE_QUAD_PREC_BE[::-1]
-_DOUBLE_DOUBLE_BE = ['301', '235', '157', '064', '124', '000', '000', '000'] + \
- ['000'] * 8
-
-def long_double_representation(lines):
- """Given a binary dump as given by GNU od -b, look for long double
- representation."""
-
- # Read contains a list of 32 items, each item is a byte (in octal
- # representation, as a string). We 'slide' over the output until read is of
- # the form before_seq + content + after_sequence, where content is the long double
- # representation:
- # - content is 12 bytes: 80 bits Intel representation
- # - content is 16 bytes: 80 bits Intel representation (64 bits) or quad precision
- # - content is 8 bytes: same as double (not implemented yet)
- read = [''] * 32
- saw = None
- for line in lines:
- # we skip the first word, as od -b output an index at the beginning of
- # each line
- for w in line.split()[1:]:
- read.pop(0)
- read.append(w)
-
- # If the end of read is equal to the after_sequence, read contains
- # the long double
- if read[-8:] == _AFTER_SEQ:
- saw = copy.copy(read)
- if read[:12] == _BEFORE_SEQ[4:]:
- if read[12:-8] == _INTEL_EXTENDED_12B:
- return 'INTEL_EXTENDED_12_BYTES_LE'
- elif read[:8] == _BEFORE_SEQ[8:]:
- if read[8:-8] == _INTEL_EXTENDED_16B:
- return 'INTEL_EXTENDED_16_BYTES_LE'
- elif read[8:-8] == _IEEE_QUAD_PREC_BE:
- return 'IEEE_QUAD_BE'
- elif read[8:-8] == _IEEE_QUAD_PREC_LE:
- return 'IEEE_QUAD_LE'
- elif read[8:-8] == _DOUBLE_DOUBLE_BE:
- return 'DOUBLE_DOUBLE_BE'
- elif read[:16] == _BEFORE_SEQ:
- if read[16:-8] == _IEEE_DOUBLE_LE:
- return 'IEEE_DOUBLE_LE'
- elif read[16:-8] == _IEEE_DOUBLE_BE:
- return 'IEEE_DOUBLE_BE'
-
- if saw is not None:
- raise ValueError("Unrecognized format (%s)" % saw)
- else:
- # We never detected the after_sequence
- raise ValueError("Could not lock sequences (%s)" % saw)
diff --git a/numpy/_globals.py b/numpy/_globals.py
index 2d7b69bc4..9a7b458f1 100644
--- a/numpy/_globals.py
+++ b/numpy/_globals.py
@@ -52,11 +52,25 @@ class VisibleDeprecationWarning(UserWarning):
"""
pass
-
-class _NoValue(object):
+class _NoValueType(object):
"""Special keyword value.
- This class may be used as the default value assigned to a deprecated
- keyword in order to check if it has been given a user defined value.
+ The instance of this class may be used as the default value assigned to a
+ deprecated keyword in order to check if it has been given a user defined
+ value.
"""
- pass
+ __instance = None
+ def __new__(cls):
+ # ensure that only one instance exists
+ if not cls.__instance:
+ cls.__instance = super(_NoValueType, cls).__new__(cls)
+ return cls.__instance
+
+ # needed for python 2 to preserve identity through a pickle
+ def __reduce__(self):
+ return (self.__class__, ())
+
+ def __repr__(self):
+ return "<no value>"
+
+_NoValue = _NoValueType()
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 44f681ee5..7dfecdb80 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -1232,7 +1232,8 @@ add_newdoc('numpy.core.multiarray', 'concatenate',
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
- The axis along which the arrays will be joined. Default is 0.
+ The axis along which the arrays will be joined. If axis is None,
+ arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
@@ -1276,6 +1277,8 @@ add_newdoc('numpy.core.multiarray', 'concatenate',
>>> np.concatenate((a, b.T), axis=1)
array([[1, 2, 5],
[3, 4, 6]])
+ >>> np.concatenate((a, b), axis=None)
+ array([1, 2, 3, 4, 5, 6])
This function will not preserve masking of MaskedArray inputs.
@@ -4522,7 +4525,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
"""
a.partition(kth, axis=-1, kind='introselect', order=None)
- Rearranges the elements in the array in such a way that value of the
+ Rearranges the elements in the array in such a way that the value of the
element in kth position is in the position it would be in a sorted array.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
@@ -4536,7 +4539,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
- The order all elements in the partitions is undefined.
+ The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
@@ -4546,8 +4549,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
- which fields to compare first, second, etc. A single field can
- be specified as a string, and not all fields need be specified,
+ which fields to compare first, second, etc. A single field can
+ be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 004c2762b..8c6596d13 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -110,6 +110,10 @@ def _array_descr(descriptor):
num = field[1] - offset
result.append(('', '|V%d' % num))
offset += num
+ elif field[1] < offset:
+ raise ValueError(
+ "dtype.descr is not defined for types with overlapping or "
+ "out-of-order fields")
if len(field) > 3:
name = (field[2], field[3])
else:
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 238e1782f..472318098 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -6,8 +6,8 @@ $Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
from __future__ import division, absolute_import, print_function
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
- "set_printoptions", "get_printoptions", "format_float_positional",
- "format_float_scientific"]
+ "set_printoptions", "get_printoptions", "printoptions",
+ "format_float_positional", "format_float_scientific"]
__docformat__ = 'restructuredtext'
#
@@ -49,7 +49,7 @@ from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
import warnings
-
+import contextlib
_format_options = {
'edgeitems': 3, # repr N leading and trailing items of each dimension
@@ -99,8 +99,10 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
Parameters
----------
- precision : int, optional
+ precision : int or None, optional
Number of digits of precision for floating point output (default 8).
+ May be `None` if `floatmode` is not `fixed`, to print as many digits as
+ necessary to uniquely specify the value.
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
@@ -240,6 +242,8 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
# set the C variable for legacy mode
if _format_options['legacy'] == '1.13':
set_legacy_print_mode(113)
+ # reset the sign option in legacy mode to avoid confusion
+ _format_options['sign'] = '-'
elif _format_options['legacy'] is False:
set_legacy_print_mode(0)
@@ -273,6 +277,39 @@ def get_printoptions():
return _format_options.copy()
+@contextlib.contextmanager
+def printoptions(*args, **kwargs):
+ """Context manager for setting print options.
+
+ Set print options for the scope of the `with` block, and restore the old
+ options at the end. See `set_printoptions` for the full description of
+ available options.
+
+ Examples
+ --------
+
+ >>> with np.printoptions(precision=2):
+ ... print(np.array([2.0]) / 3)
+ [0.67]
+
+ The `as`-clause of the `with`-statement gives the current print options:
+
+ >>> with np.printoptions(precision=2) as opts:
+ ... assert_equal(opts, np.get_printoptions())
+
+ See Also
+ --------
+ set_printoptions, get_printoptions
+
+ """
+ opts = np.get_printoptions()
+ try:
+ np.set_printoptions(*args, **kwargs)
+ yield np.get_printoptions()
+ finally:
+ np.set_printoptions(**opts)
+
+
def _leading_trailing(a, edgeitems, index=()):
"""
Keep only the N-D corners (leading and trailing edges) of an array.
@@ -469,7 +506,7 @@ def array2string(a, max_line_width=None, precision=None,
max_line_width : int, optional
The maximum number of columns the string should span. Newline
characters splits the string appropriately after array elements.
- precision : int, optional
+ precision : int or None, optional
Floating point precision. Default is the current printing
precision (usually 8), which can be altered using `set_printoptions`.
suppress_small : bool, optional
@@ -707,7 +744,7 @@ def _formatArray(a, format_function, line_width, next_line_prefix,
line += separator
if legacy == '1.13':
- # width of the seperator is not considered on 1.13
+ # width of the separator is not considered on 1.13
elem_width = curr_width
word = recurser(index + (-1,), next_hanging_indent, next_width)
s, line = _extendLine(
@@ -749,6 +786,13 @@ def _formatArray(a, format_function, line_width, next_line_prefix,
curr_width=line_width)
+def _none_or_positive_arg(x, name):
+ if x is None:
+ return -1
+ if x < 0:
+ raise ValueError("{} must be >= 0".format(name))
+ return x
+
class FloatingFormat(object):
""" Formatter for subtypes of np.floating """
def __init__(self, data, precision, floatmode, suppress_small, sign=False,
@@ -759,17 +803,18 @@ class FloatingFormat(object):
self._legacy = kwarg.get('legacy', False)
if self._legacy == '1.13':
- sign = '-' if data.shape == () else ' '
+ # when not 0d, legacy does not support '-'
+ if data.shape != () and sign == '-':
+ sign = ' '
self.floatmode = floatmode
if floatmode == 'unique':
- self.precision = -1
+ self.precision = None
else:
- if precision < 0:
- raise ValueError(
- "precision must be >= 0 in {} mode".format(floatmode))
self.precision = precision
+ self.precision = _none_or_positive_arg(self.precision, 'precision')
+
self.suppress_small = suppress_small
self.sign = sign
self.exp_format = False
@@ -812,11 +857,9 @@ class FloatingFormat(object):
self.trim = 'k'
self.precision = max(len(s) for s in frac_part)
- # for back-compatibility with np 1.13, use two spaces and full prec
+ # for back-compat with np 1.13, use 2 spaces & sign and full prec
if self._legacy == '1.13':
- # undo addition of sign pos below
- will_add_sign = all(finite_vals > 0) and self.sign == ' '
- self.pad_left = 3 - will_add_sign
+ self.pad_left = 3
else:
# this should be only 1 or 2. Can be calculated from sign.
self.pad_left = max(len(s) for s in int_part)
@@ -835,7 +878,10 @@ class FloatingFormat(object):
sign=self.sign == '+')
for x in finite_vals)
int_part, frac_part = zip(*(s.split('.') for s in strs))
- self.pad_left = max(len(s) for s in int_part)
+ if self._legacy == '1.13':
+ self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
+ else:
+ self.pad_left = max(len(s) for s in int_part)
self.pad_right = max(len(s) for s in frac_part)
self.exp_size = -1
@@ -847,9 +893,10 @@ class FloatingFormat(object):
self.unique = True
self.trim = '.'
- # account for sign = ' ' by adding one to pad_left
- if all(finite_vals >= 0) and self.sign == ' ':
- self.pad_left += 1
+ if self._legacy != '1.13':
+ # account for sign = ' ' by adding one to pad_left
+ if self.sign == ' ' and not any(np.signbit(finite_vals)):
+ self.pad_left += 1
# if there are non-finite values, may need to increase pad_left
if data.size != finite_vals.size:
@@ -902,7 +949,6 @@ class LongFloatFormat(FloatingFormat):
DeprecationWarning, stacklevel=2)
super(LongFloatFormat, self).__init__(*args, **kwargs)
-
def format_float_scientific(x, precision=None, unique=True, trim='k',
sign=False, pad_left=None, exp_digits=None):
"""
@@ -915,9 +961,9 @@ def format_float_scientific(x, precision=None, unique=True, trim='k',
----------
x : python float or numpy floating scalar
Value to format.
- precision : non-negative integer, optional
- Maximum number of fractional digits to print. May be omitted if
- `unique` is `True`, but is required if unique is `False`.
+ precision : non-negative integer or None, optional
+ Maximum number of digits to print. May be None if `unique` is
+ `True`, but must be an integer if unique is `False`.
unique : boolean, optional
If `True`, use a digit-generation strategy which gives the shortest
representation which uniquely identifies the floating-point number from
@@ -962,9 +1008,9 @@ def format_float_scientific(x, precision=None, unique=True, trim='k',
>>> np.format_float_scientific(s, exp_digits=4)
'1.23e+0024'
"""
- precision = -1 if precision is None else precision
- pad_left = -1 if pad_left is None else pad_left
- exp_digits = -1 if exp_digits is None else exp_digits
+ precision = _none_or_positive_arg(precision, 'precision')
+ pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+ exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
return dragon4_scientific(x, precision=precision, unique=unique,
trim=trim, sign=sign, pad_left=pad_left,
exp_digits=exp_digits)
@@ -982,9 +1028,9 @@ def format_float_positional(x, precision=None, unique=True,
----------
x : python float or numpy floating scalar
Value to format.
- precision : non-negative integer, optional
- Maximum number of digits to print. May be omitted if `unique` is
- `True`, but is required if unique is `False`.
+ precision : non-negative integer or None, optional
+ Maximum number of digits to print. May be None if `unique` is
+ `True`, but must be an integer if unique is `False`.
unique : boolean, optional
If `True`, use a digit-generation strategy which gives the shortest
representation which uniquely identifies the floating-point number from
@@ -1035,9 +1081,9 @@ def format_float_positional(x, precision=None, unique=True,
>>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
'0.3000488281'
"""
- precision = -1 if precision is None else precision
- pad_left = -1 if pad_left is None else pad_left
- pad_right = -1 if pad_right is None else pad_right
+ precision = _none_or_positive_arg(precision, 'precision')
+ pad_left = _none_or_positive_arg(pad_left, 'pad_left')
+ pad_right = _none_or_positive_arg(pad_right, 'pad_right')
return dragon4_positional(x, precision=precision, unique=unique,
fractional=fractional, trim=trim,
sign=sign, pad_left=pad_left,
@@ -1075,15 +1121,25 @@ class ComplexFloatingFormat(object):
if isinstance(sign, bool):
sign = '+' if sign else '-'
- self.real_format = FloatingFormat(x.real, precision, floatmode,
+ floatmode_real = floatmode_imag = floatmode
+ if kwarg.get('legacy', False) == '1.13':
+ floatmode_real = 'maxprec_equal'
+ floatmode_imag = 'maxprec'
+
+ self.real_format = FloatingFormat(x.real, precision, floatmode_real,
suppress_small, sign=sign, **kwarg)
- self.imag_format = FloatingFormat(x.imag, precision, floatmode,
+ self.imag_format = FloatingFormat(x.imag, precision, floatmode_imag,
suppress_small, sign='+', **kwarg)
def __call__(self, x):
r = self.real_format(x.real)
i = self.imag_format(x.imag)
- return r + i + 'j'
+
+ # add the 'j' before the terminal whitespace in i
+ sp = len(i.rstrip())
+ i = i[:sp] + 'j' + i[sp:]
+
+ return r + i
# for back-compatibility, we keep the classes for each complex type too
class ComplexFormat(ComplexFloatingFormat):
@@ -1177,7 +1233,6 @@ class SubArrayFormat(object):
class StructureFormat(object):
def __init__(self, format_functions):
self.format_functions = format_functions
- self.num_fields = len(format_functions)
@classmethod
def from_data(cls, data, **options):
@@ -1194,11 +1249,14 @@ class StructureFormat(object):
return cls(format_functions)
def __call__(self, x):
- s = "("
- for field, format_function in zip(x, self.format_functions):
- s += format_function(field) + ", "
- return (s[:-2] if 1 < self.num_fields else s[:-1]) + ")"
-
+ str_fields = [
+ format_function(field)
+ for field, format_function in zip(x, self.format_functions)
+ ]
+ if len(str_fields) == 1:
+ return "({},)".format(str_fields[0])
+ else:
+ return "({})".format(", ".join(str_fields))
def _void_scalar_repr(x):
"""
@@ -1258,7 +1316,7 @@ def dtype_short_repr(dtype):
"""
# handle these separately so they don't give garbage like str256
if issubclass(dtype.type, flexible):
- if dtype.names:
+ if dtype.names is not None:
return "%s" % str(dtype)
else:
return "'%s'" % str(dtype)
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 9470e882a..ebcf864ea 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -84,8 +84,9 @@ def TD(types, f=None, astype=None, in_=None, out=None, simd=None):
if f is not None:
if isinstance(f, str):
func_data = build_func_data(types, f)
+ elif len(f) != len(types):
+ raise ValueError("Number of types and f do not match")
else:
- assert len(f) == len(types)
func_data = f
else:
func_data = (None,) * len(types)
@@ -93,10 +94,14 @@ def TD(types, f=None, astype=None, in_=None, out=None, simd=None):
in_ = (in_,) * len(types)
elif in_ is None:
in_ = (None,) * len(types)
+ elif len(in_) != len(types):
+ raise ValueError("Number of types and inputs do not match")
if isinstance(out, str):
out = (out,) * len(types)
elif out is None:
out = (None,) * len(types)
+ elif len(out) != len(types):
+ raise ValueError("Number of types and outputs do not match")
tds = []
for t, fd, i, o in zip(types, func_data, in_, out):
# [(simd-name, list of types)]
@@ -1035,14 +1040,19 @@ def make_ufuncs(funcdict):
# string literal in C code. We split at endlines because textwrap.wrap
# do not play well with \n
docstring = '\\n\"\"'.join(docstring.split(r"\n"))
- mlist.append(textwrap.dedent("""\
- f = PyUFunc_FromFuncAndData(%s_functions, %s_data, %s_signatures, %d,
- %d, %d, %s, "%s",
- "%s", 0);""") % (name, name, name,
- len(uf.type_descriptions),
- uf.nin, uf.nout,
- uf.identity,
- name, docstring))
+ fmt = textwrap.dedent("""\
+ f = PyUFunc_FromFuncAndData(
+ {name}_functions, {name}_data, {name}_signatures, {nloops},
+ {nin}, {nout}, {identity}, "{name}",
+ "{doc}", 0
+ );
+ if (f == NULL) {{
+ return -1;
+ }}""")
+ mlist.append(fmt.format(
+ name=name, nloops=len(uf.type_descriptions),
+ nin=uf.nin, nout=uf.nout, identity=uf.identity, doc=docstring
+ ))
if uf.typereso is not None:
mlist.append(
r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso)
@@ -1066,12 +1076,14 @@ def make_code(funcdict, filename):
%s
- static void
+ static int
InitOperators(PyObject *dictionary) {
PyObject *f;
%s
%s
+
+ return 0;
}
""") % (filename, code1, code2, code3)
return code
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 604168162..75dee7084 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -297,7 +297,7 @@ add_newdoc('numpy.core.umath', 'arcsinh',
Returns
-------
out : ndarray
- Array of of the same shape as `x`.
+ Array of the same shape as `x`.
Notes
-----
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index c5b37b7e2..1190f063e 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -4,6 +4,7 @@ Implementation of optimized einsum.
"""
from __future__ import division, absolute_import, print_function
+from numpy.compat import basestring
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asarray, asanyarray, result_type, tensordot, dot
@@ -399,7 +400,7 @@ def _parse_einsum_input(operands):
if len(operands) == 0:
raise ValueError("No input operands")
- if isinstance(operands[0], str):
+ if isinstance(operands[0], basestring):
subscripts = operands[0].replace(" ", "")
operands = [asanyarray(v) for v in operands[1:]]
@@ -665,7 +666,7 @@ def einsum_path(*operands, **kwargs):
memory_limit = None
# No optimization or a named path algorithm
- if (path_type is False) or isinstance(path_type, str):
+ if (path_type is False) or isinstance(path_type, basestring):
pass
# Given an explicit path
@@ -673,7 +674,7 @@ def einsum_path(*operands, **kwargs):
pass
# Path tuple with memory limit
- elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
+ elif ((len(path_type) == 2) and isinstance(path_type[0], basestring) and
isinstance(path_type[1], (int, float))):
memory_limit = int(path_type[1])
path_type = path_type[0]
@@ -700,14 +701,18 @@ def einsum_path(*operands, **kwargs):
sh = operands[tnum].shape
if len(sh) != len(term):
raise ValueError("Einstein sum subscript %s does not contain the "
- "correct number of indices for operand %d.",
- input_subscripts[tnum], tnum)
+ "correct number of indices for operand %d."
+ % (input_subscripts[tnum], tnum))
for cnum, char in enumerate(term):
dim = sh[cnum]
if char in dimension_dict.keys():
- if dimension_dict[char] != dim:
- raise ValueError("Size of label '%s' for operand %d does "
- "not match previous terms.", char, tnum)
+ # For broadcasting cases we always want the largest dim size
+ if dimension_dict[char] == 1:
+ dimension_dict[char] = dim
+ elif dim not in (1, dimension_dict[char]):
+ raise ValueError("Size of label '%s' for operand %d (%d) "
+ "does not match previous terms (%d)."
+ % (char, tnum, dimension_dict[char], dim))
else:
dimension_dict[char] = dim
@@ -1056,8 +1061,8 @@ def einsum(*operands, **kwargs):
"""
- # Grab non-einsum kwargs
- optimize_arg = kwargs.pop('optimize', True)
+ # Grab non-einsum kwargs; never optimize 2-argument case.
+ optimize_arg = kwargs.pop('optimize', len(operands) > 3)
# If no optimization, run pure einsum
if optimize_arg is False:
@@ -1099,13 +1104,22 @@ def einsum(*operands, **kwargs):
if specified_out and ((num + 1) == len(contraction_list)):
handle_out = True
- # Call tensordot
+ # Handle broadcasting vs BLAS cases
if blas:
-
# Checks have already been handled
input_str, results_index = einsum_str.split('->')
input_left, input_right = input_str.split(',')
-
+ if 1 in tmp_operands[0] or 1 in tmp_operands[1]:
+ left_dims = {dim: size for dim, size in
+ zip(input_left, tmp_operands[0].shape)}
+ right_dims = {dim: size for dim, size in
+ zip(input_right, tmp_operands[1].shape)}
+ # If dims do not match we are broadcasting, BLAS off
+ if any(left_dims[ind] != right_dims[ind] for ind in idx_rm):
+ blas = False
+
+ # Call tensordot if still possible
+ if blas:
tensor_result = input_left + input_right
for s in idx_rm:
tensor_result = tensor_result.replace(s, "")
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 8203684e9..43584349f 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -596,7 +596,7 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
Element index to partition by. The k-th value of the element
will be in its final sorted position and all smaller elements
will be moved before it and all equal or greater elements behind
- it. The order all elements in the partitions is undefined. If
+ it. The order of all elements in the partitions is undefined. If
provided with a sequence of k-th it will partition all elements
indexed by k-th of them into their sorted position at once.
axis : int or None, optional
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index 19bbc7435..cf73cecea 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -235,29 +235,34 @@ typedef enum {
* TIMEZONE: 5
* NULL TERMINATOR: 1
*/
-#define NPY_DATETIME_MAX_ISO8601_STRLEN (21+3*5+1+3*6+6+1)
+#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1)
+/* The FR in the unit names stands for frequency */
typedef enum {
- NPY_FR_Y = 0, /* Years */
- NPY_FR_M = 1, /* Months */
- NPY_FR_W = 2, /* Weeks */
+ /* Force signed enum type, must be -1 for code compatibility */
+ NPY_FR_ERROR = -1, /* error or undetermined */
+
+ /* Start of valid units */
+ NPY_FR_Y = 0, /* Years */
+ NPY_FR_M = 1, /* Months */
+ NPY_FR_W = 2, /* Weeks */
/* Gap where 1.6 NPY_FR_B (value 3) was */
- NPY_FR_D = 4, /* Days */
- NPY_FR_h = 5, /* hours */
- NPY_FR_m = 6, /* minutes */
- NPY_FR_s = 7, /* seconds */
- NPY_FR_ms = 8, /* milliseconds */
- NPY_FR_us = 9, /* microseconds */
- NPY_FR_ns = 10,/* nanoseconds */
- NPY_FR_ps = 11,/* picoseconds */
- NPY_FR_fs = 12,/* femtoseconds */
- NPY_FR_as = 13,/* attoseconds */
- NPY_FR_GENERIC = 14 /* Generic, unbound units, can convert to anything */
+ NPY_FR_D = 4, /* Days */
+ NPY_FR_h = 5, /* hours */
+ NPY_FR_m = 6, /* minutes */
+ NPY_FR_s = 7, /* seconds */
+ NPY_FR_ms = 8, /* milliseconds */
+ NPY_FR_us = 9, /* microseconds */
+ NPY_FR_ns = 10, /* nanoseconds */
+ NPY_FR_ps = 11, /* picoseconds */
+ NPY_FR_fs = 12, /* femtoseconds */
+ NPY_FR_as = 13, /* attoseconds */
+ NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */
} NPY_DATETIMEUNIT;
/*
* NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS
- * is technically one more than the actual number of units.
+ * is technically one more than the actual number of units.
*/
#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1)
#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index c0aa1eb2e..56fbd99af 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -94,6 +94,8 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
#define PyUString_InternFromString PyUnicode_InternFromString
#define PyUString_Format PyUnicode_Format
+#define PyBaseString_Check(obj) (PyUnicode_Check(obj))
+
#else
#define PyBytes_Type PyString_Type
@@ -123,6 +125,8 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
#define PyUString_InternFromString PyString_InternFromString
#define PyUString_Format PyString_Format
+#define PyBaseString_Check(obj) (PyBytes_Check(obj) || PyUnicode_Check(obj))
+
#endif /* NPY_PY3K */
diff --git a/numpy/core/include/numpy/utils.h b/numpy/core/include/numpy/utils.h
index cc968a354..32218b8c7 100644
--- a/numpy/core/include/numpy/utils.h
+++ b/numpy/core/include/numpy/utils.h
@@ -6,6 +6,8 @@
#define __COMP_NPY_UNUSED __attribute__ ((__unused__))
# elif defined(__ICC)
#define __COMP_NPY_UNUSED __attribute__ ((__unused__))
+ # elif defined(__clang__)
+ #define __COMP_NPY_UNUSED __attribute__ ((unused))
#else
#define __COMP_NPY_UNUSED
#endif
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index b61f5e7bc..aa91ecb44 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -238,8 +238,8 @@ def bitname(obj):
else:
newname = name
info = typeinfo[english_upper(newname)]
- assert(info[-1] == obj) # sanity check
- bits = info[2]
+ assert(info.type == obj) # sanity check
+ bits = info.bits
except KeyError: # bit-width name
base, bits = _evalname(name)
@@ -284,51 +284,53 @@ def bitname(obj):
def _add_types():
- for a in typeinfo.keys():
- name = english_lower(a)
- if isinstance(typeinfo[a], tuple):
- typeobj = typeinfo[a][-1]
-
+ for type_name, info in typeinfo.items():
+ name = english_lower(type_name)
+ if not isinstance(info, type):
# define C-name and insert typenum and typechar references also
- allTypes[name] = typeobj
- sctypeDict[name] = typeobj
- sctypeDict[typeinfo[a][0]] = typeobj
- sctypeDict[typeinfo[a][1]] = typeobj
+ allTypes[name] = info.type
+ sctypeDict[name] = info.type
+ sctypeDict[info.char] = info.type
+ sctypeDict[info.num] = info.type
else: # generic class
- allTypes[name] = typeinfo[a]
+ allTypes[name] = info
_add_types()
def _add_aliases():
- for a in typeinfo.keys():
- name = english_lower(a)
- if not isinstance(typeinfo[a], tuple):
+ for type_name, info in typeinfo.items():
+ if isinstance(info, type):
continue
- typeobj = typeinfo[a][-1]
+ name = english_lower(type_name)
+
# insert bit-width version for this class (if relevant)
- base, bit, char = bitname(typeobj)
+ base, bit, char = bitname(info.type)
if base[-3:] == 'int' or char[0] in 'ui':
continue
if base != '':
myname = "%s%d" % (base, bit)
- if ((name != 'longdouble' and name != 'clongdouble') or
- myname not in allTypes.keys()):
- allTypes[myname] = typeobj
- sctypeDict[myname] = typeobj
+ if (name not in ('longdouble', 'clongdouble') or
+ myname not in allTypes):
+ base_capitalize = english_capitalize(base)
if base == 'complex':
- na_name = '%s%d' % (english_capitalize(base), bit//2)
+ na_name = '%s%d' % (base_capitalize, bit//2)
elif base == 'bool':
- na_name = english_capitalize(base)
- sctypeDict[na_name] = typeobj
+ na_name = base_capitalize
else:
- na_name = "%s%d" % (english_capitalize(base), bit)
- sctypeDict[na_name] = typeobj
- sctypeNA[na_name] = typeobj
- sctypeDict[na_name] = typeobj
- sctypeNA[typeobj] = na_name
- sctypeNA[typeinfo[a][0]] = na_name
+ na_name = "%s%d" % (base_capitalize, bit)
+
+ allTypes[myname] = info.type
+
+ # add mapping for both the bit name and the numarray name
+ sctypeDict[myname] = info.type
+ sctypeDict[na_name] = info.type
+
+ # add forward, reverse, and string mapping to numarray
+ sctypeNA[na_name] = info.type
+ sctypeNA[info.type] = na_name
+ sctypeNA[info.char] = na_name
if char != '':
- sctypeDict[char] = typeobj
+ sctypeDict[char] = info.type
sctypeNA[char] = na_name
_add_aliases()
@@ -339,34 +341,22 @@ _add_aliases()
def _add_integer_aliases():
_ctypes = ['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']
for ctype in _ctypes:
- val = typeinfo[ctype]
- bits = val[2]
- charname = 'i%d' % (bits//8,)
- ucharname = 'u%d' % (bits//8,)
- intname = 'int%d' % bits
- UIntname = 'UInt%d' % bits
- Intname = 'Int%d' % bits
- uval = typeinfo['U'+ctype]
- typeobj = val[-1]
- utypeobj = uval[-1]
- if intname not in allTypes.keys():
- uintname = 'uint%d' % bits
- allTypes[intname] = typeobj
- allTypes[uintname] = utypeobj
- sctypeDict[intname] = typeobj
- sctypeDict[uintname] = utypeobj
- sctypeDict[Intname] = typeobj
- sctypeDict[UIntname] = utypeobj
- sctypeDict[charname] = typeobj
- sctypeDict[ucharname] = utypeobj
- sctypeNA[Intname] = typeobj
- sctypeNA[UIntname] = utypeobj
- sctypeNA[charname] = typeobj
- sctypeNA[ucharname] = utypeobj
- sctypeNA[typeobj] = Intname
- sctypeNA[utypeobj] = UIntname
- sctypeNA[val[0]] = Intname
- sctypeNA[uval[0]] = UIntname
+ i_info = typeinfo[ctype]
+ u_info = typeinfo['U'+ctype]
+ bits = i_info.bits # same for both
+
+ for info, charname, intname, Intname in [
+ (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits),
+ (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]:
+ if intname not in allTypes.keys():
+ allTypes[intname] = info.type
+ sctypeDict[intname] = info.type
+ sctypeDict[Intname] = info.type
+ sctypeDict[charname] = info.type
+ sctypeNA[Intname] = info.type
+ sctypeNA[charname] = info.type
+ sctypeNA[info.type] = Intname
+ sctypeNA[info.char] = Intname
_add_integer_aliases()
# We use these later
@@ -427,11 +417,10 @@ _set_up_aliases()
# Now, construct dictionary to lookup character codes from types
_sctype2char_dict = {}
def _construct_char_code_lookup():
- for name in typeinfo.keys():
- tup = typeinfo[name]
- if isinstance(tup, tuple):
- if tup[0] not in ['p', 'P']:
- _sctype2char_dict[tup[-1]] = tup[0]
+ for name, info in typeinfo.items():
+ if not isinstance(info, type):
+ if info.char not in ['p', 'P']:
+ _sctype2char_dict[info.type] = info.char
_construct_char_code_lookup()
@@ -776,15 +765,15 @@ _alignment = _typedict()
_maxvals = _typedict()
_minvals = _typedict()
def _construct_lookups():
- for name, val in typeinfo.items():
- if not isinstance(val, tuple):
+ for name, info in typeinfo.items():
+ if isinstance(info, type):
continue
- obj = val[-1]
- nbytes[obj] = val[2] // 8
- _alignment[obj] = val[3]
- if (len(val) > 5):
- _maxvals[obj] = val[4]
- _minvals[obj] = val[5]
+ obj = info.type
+ nbytes[obj] = info.bits // 8
+ _alignment[obj] = info.alignment
+ if len(info) > 5:
+ _maxvals[obj] = info.max
+ _minvals[obj] = info.min
else:
_maxvals[obj] = None
_minvals[obj] = None
diff --git a/numpy/core/records.py b/numpy/core/records.py
index 76783bb67..612d39322 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -38,6 +38,7 @@ from __future__ import division, absolute_import, print_function
import sys
import os
+import warnings
from . import numeric as sb
from . import numerictypes as nt
@@ -223,10 +224,14 @@ class record(nt.void):
__module__ = 'numpy'
def __repr__(self):
- return self.__str__()
+ if get_printoptions()['legacy'] == '1.13':
+ return self.__str__()
+ return super(record, self).__repr__()
def __str__(self):
- return str(self.item())
+ if get_printoptions()['legacy'] == '1.13':
+ return str(self.item())
+ return super(record, self).__str__()
def __getattribute__(self, attr):
if attr in ['setfield', 'getfield', 'dtype']:
@@ -673,7 +678,7 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
try:
retval = sb.array(recList, dtype=descr)
- except TypeError: # list of lists instead of list of tuples
+ except (TypeError, ValueError):
if (shape is None or shape == 0):
shape = len(recList)
if isinstance(shape, (int, long)):
@@ -683,6 +688,12 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
_array = recarray(shape, descr)
for k in range(_array.size):
_array[k] = tuple(recList[k])
+ # list of lists instead of list of tuples ?
+ # 2018-02-07, 1.14.1
+ warnings.warn(
+ "fromrecords expected a list of tuples, may have received a list "
+ "of lists instead. In the future that will raise an error",
+ FutureWarning, stacklevel=2)
return _array
else:
if shape is not None and retval.shape != shape:
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 981b0b7f0..11b1acb07 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -750,6 +750,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'strfuncs.h'),
+ join('src', 'multiarray', 'typeinfo.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'multiarray', 'vdot.h'),
@@ -827,6 +828,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'strfuncs.c'),
join('src', 'multiarray', 'temp_elide.c'),
+ join('src', 'multiarray', 'typeinfo.c'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c'),
join('src', 'multiarray', 'vdot.c'),
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index bd093c5c8..1fe953910 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -166,7 +166,7 @@ OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
# variable attributes tested via "int %s a" % attribute
OPTIONAL_VARIABLE_ATTRIBUTES = ["__thread", "__declspec(thread)"]
-# Subset of OPTIONAL_STDFUNCS which may alreay have HAVE_* defined by Python.h
+# Subset of OPTIONAL_STDFUNCS which may already have HAVE_* defined by Python.h
OPTIONAL_STDFUNCS_MAYBE = [
"expm1", "log1p", "acosh", "atanh", "asinh", "hypot", "copysign",
"ftello", "fseeko"
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 7c1b1f16a..3d259ae05 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -233,7 +233,7 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
* Use a static buffer to store the aligned/cast version,
* or allocate some memory if more space is needed.
*/
- if (sizeof(scalarbuffer) >= PyArray_DESCR(dst)->elsize) {
+ if ((int)sizeof(scalarbuffer) >= PyArray_DESCR(dst)->elsize) {
tmp_src_data = (char *)&scalarbuffer[0];
}
else {
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index d0370fe6b..e8aa19416 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -24,6 +24,7 @@
#include "_datetime.h"
#include "arrayobject.h"
#include "alloc.h"
+#include "typeinfo.h"
#ifdef NPY_HAVE_SSE2_INTRINSICS
#include <emmintrin.h>
#endif
@@ -4820,21 +4821,19 @@ set_typeinfo(PyObject *dict)
* #cn = i*7, N, i, l, i, N, i#
*/
- PyDict_SetItemString(infodict, "@name@",
-#if defined(NPY_PY3K)
- s = Py_BuildValue("Ciii@cx@@cn@O",
-#else
- s = Py_BuildValue("ciii@cx@@cn@O",
-#endif
- NPY_@name@LTR,
- NPY_@name@,
- NPY_BITSOF_@uname@,
- _ALIGN(@type@),
- @max@,
- @min@,
- (PyObject *) &Py@Name@ArrType_Type));
+ s = PyArray_typeinforanged(
+ NPY_@name@LTR, NPY_@name@, NPY_BITSOF_@uname@, _ALIGN(@type@),
+ Py_BuildValue("@cx@", @max@),
+ Py_BuildValue("@cn@", @min@),
+ &Py@Name@ArrType_Type
+ );
+ if (s == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(infodict, "@name@", s);
Py_DECREF(s);
+
/**end repeat**/
@@ -4848,91 +4847,80 @@ set_typeinfo(PyObject *dict)
* CFloat, CDouble, CLongDouble#
* #num = 1, 1, 1, 1, 2, 2, 2#
*/
-
- PyDict_SetItemString(infodict, "@name@",
-#if defined(NPY_PY3K)
- s = Py_BuildValue("CiiiO", NPY_@name@LTR,
-#else
- s = Py_BuildValue("ciiiO", NPY_@name@LTR,
-#endif
- NPY_@name@,
- NPY_BITSOF_@name@,
- @num@ * _ALIGN(@type@) > NPY_MAX_COPY_ALIGNMENT ?
- NPY_MAX_COPY_ALIGNMENT : @num@ * _ALIGN(@type@),
- (PyObject *) &Py@Name@ArrType_Type));
+ s = PyArray_typeinfo(
+ NPY_@name@LTR, NPY_@name@, NPY_BITSOF_@name@,
+ @num@ * _ALIGN(@type@) > NPY_MAX_COPY_ALIGNMENT ?
+ NPY_MAX_COPY_ALIGNMENT : @num@ * _ALIGN(@type@),
+ &Py@Name@ArrType_Type
+ );
+ if (s == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(infodict, "@name@", s);
Py_DECREF(s);
/**end repeat**/
- PyDict_SetItemString(infodict, "OBJECT",
-#if defined(NPY_PY3K)
- s = Py_BuildValue("CiiiO", NPY_OBJECTLTR,
-#else
- s = Py_BuildValue("ciiiO", NPY_OBJECTLTR,
-#endif
- NPY_OBJECT,
- sizeof(PyObject *) * CHAR_BIT,
- _ALIGN(PyObject *),
- (PyObject *) &PyObjectArrType_Type));
+ s = PyArray_typeinfo(
+ NPY_OBJECTLTR, NPY_OBJECT, sizeof(PyObject *) * CHAR_BIT,
+ _ALIGN(PyObject *),
+ &PyObjectArrType_Type
+ );
+ if (s == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(infodict, "OBJECT", s);
Py_DECREF(s);
- PyDict_SetItemString(infodict, "STRING",
-#if defined(NPY_PY3K)
- s = Py_BuildValue("CiiiO", NPY_STRINGLTR,
-#else
- s = Py_BuildValue("ciiiO", NPY_STRINGLTR,
-#endif
- NPY_STRING,
- 0,
- _ALIGN(char),
- (PyObject *) &PyStringArrType_Type));
+ s = PyArray_typeinfo(
+ NPY_STRINGLTR, NPY_STRING, 0, _ALIGN(char),
+ &PyStringArrType_Type
+ );
+ if (s == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(infodict, "STRING", s);
Py_DECREF(s);
- PyDict_SetItemString(infodict, "UNICODE",
-#if defined(NPY_PY3K)
- s = Py_BuildValue("CiiiO", NPY_UNICODELTR,
-#else
- s = Py_BuildValue("ciiiO", NPY_UNICODELTR,
-#endif
- NPY_UNICODE,
- 0,
- _ALIGN(npy_ucs4),
- (PyObject *) &PyUnicodeArrType_Type));
+ s = PyArray_typeinfo(
+ NPY_UNICODELTR, NPY_UNICODE, 0, _ALIGN(npy_ucs4),
+ &PyUnicodeArrType_Type
+ );
+ if (s == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(infodict, "UNICODE", s);
Py_DECREF(s);
- PyDict_SetItemString(infodict, "VOID",
-#if defined(NPY_PY3K)
- s = Py_BuildValue("CiiiO", NPY_VOIDLTR,
-#else
- s = Py_BuildValue("ciiiO", NPY_VOIDLTR,
-#endif
- NPY_VOID,
- 0,
- _ALIGN(char),
- (PyObject *) &PyVoidArrType_Type));
+ s = PyArray_typeinfo(
+ NPY_VOIDLTR, NPY_VOID, 0, _ALIGN(char),
+ &PyVoidArrType_Type
+ );
+ if (s == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(infodict, "VOID", s);
Py_DECREF(s);
- PyDict_SetItemString(infodict, "DATETIME",
-#if defined(NPY_PY3K)
- s = Py_BuildValue("CiiiNNO", NPY_DATETIMELTR,
-#else
- s = Py_BuildValue("ciiiNNO", NPY_DATETIMELTR,
-#endif
- NPY_DATETIME,
- NPY_BITSOF_DATETIME,
- _ALIGN(npy_datetime),
- MyPyLong_FromInt64(NPY_MAX_DATETIME),
- MyPyLong_FromInt64(NPY_MIN_DATETIME),
- (PyObject *) &PyDatetimeArrType_Type));
+ s = PyArray_typeinforanged(
+ NPY_DATETIMELTR, NPY_DATETIME, NPY_BITSOF_DATETIME,
+ _ALIGN(npy_datetime),
+ MyPyLong_FromInt64(NPY_MAX_DATETIME),
+ MyPyLong_FromInt64(NPY_MIN_DATETIME),
+ &PyDatetimeArrType_Type
+ );
+ if (s == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(infodict, "DATETIME", s);
Py_DECREF(s);
- PyDict_SetItemString(infodict, "TIMEDELTA",
-#if defined(NPY_PY3K)
- s = Py_BuildValue("CiiiNNO", NPY_TIMEDELTALTR,
-#else
- s = Py_BuildValue("ciiiNNO",NPY_TIMEDELTALTR,
-#endif
- NPY_TIMEDELTA,
- NPY_BITSOF_TIMEDELTA,
- _ALIGN(npy_timedelta),
- MyPyLong_FromInt64(NPY_MAX_TIMEDELTA),
- MyPyLong_FromInt64(NPY_MIN_TIMEDELTA),
- (PyObject *)&PyTimedeltaArrType_Type));
+ s = PyArray_typeinforanged(
+ NPY_TIMEDELTALTR, NPY_TIMEDELTA, NPY_BITSOF_TIMEDELTA,
+ _ALIGN(npy_timedelta),
+ MyPyLong_FromInt64(NPY_MAX_TIMEDELTA),
+ MyPyLong_FromInt64(NPY_MIN_TIMEDELTA),
+ &PyTimedeltaArrType_Type
+ );
+ if (s == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(infodict, "TIMEDELTA", s);
Py_DECREF(s);
#define SETTYPE(name) \
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index e76d406de..f892cf6cd 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -12,6 +12,7 @@
#include "npy_pycompat.h"
#include "buffer.h"
+#include "common.h"
#include "numpyos.h"
#include "arrayobject.h"
@@ -243,14 +244,19 @@ _buffer_format_string(PyArray_Descr *descr, _tmp_string_t *str,
child = (PyArray_Descr*)PyTuple_GetItem(item, 0);
offset_obj = PyTuple_GetItem(item, 1);
- new_offset = base_offset + PyInt_AsLong(offset_obj);
+ new_offset = PyInt_AsLong(offset_obj);
+ if (error_converting(new_offset)) {
+ return -1;
+ }
+ new_offset += base_offset;
/* Insert padding manually */
if (*offset > new_offset) {
- PyErr_SetString(PyExc_RuntimeError,
- "This should never happen: Invalid offset in "
- "buffer format string generation. Please "
- "report a bug to the Numpy developers.");
+ PyErr_SetString(
+ PyExc_ValueError,
+ "dtypes with overlapping or out-of-order fields are not "
+ "representable as buffers. Consider reordering the fields."
+ );
return -1;
}
while (*offset < new_offset) {
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 099cc0394..10efdc4c8 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -588,7 +588,7 @@ _zerofill(PyArrayObject *ret)
NPY_NO_EXPORT int
_IsAligned(PyArrayObject *ap)
{
- unsigned int i;
+ int i;
npy_uintp aligned;
npy_uintp alignment = PyArray_DESCR(ap)->alignment;
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index eea65375e..6ff381862 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -1294,6 +1294,37 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
}
/*
+ * Produces the smallest size and lowest kind type to which all
+ * input types can be cast.
+ */
+NPY_NO_EXPORT PyArray_Descr *
+PyArray_PromoteTypeSequence(PyArray_Descr **types, npy_intp ntypes)
+{
+ npy_intp i;
+ PyArray_Descr *ret = NULL;
+ if (ntypes == 0) {
+ PyErr_SetString(PyExc_TypeError, "at least one type needed to promote");
+ return NULL;
+ }
+ ret = types[0];
+ Py_INCREF(ret);
+ for (i = 1; i < ntypes; ++i) {
+ PyArray_Descr *type = types[i];
+
+ /* Only call promote if the types aren't the same dtype */
+ if (type != ret || !PyArray_ISNBO(type->byteorder)) {
+ PyArray_Descr *tmp = PyArray_PromoteTypes(type, ret);
+ Py_DECREF(ret);
+ ret = tmp;
+ if (ret == NULL) {
+ return NULL;
+ }
+ }
+ }
+ return ret;
+}
+
+/*
* NOTE: While this is unlikely to be a performance problem, if
* it is it could be reverted to a simple positive/negative
* check as the previous system used.
@@ -1579,16 +1610,12 @@ static int min_scalar_type_num(char *valueptr, int type_num,
return type_num;
}
-/*NUMPY_API
- * If arr is a scalar (has 0 dimensions) with a built-in number data type,
- * finds the smallest type size/kind which can still represent its data.
- * Otherwise, returns the array's data type.
- *
- */
+
NPY_NO_EXPORT PyArray_Descr *
-PyArray_MinScalarType(PyArrayObject *arr)
+PyArray_MinScalarType_internal(PyArrayObject *arr, int *is_small_unsigned)
{
PyArray_Descr *dtype = PyArray_DESCR(arr);
+ *is_small_unsigned = 0;
/*
* If the array isn't a numeric scalar, just return the array's dtype.
*/
@@ -1599,18 +1626,30 @@ PyArray_MinScalarType(PyArrayObject *arr)
else {
char *data = PyArray_BYTES(arr);
int swap = !PyArray_ISNBO(dtype->byteorder);
- int is_small_unsigned = 0;
/* An aligned memory buffer large enough to hold any type */
npy_longlong value[4];
dtype->f->copyswap(&value, data, swap, NULL);
return PyArray_DescrFromType(
min_scalar_type_num((char *)&value,
- dtype->type_num, &is_small_unsigned));
+ dtype->type_num, is_small_unsigned));
}
}
+/*NUMPY_API
+ * If arr is a scalar (has 0 dimensions) with a built-in number data type,
+ * finds the smallest type size/kind which can still represent its data.
+ * Otherwise, returns the array's data type.
+ *
+ */
+NPY_NO_EXPORT PyArray_Descr *
+PyArray_MinScalarType(PyArrayObject *arr)
+{
+ int is_small_unsigned;
+ return PyArray_MinScalarType_internal(arr, &is_small_unsigned);
+}
+
/*
* Provides an ordering for the dtype 'kind' character codes, to help
* determine when to use the min_scalar_type function. This groups
@@ -1722,75 +1761,32 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr,
/* Loop through all the types, promoting them */
if (!use_min_scalar) {
+ /* Build a single array of all the dtypes */
+ PyArray_Descr **all_dtypes = PyArray_malloc(
+ sizeof(*all_dtypes) * (narrs + ndtypes));
+ if (all_dtypes == NULL) {
+ return NULL;
+ }
for (i = 0; i < narrs; ++i) {
- PyArray_Descr *tmp = PyArray_DESCR(arr[i]);
- /* Combine it with the existing type */
- if (ret == NULL) {
- ret = tmp;
- Py_INCREF(ret);
- }
- else {
- /* Only call promote if the types aren't the same dtype */
- if (tmp != ret || !PyArray_ISNBO(ret->byteorder)) {
- tmpret = PyArray_PromoteTypes(tmp, ret);
- Py_DECREF(ret);
- ret = tmpret;
- if (ret == NULL) {
- return NULL;
- }
- }
- }
+ all_dtypes[i] = PyArray_DESCR(arr[i]);
}
-
for (i = 0; i < ndtypes; ++i) {
- PyArray_Descr *tmp = dtypes[i];
- /* Combine it with the existing type */
- if (ret == NULL) {
- ret = tmp;
- Py_INCREF(ret);
- }
- else {
- /* Only call promote if the types aren't the same dtype */
- if (tmp != ret || !PyArray_ISNBO(tmp->byteorder)) {
- tmpret = PyArray_PromoteTypes(tmp, ret);
- Py_DECREF(ret);
- ret = tmpret;
- if (ret == NULL) {
- return NULL;
- }
- }
- }
+ all_dtypes[narrs + i] = dtypes[i];
+ }
+ ret = PyArray_PromoteTypeSequence(all_dtypes, narrs + ndtypes);
+ PyArray_free(all_dtypes);
+ if (ret == NULL) {
+ return NULL;
}
}
else {
for (i = 0; i < narrs; ++i) {
- /* Get the min scalar type for the array */
- PyArray_Descr *tmp = PyArray_DESCR(arr[i]);
- int tmp_is_small_unsigned = 0;
- /*
- * If it's a scalar, find the min scalar type. The function
- * is expanded here so that we can flag whether we've got an
- * unsigned integer which would fit an a signed integer
- * of the same size, something not exposed in the public API.
- */
- if (PyArray_NDIM(arr[i]) == 0 &&
- PyTypeNum_ISNUMBER(tmp->type_num)) {
- char *data = PyArray_BYTES(arr[i]);
- int swap = !PyArray_ISNBO(tmp->byteorder);
- int type_num;
- /* An aligned memory buffer large enough to hold any type */
- npy_longlong value[4];
- tmp->f->copyswap(&value, data, swap, NULL);
- type_num = min_scalar_type_num((char *)&value,
- tmp->type_num, &tmp_is_small_unsigned);
- tmp = PyArray_DescrFromType(type_num);
- if (tmp == NULL) {
- Py_XDECREF(ret);
- return NULL;
- }
- }
- else {
- Py_INCREF(tmp);
+ int tmp_is_small_unsigned;
+ PyArray_Descr *tmp = PyArray_MinScalarType_internal(
+ arr[i], &tmp_is_small_unsigned);
+ if (tmp == NULL) {
+ Py_XDECREF(ret);
+ return NULL;
}
/* Combine it with the existing type */
if (ret == NULL) {
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index f4236f36d..3d6b161b1 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -1843,7 +1843,7 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
* NPY_ARRAY_WRITEBACKIFCOPY flag sets this flag in the returned
* array if a copy is made and the base argument points to the (possibly)
* misbehaved array. Before returning to python, PyArray_ResolveWritebackIfCopy
- * must be called to update the contents of the orignal array from the copy.
+ * must be called to update the contents of the original array from the copy.
*
* NPY_ARRAY_FORCECAST will cause a cast to occur regardless of whether or not
* it is safe.
@@ -2181,7 +2181,6 @@ _is_default_descr(PyObject *descr, PyObject *typestr) {
NPY_NO_EXPORT PyObject *
PyArray_FromInterface(PyObject *origin)
{
- PyObject *tmp = NULL;
PyObject *iface = NULL;
PyObject *attr = NULL;
PyObject *base = NULL;
@@ -2216,9 +2215,15 @@ PyArray_FromInterface(PyObject *origin)
#if defined(NPY_PY3K)
/* Allow unicode type strings */
if (PyUnicode_Check(attr)) {
- tmp = PyUnicode_AsASCIIString(attr);
+ PyObject *tmp = PyUnicode_AsASCIIString(attr);
+ if (tmp == NULL) {
+ goto fail;
+ }
attr = tmp;
}
+ else {
+ Py_INCREF(attr);
+ }
#endif
if (!PyBytes_Check(attr)) {
PyErr_SetString(PyExc_TypeError,
@@ -2227,11 +2232,6 @@ PyArray_FromInterface(PyObject *origin)
}
/* Get dtype from type string */
dtype = _array_typedescr_fromstr(PyString_AS_STRING(attr));
-#if defined(NPY_PY3K)
- if (tmp == attr) {
- Py_DECREF(tmp);
- }
-#endif
if (dtype == NULL) {
goto fail;
}
@@ -2251,6 +2251,10 @@ PyArray_FromInterface(PyObject *origin)
dtype = new_dtype;
}
}
+
+#if defined(NPY_PY3K)
+ Py_DECREF(attr); /* Pairs with the unicode handling above */
+#endif
/* Get shape tuple from interface specification */
attr = PyDict_GetItemString(iface, "shape");
@@ -2278,7 +2282,7 @@ PyArray_FromInterface(PyObject *origin)
else {
n = PyTuple_GET_SIZE(attr);
for (i = 0; i < n; i++) {
- tmp = PyTuple_GET_ITEM(attr, i);
+ PyObject *tmp = PyTuple_GET_ITEM(attr, i);
dims[i] = PyArray_PyIntAsIntp(tmp);
if (error_converting(dims[i])) {
goto fail;
@@ -2395,7 +2399,7 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
for (i = 0; i < n; i++) {
- tmp = PyTuple_GET_ITEM(attr, i);
+ PyObject *tmp = PyTuple_GET_ITEM(attr, i);
strides[i] = PyArray_PyIntAsIntp(tmp);
if (error_converting(strides[i])) {
Py_DECREF(ret);
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 18b549cf8..a7d991581 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -778,8 +778,9 @@ parse_datetime_extended_unit_from_string(char *str, Py_ssize_t len,
goto bad_input;
}
out_meta->base = parse_datetime_unit_from_string(substr,
- substrend-substr, metastr);
- if (out_meta->base == -1) {
+ substrend - substr,
+ metastr);
+ if (out_meta->base == NPY_FR_ERROR ) {
return -1;
}
substr = substrend;
@@ -1073,8 +1074,9 @@ static npy_uint64
get_datetime_units_factor(NPY_DATETIMEUNIT bigbase, NPY_DATETIMEUNIT littlebase)
{
npy_uint64 factor = 1;
- int unit = (int)bigbase;
- while (littlebase > unit) {
+ NPY_DATETIMEUNIT unit = bigbase;
+
+ while (unit < littlebase) {
factor *= _datetime_factors[unit];
/*
* Detect overflow by disallowing the top 16 bits to be 1.
@@ -1719,7 +1721,7 @@ datetime_type_promotion(PyArray_Descr *type1, PyArray_Descr *type2)
* a date time unit enum value. The 'metastr' parameter
* is used for error messages, and may be NULL.
*
- * Returns 0 on success, -1 on failure.
+ * Returns NPY_DATETIMEUNIT on success, NPY_FR_ERROR on failure.
*/
NPY_NO_EXPORT NPY_DATETIMEUNIT
parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr)
@@ -1775,7 +1777,7 @@ parse_datetime_unit_from_string(char *str, Py_ssize_t len, char *metastr)
"Invalid datetime unit in metadata string \"%s\"",
metastr);
}
- return -1;
+ return NPY_FR_ERROR;
}
@@ -1847,7 +1849,7 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
}
out_meta->base = parse_datetime_unit_from_string(basestr, len, NULL);
- if (out_meta->base == -1) {
+ if (out_meta->base == NPY_FR_ERROR) {
Py_DECREF(unit_str);
return -1;
}
@@ -2418,7 +2420,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
char *str = NULL;
Py_ssize_t len = 0;
npy_datetimestruct dts;
- NPY_DATETIMEUNIT bestunit = -1;
+ NPY_DATETIMEUNIT bestunit = NPY_FR_ERROR;
/* Convert to an ASCII string for the date parser */
if (PyUnicode_Check(obj)) {
@@ -2444,7 +2446,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
/* Use the detected unit if none was specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
meta->base = bestunit;
meta->num = 1;
}
@@ -2460,7 +2462,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
/* Do no conversion on raw integers */
else if (PyInt_Check(obj) || PyLong_Check(obj)) {
/* Don't allow conversion from an integer without specifying a unit */
- if (meta->base == -1 || meta->base == NPY_FR_GENERIC) {
+ if (meta->base == NPY_FR_ERROR || meta->base == NPY_FR_GENERIC) {
PyErr_SetString(PyExc_ValueError, "Converting an integer to a "
"NumPy datetime requires a specified unit");
return -1;
@@ -2473,7 +2475,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
PyDatetimeScalarObject *dts = (PyDatetimeScalarObject *)obj;
/* Copy the scalar directly if units weren't specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
*meta = dts->obmeta;
*out = dts->obval;
@@ -2512,7 +2514,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
obj);
/* Copy the value directly if units weren't specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
*meta = *arr_meta;
*out = dt;
@@ -2536,7 +2538,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
else {
int code;
npy_datetimestruct dts;
- NPY_DATETIMEUNIT bestunit = -1;
+ NPY_DATETIMEUNIT bestunit = NPY_FR_ERROR;
code = convert_pydatetime_to_datetimestruct(obj, &dts, &bestunit, 1);
if (code == -1) {
@@ -2544,7 +2546,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
else if (code == 0) {
/* Use the detected unit if none was specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
meta->base = bestunit;
meta->num = 1;
}
@@ -2571,7 +2573,7 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
*/
if (casting == NPY_UNSAFE_CASTING ||
(obj == Py_None && casting == NPY_SAME_KIND_CASTING)) {
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
meta->base = NPY_FR_GENERIC;
meta->num = 1;
}
@@ -2647,7 +2649,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
if (succeeded) {
/* Use generic units if none was specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
meta->base = NPY_FR_GENERIC;
meta->num = 1;
}
@@ -2658,7 +2660,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
/* Do no conversion on raw integers */
else if (PyInt_Check(obj) || PyLong_Check(obj)) {
/* Use the default unit if none was specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
meta->base = NPY_DATETIME_DEFAULTUNIT;
meta->num = 1;
}
@@ -2671,7 +2673,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
PyTimedeltaScalarObject *dts = (PyTimedeltaScalarObject *)obj;
/* Copy the scalar directly if units weren't specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
*meta = dts->obmeta;
*out = dts->obval;
@@ -2710,7 +2712,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
obj);
/* Copy the value directly if units weren't specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
*meta = *arr_meta;
*out = dt;
@@ -2779,7 +2781,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
td = days*(24*60*60*1000000LL) + seconds*1000000LL + useconds;
/* Use microseconds if none was specified */
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
meta->base = NPY_FR_us;
meta->num = 1;
@@ -2833,7 +2835,7 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
*/
if (casting == NPY_UNSAFE_CASTING ||
(obj == Py_None && casting == NPY_SAME_KIND_CASTING)) {
- if (meta->base == -1) {
+ if (meta->base == NPY_FR_ERROR) {
meta->base = NPY_FR_GENERIC;
meta->num = 1;
}
@@ -3167,7 +3169,7 @@ convert_pyobjects_to_datetimes(int count,
}
/* Use the inputs to resolve the unit metadata if requested */
- if (inout_meta->base == -1) {
+ if (inout_meta->base == NPY_FR_ERROR) {
/* Allocate an array of metadata corresponding to the objects */
meta = PyArray_malloc(count * sizeof(PyArray_DatetimeMetaData));
if (meta == NULL) {
@@ -3177,7 +3179,7 @@ convert_pyobjects_to_datetimes(int count,
/* Convert all the objects into timedeltas or datetimes */
for (i = 0; i < count; ++i) {
- meta[i].base = -1;
+ meta[i].base = NPY_FR_ERROR;
meta[i].num = 1;
/* NULL -> NaT */
@@ -3344,7 +3346,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
*/
if (meta_tmp->base == NPY_FR_GENERIC) {
dtype = NULL;
- meta.base = -1;
+ meta.base = NPY_FR_ERROR;
}
/* Otherwise use the provided metadata */
else {
@@ -3360,7 +3362,7 @@ datetime_arange(PyObject *start, PyObject *stop, PyObject *step,
type_nums[0] = NPY_TIMEDELTA;
}
- meta.base = -1;
+ meta.base = NPY_FR_ERROR;
}
if (type_nums[0] == NPY_DATETIME && start == NULL) {
@@ -3550,7 +3552,7 @@ find_string_array_datetime64_type(PyArrayObject *arr,
memcpy(tmp_buffer, data, maxlen);
tmp_buffer[maxlen] = '\0';
- tmp_meta.base = -1;
+ tmp_meta.base = NPY_FR_ERROR;
if (parse_iso_8601_datetime(tmp_buffer, maxlen, -1,
NPY_UNSAFE_CASTING, &dts,
&tmp_meta.base, NULL) < 0) {
@@ -3559,7 +3561,7 @@ find_string_array_datetime64_type(PyArrayObject *arr,
}
/* Otherwise parse the data in place */
else {
- tmp_meta.base = -1;
+ tmp_meta.base = NPY_FR_ERROR;
if (parse_iso_8601_datetime(data, tmp - data, -1,
NPY_UNSAFE_CASTING, &dts,
&tmp_meta.base, NULL) < 0) {
@@ -3651,7 +3653,7 @@ recursive_find_object_datetime64_type(PyObject *obj,
npy_datetime tmp = 0;
PyArray_DatetimeMetaData tmp_meta;
- tmp_meta.base = -1;
+ tmp_meta.base = NPY_FR_ERROR;
tmp_meta.num = 1;
if (convert_pyobject_to_datetime(&tmp_meta, obj,
diff --git a/numpy/core/src/multiarray/datetime_strings.c b/numpy/core/src/multiarray/datetime_strings.c
index b9aeda508..96cb66b95 100644
--- a/numpy/core/src/multiarray/datetime_strings.c
+++ b/numpy/core/src/multiarray/datetime_strings.c
@@ -307,8 +307,8 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len,
}
/* Check the casting rule */
- if (unit != -1 && !can_cast_datetime64_units(bestunit, unit,
- casting)) {
+ if (unit != NPY_FR_ERROR &&
+ !can_cast_datetime64_units(bestunit, unit, casting)) {
PyErr_Format(PyExc_TypeError, "Cannot parse \"%s\" as unit "
"'%s' using casting rule %s",
str, _datetime_strings[unit],
@@ -347,8 +347,8 @@ parse_iso_8601_datetime(char *str, Py_ssize_t len,
}
/* Check the casting rule */
- if (unit != -1 && !can_cast_datetime64_units(bestunit, unit,
- casting)) {
+ if (unit != NPY_FR_ERROR &&
+ !can_cast_datetime64_units(bestunit, unit, casting)) {
PyErr_Format(PyExc_TypeError, "Cannot parse \"%s\" as unit "
"'%s' using casting rule %s",
str, _datetime_strings[unit],
@@ -730,8 +730,8 @@ finish:
}
/* Check the casting rule */
- if (unit != -1 && !can_cast_datetime64_units(bestunit, unit,
- casting)) {
+ if (unit != NPY_FR_ERROR &&
+ !can_cast_datetime64_units(bestunit, unit, casting)) {
PyErr_Format(PyExc_TypeError, "Cannot parse \"%s\" as unit "
"'%s' using casting rule %s",
str, _datetime_strings[unit],
@@ -760,14 +760,12 @@ get_datetime_iso_8601_strlen(int local, NPY_DATETIMEUNIT base)
{
int len = 0;
- /* If no unit is provided, return the maximum length */
- if (base == -1) {
- return NPY_DATETIME_MAX_ISO8601_STRLEN;
- }
-
switch (base) {
- /* Generic units can only be used to represent NaT */
+ case NPY_FR_ERROR:
+ /* If no unit is provided, return the maximum length */
+ return NPY_DATETIME_MAX_ISO8601_STRLEN;
case NPY_FR_GENERIC:
+ /* Generic units can only be used to represent NaT */
return 4;
case NPY_FR_as:
len += 3; /* "###" */
@@ -928,7 +926,7 @@ make_iso_8601_datetime(npy_datetimestruct *dts, char *outstr, npy_intp outlen,
}
/* Automatically detect a good unit */
- if (base == -1) {
+ if (base == NPY_FR_ERROR) {
base = lossless_unit_from_datetimestruct(dts);
/*
* If there's a timezone, use at least minutes precision,
@@ -1406,20 +1404,24 @@ array_datetime_as_string(PyObject *NPY_UNUSED(self), PyObject *args,
goto fail;
}
- /* unit == -1 means to autodetect the unit from the datetime data */
+ /*
+ * unit == NPY_FR_ERROR means to autodetect the unit
+ * from the datetime data
+ * */
if (strcmp(str, "auto") == 0) {
- unit = -1;
+ unit = NPY_FR_ERROR;
}
else {
unit = parse_datetime_unit_from_string(str, len, NULL);
- if (unit == -1) {
+ if (unit == NPY_FR_ERROR) {
Py_DECREF(strobj);
goto fail;
}
}
Py_DECREF(strobj);
- if (unit != -1 && !can_cast_datetime64_units(meta->base, unit, casting)) {
+ if (unit != NPY_FR_ERROR &&
+ !can_cast_datetime64_units(meta->base, unit, casting)) {
PyErr_Format(PyExc_TypeError, "Cannot create a datetime "
"string as units '%s' from a NumPy datetime "
"with units '%s' according to the rule %s",
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index f6b29edfe..80161b71c 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -512,11 +512,7 @@ _convert_from_array_descr(PyObject *obj, int align)
}
if ((PyDict_GetItem(fields, name) != NULL)
|| (title
-#if defined(NPY_PY3K)
- && PyUString_Check(title)
-#else
- && (PyUString_Check(title) || PyUnicode_Check(title))
-#endif
+ && PyBaseString_Check(title)
&& (PyDict_GetItem(fields, title) != NULL))) {
#if defined(NPY_PY3K)
name = PyUnicode_AsUTF8String(name);
@@ -551,11 +547,7 @@ _convert_from_array_descr(PyObject *obj, int align)
Py_INCREF(title);
PyTuple_SET_ITEM(tup, 2, title);
PyDict_SetItem(fields, name, tup);
-#if defined(NPY_PY3K)
- if (PyUString_Check(title)) {
-#else
- if (PyUString_Check(title) || PyUnicode_Check(title)) {
-#endif
+ if (PyBaseString_Check(title)) {
if (PyDict_GetItem(fields, title) != NULL) {
PyErr_SetString(PyExc_ValueError,
"title already used as a name or title.");
@@ -1181,11 +1173,7 @@ _convert_from_dict(PyObject *obj, int align)
Py_DECREF(tup);
goto fail;
}
-#if defined(NPY_PY3K)
- if (!PyUString_Check(name)) {
-#else
- if (!(PyUString_Check(name) || PyUnicode_Check(name))) {
-#endif
+ if (!PyBaseString_Check(name)) {
PyErr_SetString(PyExc_ValueError,
"field names must be strings");
Py_DECREF(tup);
@@ -1202,11 +1190,7 @@ _convert_from_dict(PyObject *obj, int align)
PyDict_SetItem(fields, name, tup);
Py_DECREF(name);
if (len == 3) {
-#if defined(NPY_PY3K)
- if (PyUString_Check(title)) {
-#else
- if (PyUString_Check(title) || PyUnicode_Check(title)) {
-#endif
+ if (PyBaseString_Check(title)) {
if (PyDict_GetItem(fields, title) != NULL) {
PyErr_SetString(PyExc_ValueError,
"title already used as a name or title.");
@@ -3821,11 +3805,7 @@ descr_subscript(PyArray_Descr *self, PyObject *op)
return NULL;
}
-#if defined(NPY_PY3K)
- if (PyUString_Check(op)) {
-#else
- if (PyUString_Check(op) || PyUnicode_Check(op)) {
-#endif
+ if (PyBaseString_Check(op)) {
return _subscript_by_name(self, op);
}
else {
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index 8606adf99..1cdf021d2 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -1002,7 +1002,7 @@ BigInt_ShiftLeft(BigInt *result, npy_uint32 shift)
* * exponent - value exponent in base 2
* * mantissaBit - index of the highest set mantissa bit
* * hasUnequalMargins - is the high margin twice as large as the low margin
- * * cutoffMode - how to intepret cutoffNumber: fractional or total digits?
+ * * cutoffMode - how to interpret cutoffNumber: fractional or total digits?
* * cutoffNumber - cut off printing after this many digits. -1 for no cutoff
* * pOutBuffer - buffer to output into
* * bufferSize - maximum characters that can be printed to pOutBuffer
@@ -1381,7 +1381,7 @@ Dragon4(const npy_uint64 mantissa, const npy_int32 exponent,
/*
* if we are directly in the middle, round towards the even digit (i.e.
- * IEEE rouding rules)
+ * IEEE rounding rules)
*/
if (compare == 0) {
roundDown = (outputDigit & 1) == 0;
@@ -1590,7 +1590,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
npy_int32 printExponent;
npy_int32 numDigits, numWholeDigits, has_sign=0;
- npy_int32 maxPrintLen = bufferSize - 1, pos = 0;
+ npy_int32 maxPrintLen = (npy_int32)bufferSize - 1, pos = 0;
/* track the # of digits past the decimal point that have been printed */
npy_int32 numFractionDigits = 0;
@@ -1637,11 +1637,11 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
}
}
/* insert the decimal point prior to the fraction */
- else if (numDigits > (npy_uint32)numWholeDigits) {
- npy_uint32 maxFractionDigits;
+ else if (numDigits > numWholeDigits) {
+ npy_int32 maxFractionDigits;
numFractionDigits = numDigits - numWholeDigits;
- maxFractionDigits = maxPrintLen - numWholeDigits -1-pos;
+ maxFractionDigits = maxPrintLen - numWholeDigits - 1 - pos;
if (numFractionDigits > maxFractionDigits) {
numFractionDigits = maxFractionDigits;
}
@@ -1656,19 +1656,20 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
}
else {
/* shift out the fraction to make room for the leading zeros */
- npy_uint32 numFractionZeros = 0;
+ npy_int32 numFractionZeros = 0;
if (pos + 2 < maxPrintLen) {
- npy_uint32 maxFractionZeros, digitsStartIdx, maxFractionDigits, i;
+ npy_int32 maxFractionZeros, digitsStartIdx, maxFractionDigits, i;
maxFractionZeros = maxPrintLen - 2 - pos;
- numFractionZeros = (npy_uint32)-printExponent - 1;
+ numFractionZeros = -(printExponent + 1);
if (numFractionZeros > maxFractionZeros) {
numFractionZeros = maxFractionZeros;
}
digitsStartIdx = 2 + numFractionZeros;
- /* shift the significant digits right such that there is room for
+ /*
+ * shift the significant digits right such that there is room for
* leading zeros
*/
numFractionDigits = numDigits;
@@ -1719,10 +1720,10 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
}
else if (trim_mode == TrimMode_None &&
digit_mode != DigitMode_Unique &&
- precision > (npy_int32)numFractionDigits && pos < maxPrintLen) {
+ precision > numFractionDigits && pos < maxPrintLen) {
/* add trailing zeros up to precision length */
/* compute the number of trailing zeros needed */
- npy_uint32 count = precision - numFractionDigits;
+ npy_int32 count = precision - numFractionDigits;
if (pos + count > maxPrintLen) {
count = maxPrintLen - pos;
}
@@ -1751,7 +1752,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
/* add any whitespace padding to right side */
if (digits_right >= numFractionDigits) {
- npy_uint32 count = digits_right - numFractionDigits;
+ npy_int32 count = digits_right - numFractionDigits;
/* in trim_mode DptZeros, if right padding, add a space for the . */
if (trim_mode == TrimMode_DptZeros && numFractionDigits == 0
@@ -1769,8 +1770,8 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
}
/* add any whitespace padding to left side */
if (digits_left > numWholeDigits + has_sign) {
- npy_uint32 shift = digits_left - (numWholeDigits + has_sign);
- npy_uint32 count = pos;
+ npy_int32 shift = digits_left - (numWholeDigits + has_sign);
+ npy_int32 count = pos;
if (count + shift > maxPrintLen){
count = maxPrintLen - shift;
@@ -1781,7 +1782,7 @@ FormatPositional(char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
}
pos = shift + count;
for ( ; shift > 0; shift--) {
- buffer[shift-1] = ' ';
+ buffer[shift - 1] = ' ';
}
}
@@ -1871,7 +1872,8 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
/* insert the decimal point prior to the fractional number */
numFractionDigits = numDigits-1;
if (numFractionDigits > 0 && bufferSize > 1) {
- npy_uint32 maxFractionDigits = bufferSize-2;
+ npy_int32 maxFractionDigits = (npy_int32)bufferSize - 2;
+
if (numFractionDigits > maxFractionDigits) {
numFractionDigits = maxFractionDigits;
}
@@ -1905,9 +1907,10 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
if (precision > (npy_int32)numFractionDigits) {
char *pEnd;
/* compute the number of trailing zeros needed */
- npy_uint32 numZeros = (precision - numFractionDigits);
- if (numZeros > bufferSize-1) {
- numZeros = bufferSize-1;
+ npy_int32 numZeros = (precision - numFractionDigits);
+
+ if (numZeros > (npy_int32)bufferSize - 1) {
+ numZeros = (npy_int32)bufferSize - 1;
}
for (pEnd = pCurOut + numZeros; pCurOut < pEnd; ++pCurOut) {
@@ -1941,7 +1944,7 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
/* print the exponent into a local buffer and copy into output buffer */
if (bufferSize > 1) {
char exponentBuffer[7];
- npy_uint32 digits[5];
+ npy_int32 digits[5];
npy_int32 i, exp_size, count;
if (exp_digits > 5) {
@@ -1978,8 +1981,8 @@ FormatScientific (char *buffer, npy_uint32 bufferSize, npy_uint64 mantissa,
/* copy the exponent buffer into the output */
count = exp_size + 2;
- if (count > bufferSize-1) {
- count = bufferSize-1;
+ if (count > (npy_int32)bufferSize - 1) {
+ count = (npy_int32)bufferSize - 1;
}
memcpy(pCurOut, exponentBuffer, count);
pCurOut += count;
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 943b8aecf..7db606194 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -1905,7 +1905,7 @@ parse_operand_subscripts(char *subscripts, int length,
/*
* Find any labels duplicated for this operand, and turn them
- * into negative offets to the axis to merge with.
+ * into negative offsets to the axis to merge with.
*
* In C, the char type may be signed or unsigned, but with
* twos complement arithmetic the char is ok either way here, and
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index 486eb43ce..eb9ef5915 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -14,6 +14,7 @@
#include "npy_pycompat.h"
+#include "multiarraymodule.h"
#include "common.h"
#include "arrayobject.h"
#include "ctors.h"
@@ -1818,26 +1819,17 @@ PyArray_Diagonal(PyArrayObject *self, int offset, int axis1, int axis2)
}
/* Handle negative axes with standard Python indexing rules */
- if (axis1 < 0) {
- axis1 += ndim;
+ if (check_and_adjust_axis_msg(&axis1, ndim, npy_ma_str_axis1) < 0) {
+ return NULL;
}
- if (axis2 < 0) {
- axis2 += ndim;
+ if (check_and_adjust_axis_msg(&axis2, ndim, npy_ma_str_axis2) < 0) {
+ return NULL;
}
-
- /* Error check the two axes */
if (axis1 == axis2) {
PyErr_SetString(PyExc_ValueError,
"axis1 and axis2 cannot be the same");
return NULL;
}
- else if (axis1 < 0 || axis1 >= ndim || axis2 < 0 || axis2 >= ndim) {
- PyErr_Format(PyExc_ValueError,
- "axis1(=%d) and axis2(=%d) "
- "must be within range (ndim=%d)",
- axis1, axis2, ndim);
- return NULL;
- }
/* Get the shape and strides of the two axes */
shape = PyArray_SHAPE(self);
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index e0589f1c8..eca4e98be 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -1396,11 +1396,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
*view = NULL;
/* first check for a single field name */
-#if defined(NPY_PY3K)
- if (PyUnicode_Check(ind)) {
-#else
- if (PyString_Check(ind) || PyUnicode_Check(ind)) {
-#endif
+ if (PyBaseString_Check(ind)) {
PyObject *tup;
PyArray_Descr *fieldtype;
npy_intp offset;
@@ -1477,11 +1473,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
return -1;
}
-#if defined(NPY_PY3K)
- if (!PyUnicode_Check(name)) {
-#else
- if (!PyString_Check(name) && !PyUnicode_Check(name)) {
-#endif
+ if (!PyBaseString_Check(name)) {
Py_DECREF(name);
Py_DECREF(fields);
Py_DECREF(names);
@@ -1521,7 +1513,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
PyObject *errmsg = PyUString_FromString(
"duplicate field of name ");
PyUString_ConcatAndDel(&errmsg, name);
- PyErr_SetObject(PyExc_KeyError, errmsg);
+ PyErr_SetObject(PyExc_ValueError, errmsg);
Py_DECREF(errmsg);
Py_DECREF(fields);
Py_DECREF(names);
diff --git a/numpy/core/src/multiarray/multiarray_tests.c.src b/numpy/core/src/multiarray/multiarray_tests.c.src
index e223b2c7c..d63349560 100644
--- a/numpy/core/src/multiarray/multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/multiarray_tests.c.src
@@ -8,6 +8,8 @@
#include "npy_extint128.h"
#include "common.h"
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
+
/* test PyArray_IsPythonScalar, before including private py3 compat header */
static PyObject *
IsPythonScalar(PyObject * dummy, PyObject *args)
@@ -1036,7 +1038,7 @@ array_solve_diophantine(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject
return NULL;
}
- if (PyTuple_GET_SIZE(A) > sizeof(terms) / sizeof(diophantine_term_t)) {
+ if (PyTuple_GET_SIZE(A) > (Py_ssize_t)ARRAY_SIZE(terms)) {
PyErr_SetString(PyExc_ValueError, "too many terms in equation");
goto fail;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 8e7352e4f..3e322c7e2 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -62,6 +62,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
#include "compiled_base.h"
#include "mem_overlap.h"
#include "alloc.h"
+#include "typeinfo.h"
#include "get_attr_string.h"
@@ -4689,6 +4690,8 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_copy = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ndmin = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis1 = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_axis2 = NULL;
static int
intern_strings(void)
@@ -4703,12 +4706,14 @@ intern_strings(void)
npy_ma_str_copy = PyUString_InternFromString("copy");
npy_ma_str_dtype = PyUString_InternFromString("dtype");
npy_ma_str_ndmin = PyUString_InternFromString("ndmin");
+ npy_ma_str_axis1 = PyUString_InternFromString("axis1");
+ npy_ma_str_axis2 = PyUString_InternFromString("axis2");
return npy_ma_str_array && npy_ma_str_array_prepare &&
npy_ma_str_array_wrap && npy_ma_str_array_finalize &&
npy_ma_str_buffer && npy_ma_str_ufunc &&
npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype &&
- npy_ma_str_ndmin;
+ npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
}
@@ -4879,6 +4884,13 @@ PyMODINIT_FUNC initmultiarray(void) {
(PyObject *)&NpyBusDayCalendar_Type);
set_flaginfo(d);
+ /* Create the typeinfo types */
+ typeinfo_init_structsequences();
+ PyDict_SetItemString(d,
+ "typeinfo", (PyObject *)&PyArray_typeinfoType);
+ PyDict_SetItemString(d,
+ "typeinforanged", (PyObject *)&PyArray_typeinforangedType);
+
if (!intern_strings()) {
goto err;
}
diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h
index 82ae24845..3de68c549 100644
--- a/numpy/core/src/multiarray/multiarraymodule.h
+++ b/numpy/core/src/multiarray/multiarraymodule.h
@@ -11,5 +11,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_copy;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ndmin;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis1;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_axis2;
#endif
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index 88f660118..4b018b056 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -276,7 +276,9 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
}
else {
npy_intp i;
- for (i = 0; i < dtype->elsize / sizeof(obj); i++) {
+ npy_intp nsize = dtype->elsize / sizeof(obj);
+
+ for (i = 0; i < nsize; i++) {
Py_XINCREF(obj);
NPY_COPY_PYOBJECT_PTR(optr, &obj);
optr += sizeof(obj);
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index dca6e3840..ee83206de 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -2366,11 +2366,7 @@ voidtype_ass_subscript(PyVoidScalarObject *self, PyObject *ind, PyObject *val)
return -1;
}
-#if defined(NPY_PY3K)
- if (PyUString_Check(ind)) {
-#else
- if (PyBytes_Check(ind) || PyUnicode_Check(ind)) {
-#endif
+ if (PyBaseString_Check(ind)) {
/*
* Much like in voidtype_setfield, we cannot simply use ndarray's
* __setitem__ since assignment to void scalars should not broadcast
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 40925d8b9..29c122bd3 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -17,6 +17,7 @@
#include "shape.h"
+#include "multiarraymodule.h" /* for interned strings */
#include "templ_common.h" /* for npy_mul_with_overflow_intp */
#include "common.h" /* for convert_shape_to_string */
#include "alloc.h"
@@ -339,7 +340,9 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype)
}
else {
npy_intp i;
- for (i = 0; i < dtype->elsize / sizeof(zero); i++) {
+ npy_intp nsize = dtype->elsize / sizeof(zero);
+
+ for (i = 0; i < nsize; i++) {
Py_INCREF(zero);
NPY_COPY_PYOBJECT_PTR(optr, &zero);
optr += sizeof(zero);
@@ -646,20 +649,10 @@ PyArray_SwapAxes(PyArrayObject *ap, int a1, int a2)
int n = PyArray_NDIM(ap);
int i;
- if (a1 < 0) {
- a1 += n;
- }
- if (a2 < 0) {
- a2 += n;
- }
- if ((a1 < 0) || (a1 >= n)) {
- PyErr_SetString(PyExc_ValueError,
- "bad axis1 argument to swapaxes");
+ if (check_and_adjust_axis_msg(&a1, n, npy_ma_str_axis1) < 0) {
return NULL;
}
- if ((a2 < 0) || (a2 >= n)) {
- PyErr_SetString(PyExc_ValueError,
- "bad axis2 argument to swapaxes");
+ if (check_and_adjust_axis_msg(&a2, n, npy_ma_str_axis2) < 0) {
return NULL;
}
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index e5175f162..3d2f976f2 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -7,6 +7,7 @@
#include "numpy/arrayobject.h"
#define NPY_NUMBER_MAX(a, b) ((a) > (b) ? (a) : (b))
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
/*
* Functions used to try to avoid/elide temporaries in python expressions
@@ -181,6 +182,7 @@ check_callers(int * cannot)
Dl_info info;
int in_python = 0;
int in_multiarray = 0;
+
#if NPY_ELIDE_DEBUG >= 2
dladdr(buffer[i], &info);
printf("%s(%p) %s(%p)\n", info.dli_fname, info.dli_fbase,
@@ -242,14 +244,14 @@ check_callers(int * cannot)
}
if (info.dli_sname &&
strcmp(info.dli_sname, PYFRAMEEVAL_FUNC) == 0) {
- if (n_pyeval < sizeof(pyeval_addr) / sizeof(pyeval_addr[0])) {
+ if (n_pyeval < (npy_intp)ARRAY_SIZE(pyeval_addr)) {
/* store address to not have to dladdr it again */
pyeval_addr[n_pyeval++] = buffer[i];
}
ok = 1;
break;
}
- else if (n_py_addr < sizeof(py_addr) / sizeof(py_addr[0])) {
+ else if (n_py_addr < (npy_intp)ARRAY_SIZE(py_addr)) {
/* store other py function to not have to dladdr it again */
py_addr[n_py_addr++] = buffer[i];
}
diff --git a/numpy/core/src/multiarray/typeinfo.c b/numpy/core/src/multiarray/typeinfo.c
new file mode 100644
index 000000000..f0af76809
--- /dev/null
+++ b/numpy/core/src/multiarray/typeinfo.c
@@ -0,0 +1,114 @@
+/*
+ * Provides namedtuples for numpy.core.multiarray.typeinfo
+ * Unfortunately, we need two different types to cover the cases where min/max
+ * do and do not appear in the tuple.
+ */
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+
+/* In python 2, this is not exported from Python.h */
+#include <structseq.h>
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+#include "npy_pycompat.h"
+
+
+PyTypeObject PyArray_typeinfoType;
+PyTypeObject PyArray_typeinforangedType;
+
+static PyStructSequence_Field typeinfo_fields[] = {
+ {"char", "The character used to represent the type"},
+ {"num", "The numeric id assigned to the type"},
+ {"bits", "The number of bits in the type"},
+ {"alignment", "The alignment of the type in bytes"},
+ {"type", "The python type object this info is about"},
+ {NULL, NULL,}
+};
+
+static PyStructSequence_Field typeinforanged_fields[] = {
+ {"char", "The character used to represent the type"},
+ {"num", "The numeric id assigned to the type"},
+ {"bits", "The number of bits in the type"},
+ {"alignment", "The alignment of the type in bytes"},
+ {"max", "The maximum value of this type"},
+ {"min", "The minimum value of this type"},
+ {"type", "The python type object this info is about"},
+ {NULL, NULL,}
+};
+
+static PyStructSequence_Desc typeinfo_desc = {
+ "numpy.core.multiarray.typeinfo", /* name */
+ "Information about a scalar numpy type", /* doc */
+ typeinfo_fields, /* fields */
+ 5, /* n_in_sequence */
+};
+
+static PyStructSequence_Desc typeinforanged_desc = {
+ "numpy.core.multiarray.typeinforanged", /* name */
+ "Information about a scalar numpy type with a range", /* doc */
+ typeinforanged_fields, /* fields */
+ 7, /* n_in_sequence */
+};
+
+PyObject *
+PyArray_typeinfo(
+ char typechar, int typenum, int nbits, int align,
+ PyTypeObject *type_obj)
+{
+ PyObject *entry = PyStructSequence_New(&PyArray_typeinfoType);
+ if (entry == NULL)
+ return NULL;
+#if defined(NPY_PY3K)
+ PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("C", typechar));
+#else
+ PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("c", typechar));
+#endif
+ PyStructSequence_SET_ITEM(entry, 1, Py_BuildValue("i", typenum));
+ PyStructSequence_SET_ITEM(entry, 2, Py_BuildValue("i", nbits));
+ PyStructSequence_SET_ITEM(entry, 3, Py_BuildValue("i", align));
+ PyStructSequence_SET_ITEM(entry, 4, Py_BuildValue("O", (PyObject *) type_obj));
+
+ if (PyErr_Occurred()) {
+ Py_DECREF(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+PyObject *
+PyArray_typeinforanged(
+ char typechar, int typenum, int nbits, int align,
+ PyObject *max, PyObject *min, PyTypeObject *type_obj)
+{
+ PyObject *entry = PyStructSequence_New(&PyArray_typeinforangedType);
+ if (entry == NULL)
+ return NULL;
+#if defined(NPY_PY3K)
+ PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("C", typechar));
+#else
+ PyStructSequence_SET_ITEM(entry, 0, Py_BuildValue("c", typechar));
+#endif
+ PyStructSequence_SET_ITEM(entry, 1, Py_BuildValue("i", typenum));
+ PyStructSequence_SET_ITEM(entry, 2, Py_BuildValue("i", nbits));
+ PyStructSequence_SET_ITEM(entry, 3, Py_BuildValue("i", align));
+ PyStructSequence_SET_ITEM(entry, 4, max);
+ PyStructSequence_SET_ITEM(entry, 5, min);
+ PyStructSequence_SET_ITEM(entry, 6, Py_BuildValue("O", (PyObject *) type_obj));
+
+ if (PyErr_Occurred()) {
+ Py_DECREF(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+void typeinfo_init_structsequences(void)
+{
+ PyStructSequence_InitType(
+ &PyArray_typeinfoType, &typeinfo_desc);
+ PyStructSequence_InitType(
+ &PyArray_typeinforangedType, &typeinforanged_desc);
+}
diff --git a/numpy/core/src/multiarray/typeinfo.h b/numpy/core/src/multiarray/typeinfo.h
new file mode 100644
index 000000000..5899c2093
--- /dev/null
+++ b/numpy/core/src/multiarray/typeinfo.h
@@ -0,0 +1,19 @@
+#ifndef _NPY_PRIVATE_TYPEINFO_H_
+#define _NPY_PRIVATE_TYPEINFO_H_
+
+void typeinfo_init_structsequences(void);
+
+extern PyTypeObject PyArray_typeinfoType;
+extern PyTypeObject PyArray_typeinforangedType;
+
+PyObject *
+PyArray_typeinfo(
+ char typechar, int typenum, int nbits, int align,
+ PyTypeObject *type_obj);
+
+PyObject *
+PyArray_typeinforanged(
+ char typechar, int typenum, int nbits, int align,
+ PyObject *max, PyObject *min, PyTypeObject *type_obj);
+
+#endif
diff --git a/numpy/core/src/multiarray/vdot.c b/numpy/core/src/multiarray/vdot.c
index 4be85672e..424a21710 100644
--- a/numpy/core/src/multiarray/vdot.c
+++ b/numpy/core/src/multiarray/vdot.c
@@ -1,4 +1,5 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
#include <Python.h>
#include "common.h"
diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c
index 951768256..c2bd28d60 100644
--- a/numpy/core/src/npymath/halffloat.c
+++ b/numpy/core/src/npymath/halffloat.c
@@ -281,7 +281,7 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
if (f_exp <= 0x38000000u) {
/*
* Signed zeros, subnormal floats, and floats with small
- * exponents all convert to signed zero halfs.
+ * exponents all convert to signed zero half-floats.
*/
if (f_exp < 0x33000000u) {
#if NPY_HALF_GENERATE_UNDERFLOW
@@ -396,7 +396,7 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
if (d_exp <= 0x3f00000000000000ULL) {
/*
* Signed zeros, subnormal floats, and floats with small
- * exponents all convert to signed zero halfs.
+ * exponents all convert to signed zero half-floats.
*/
if (d_exp < 0x3e60000000000000ULL) {
#if NPY_HALF_GENERATE_UNDERFLOW
diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src
index fb31e8e6a..ea784ec5b 100644
--- a/numpy/core/src/npymath/npy_math_complex.c.src
+++ b/numpy/core/src/npymath/npy_math_complex.c.src
@@ -35,11 +35,17 @@
#include "npy_math_private.h"
#include <numpy/utils.h>
-
-#define raise_inexact() do { volatile npy_float junk = 1 + tiny; } while(0)
+/*
+ * Hack inherited from BSD, the intent is to set the FPU inexact
+ * flag in an efficient way. The flag is IEEE specific. See
+ * https://github.com/freebsd/freebsd/blob/4c6378299/lib/msun/src/catrig.c#L42
+ */
+#define raise_inexact() do { \
+ volatile npy_float NPY_UNUSED(junk) = 1 + tiny; \
+} while (0)
-static __COMP_NPY_UNUSED npy_float tiny = 3.9443045e-31f;
+static const volatile npy_float tiny = 3.9443045e-31f;
/**begin repeat
diff --git a/numpy/core/src/npysort/quicksort.c.src b/numpy/core/src/npysort/quicksort.c.src
index ff0e8a149..49a2c4906 100644
--- a/numpy/core/src/npysort/quicksort.c.src
+++ b/numpy/core/src/npysort/quicksort.c.src
@@ -482,7 +482,7 @@ npy_quicksort(void *start, npy_intp num, void *varr)
pj = pr - elsize;
GENERIC_SWAP(pm, pj, elsize);
/*
- * Generic comparisons may be buggy, so don't rely on the sentinals
+ * Generic comparisons may be buggy, so don't rely on the sentinels
* to keep the pointers from going out of bounds.
*/
for (;;) {
diff --git a/numpy/core/src/private/npy_binsearch.h.src b/numpy/core/src/private/npy_binsearch.h.src
index 3b2c59487..ce3b34b0e 100644
--- a/numpy/core/src/private/npy_binsearch.h.src
+++ b/numpy/core/src/private/npy_binsearch.h.src
@@ -5,6 +5,8 @@
#include <numpy/npy_common.h>
#include <numpy/ndarraytypes.h>
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
+
typedef void (PyArray_BinSearchFunc)(const char*, const char*, char*,
npy_intp, npy_intp,
npy_intp, npy_intp, npy_intp,
@@ -16,15 +18,15 @@ typedef int (PyArray_ArgBinSearchFunc)(const char*, const char*,
npy_intp, npy_intp, npy_intp,
PyArrayObject*);
-struct binsearch_map {
- enum NPY_TYPES typenum;
+typedef struct {
+ int typenum;
PyArray_BinSearchFunc *binsearch[NPY_NSEARCHSIDES];
-};
+} binsearch_map;
-struct argbinsearch_map {
- enum NPY_TYPES typenum;
+typedef struct {
+ int typenum;
PyArray_ArgBinSearchFunc *argbinsearch[NPY_NSEARCHSIDES];
-};
+} argbinsearch_map;
/**begin repeat
*
@@ -72,7 +74,7 @@ npy_argbinsearch_@side@(const char *arr, const char *key,
* #Arg = , Arg#
*/
-static struct @arg@binsearch_map _@arg@binsearch_map[] = {
+static @arg@binsearch_map _@arg@binsearch_map[] = {
/* If adding new types, make sure to keep them ordered by type num */
/**begin repeat1
*
@@ -100,10 +102,9 @@ static PyArray_@Arg@BinSearchFunc *gen@arg@binsearch_map[] = {
static NPY_INLINE PyArray_@Arg@BinSearchFunc*
get_@arg@binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
{
- static npy_intp num_funcs = sizeof(_@arg@binsearch_map) /
- sizeof(_@arg@binsearch_map[0]);
+ npy_intp nfuncs = ARRAY_SIZE(_@arg@binsearch_map);
npy_intp min_idx = 0;
- npy_intp max_idx = num_funcs;
+ npy_intp max_idx = nfuncs;
int type = dtype->type_num;
if (side >= NPY_NSEARCHSIDES) {
@@ -125,7 +126,8 @@ get_@arg@binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
}
}
- if (min_idx < num_funcs && _@arg@binsearch_map[min_idx].typenum == type) {
+ if (min_idx < nfuncs &&
+ _@arg@binsearch_map[min_idx].typenum == type) {
return _@arg@binsearch_map[min_idx].@arg@binsearch[side];
}
@@ -137,4 +139,6 @@ get_@arg@binsearch_func(PyArray_Descr *dtype, NPY_SEARCHSIDE side)
}
/**end repeat**/
+#undef ARRAY_SIZE
+
#endif
diff --git a/numpy/core/src/private/npy_partition.h.src b/numpy/core/src/private/npy_partition.h.src
index 07aecd4f8..a22cf911c 100644
--- a/numpy/core/src/private/npy_partition.h.src
+++ b/numpy/core/src/private/npy_partition.h.src
@@ -24,8 +24,9 @@
#include <numpy/npy_common.h>
#include <numpy/ndarraytypes.h>
-#define NPY_MAX_PIVOT_STACK 50
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
+#define NPY_MAX_PIVOT_STACK 50
/**begin repeat
*
@@ -56,7 +57,7 @@ NPY_VISIBILITY_HIDDEN int aintroselect_@suff@(@type@ *v, npy_intp* tosort, npy_i
/**end repeat**/
typedef struct {
- enum NPY_TYPES typenum;
+ int typenum;
PyArray_PartitionFunc * part[NPY_NSELECTS];
PyArray_ArgPartitionFunc * argpart[NPY_NSELECTS];
} part_map;
@@ -92,10 +93,12 @@ static NPY_INLINE PyArray_PartitionFunc *
get_partition_func(int type, NPY_SELECTKIND which)
{
npy_intp i;
+ npy_intp ntypes = ARRAY_SIZE(_part_map);
+
if (which >= NPY_NSELECTS) {
return NULL;
}
- for (i = 0; i < sizeof(_part_map)/sizeof(_part_map[0]); i++) {
+ for (i = 0; i < ntypes; i++) {
if (type == _part_map[i].typenum) {
return _part_map[i].part[which];
}
@@ -108,10 +111,12 @@ static NPY_INLINE PyArray_ArgPartitionFunc *
get_argpartition_func(int type, NPY_SELECTKIND which)
{
npy_intp i;
+ npy_intp ntypes = ARRAY_SIZE(_part_map);
+
if (which >= NPY_NSELECTS) {
return NULL;
}
- for (i = 0; i < sizeof(_part_map)/sizeof(_part_map[0]); i++) {
+ for (i = 0; i < ntypes; i++) {
if (type == _part_map[i].typenum) {
return _part_map[i].argpart[which];
}
@@ -119,4 +124,6 @@ get_argpartition_func(int type, NPY_SELECTKIND which)
return NULL;
}
+#undef ARRAY_SIZE
+
#endif
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index 3b23151f1..7b424cc74 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -1424,7 +1424,11 @@ static PyObject *
#ifndef NPY_PY3K
/* Invoke long.__int__ to try to downcast */
- long_result = Py_TYPE(long_result)->tp_as_number->nb_int(long_result);
+ {
+ PyObject *before_downcast = long_result;
+ long_result = Py_TYPE(long_result)->tp_as_number->nb_int(long_result);
+ Py_DECREF(before_downcast);
+ }
#endif
return long_result;
diff --git a/numpy/core/src/umath/test_rational.c.src b/numpy/core/src/umath/test_rational.c.src
index 26c3d3799..ffc92b732 100644
--- a/numpy/core/src/umath/test_rational.c.src
+++ b/numpy/core/src/umath/test_rational.c.src
@@ -394,14 +394,14 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
return 0;
}
size = PyTuple_GET_SIZE(args);
- if (size>2) {
+ if (size > 2) {
PyErr_SetString(PyExc_TypeError,
"expected rational or numerator and optional denominator");
return 0;
}
- x[0] = PyTuple_GET_ITEM(args,0);
- x[1] = PyTuple_GET_ITEM(args,1);
- if (size==1) {
+
+ if (size == 1) {
+ x[0] = PyTuple_GET_ITEM(args, 0);
if (PyRational_Check(x[0])) {
Py_INCREF(x[0]);
return x[0];
@@ -424,9 +424,11 @@ pyrational_new(PyTypeObject* type, PyObject* args, PyObject* kwds) {
return 0;
}
}
- for (i=0;i<size;i++) {
+
+ for (i=0; i<size; i++) {
PyObject* y;
int eq;
+ x[i] = PyTuple_GET_ITEM(args, i);
n[i] = PyInt_AsLong(x[i]);
if (error_converting(n[i])) {
if (PyErr_ExceptionMatches(PyExc_TypeError)) {
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 304b4fc27..c67f60752 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -185,6 +185,43 @@ _find_array_method(PyObject *args, int nin, PyObject *method_name)
}
/*
+ * Returns an incref'ed pointer to the proper __array_prepare__/__array_wrap__
+ * method for a ufunc output argument, given the output argument `obj`, and the
+ * method chosen from the inputs `input_method`.
+ */
+static PyObject *
+_get_output_array_method(PyObject *obj, PyObject *method,
+ PyObject *input_method) {
+ if (obj != Py_None) {
+ PyObject *ometh;
+
+ if (PyArray_CheckExact(obj)) {
+ /*
+ * No need to wrap regular arrays - None signals to not call
+ * wrap/prepare at all
+ */
+ Py_RETURN_NONE;
+ }
+
+ ometh = PyObject_GetAttr(obj, method);
+ if (ometh == NULL) {
+ PyErr_Clear();
+ }
+ else if (!PyCallable_Check(ometh)) {
+ Py_DECREF(ometh);
+ }
+ else {
+ /* Use the wrap/prepare method of the output if it's callable */
+ return ometh;
+ }
+ }
+
+ /* Fall back on the input's wrap/prepare */
+ Py_XINCREF(input_method);
+ return input_method;
+}
+
+/*
* This function analyzes the input arguments
* and determines an appropriate __array_prepare__ function to call
* for the outputs.
@@ -206,13 +243,12 @@ _find_array_prepare(PyObject *args, PyObject *kwds,
{
Py_ssize_t nargs;
int i;
- PyObject *obj, *prep;
/*
* Determine the prepping function given by the input arrays
* (could be NULL).
*/
- prep = _find_array_method(args, nin, npy_um_str_array_prepare);
+ PyObject *prep = _find_array_method(args, nin, npy_um_str_array_prepare);
/*
* For all the output arrays decide what to do.
*
@@ -228,9 +264,7 @@ _find_array_prepare(PyObject *args, PyObject *kwds,
nargs = PyTuple_GET_SIZE(args);
for (i = 0; i < nout; i++) {
int j = nin + i;
- int incref = 1;
- output_prep[i] = prep;
- obj = NULL;
+ PyObject *obj = NULL;
if (j < nargs) {
obj = PyTuple_GET_ITEM(args, j);
/* Output argument one may also be in a keyword argument */
@@ -243,27 +277,13 @@ _find_array_prepare(PyObject *args, PyObject *kwds,
obj = PyDict_GetItem(kwds, npy_um_str_out);
}
- if (obj != Py_None && obj != NULL) {
- if (PyArray_CheckExact(obj)) {
- /* None signals to not call any wrapping */
- output_prep[i] = Py_None;
- }
- else {
- PyObject *oprep = PyObject_GetAttr(obj,
- npy_um_str_array_prepare);
- incref = 0;
- if (!(oprep) || !(PyCallable_Check(oprep))) {
- Py_XDECREF(oprep);
- oprep = prep;
- incref = 1;
- PyErr_Clear();
- }
- output_prep[i] = oprep;
- }
+ if (obj == NULL) {
+ Py_XINCREF(prep);
+ output_prep[i] = prep;
}
-
- if (incref) {
- Py_XINCREF(output_prep[i]);
+ else {
+ output_prep[i] = _get_output_array_method(
+ obj, npy_um_str_array_prepare, prep);
}
}
Py_XDECREF(prep);
@@ -1256,7 +1276,7 @@ iterator_loop(PyUFuncObject *ufunc,
arr_prep[i], arr_prep_args, i) < 0) {
for(iop = 0; iop < nin+i; ++iop) {
if (op_it[iop] != op[iop]) {
- /* ignore errrors */
+ /* ignore errors */
PyArray_ResolveWritebackIfCopy(op_it[iop]);
}
}
@@ -3894,38 +3914,6 @@ fail:
}
/*
- * Returns an incref'ed pointer to the proper wrapping object for a
- * ufunc output argument, given the output argument 'out', and the
- * input's wrapping function, 'wrap'.
- */
-static PyObject*
-_get_out_wrap(PyObject *out, PyObject *wrap) {
- PyObject *owrap;
-
- if (out == Py_None) {
- /* Iterator allocated outputs get the input's wrapping */
- Py_XINCREF(wrap);
- return wrap;
- }
- if (PyArray_CheckExact(out)) {
- /* None signals to not call any wrapping */
- Py_RETURN_NONE;
- }
- /*
- * For array subclasses use their __array_wrap__ method, or the
- * input's wrapping if not available
- */
- owrap = PyObject_GetAttr(out, npy_um_str_array_wrap);
- if (owrap == NULL || !PyCallable_Check(owrap)) {
- Py_XDECREF(owrap);
- owrap = wrap;
- Py_XINCREF(wrap);
- PyErr_Clear();
- }
- return owrap;
-}
-
-/*
* This function analyzes the input arguments
* and determines an appropriate __array_wrap__ function to call
* for the outputs.
@@ -4001,7 +3989,8 @@ handle_out:
}
else {
/* If the kwarg is not a tuple then it is an array (or None) */
- output_wrap[0] = _get_out_wrap(obj, wrap);
+ output_wrap[0] = _get_output_array_method(
+ obj, npy_um_str_array_wrap, wrap);
start_idx = 1;
nargs = 1;
}
@@ -4012,8 +4001,8 @@ handle_out:
int j = idx_offset + i;
if (j < nargs) {
- output_wrap[i] = _get_out_wrap(PyTuple_GET_ITEM(obj, j),
- wrap);
+ output_wrap[i] = _get_output_array_method(
+ PyTuple_GET_ITEM(obj, j), npy_um_str_array_wrap, wrap);
}
else {
output_wrap[i] = wrap;
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 1a6cee030..03bf5bfd8 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -359,12 +359,10 @@ PyMODINIT_FUNC initumath(void)
goto err;
}
- s = PyString_FromString("0.4.0");
- PyDict_SetItemString(d, "__version__", s);
- Py_DECREF(s);
-
/* Load the ufunc operators into the array module's namespace */
- InitOperators(d);
+ if (InitOperators(d) < 0) {
+ goto err;
+ }
PyDict_SetItemString(d, "pi", s = PyFloat_FromDouble(NPY_PI));
Py_DECREF(s);
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index d491d53aa..faf0a3272 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -28,7 +28,7 @@ class TestArrayRepr(object):
' [3, 4]])')
# two dimensional with flexible dtype
- xstruct = np.ones((2,2), dtype=[('a', 'i4')]).view(sub)
+ xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
assert_equal(repr(xstruct),
"sub([[(1,), (1,)],\n"
" [(1,), (1,)]], dtype=[('a', '<i4')])"
@@ -64,6 +64,12 @@ class TestArrayRepr(object):
# gh-9345
repr(np.void(b'test')) # RecursionError ?
+ def test_fieldless_structured(self):
+ # gh-10366
+ no_fields = np.dtype([])
+ arr_no_fields = np.empty(4, dtype=no_fields)
+ assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
+
class TestComplexArray(object):
def test_str(self):
@@ -356,13 +362,14 @@ class TestPrintOptions(object):
def test_0d_arrays(self):
unicode = type(u'')
- assert_equal(unicode(np.array(u'café', np.unicode_)), u'café')
+
+ assert_equal(unicode(np.array(u'café', '<U4')), u'café')
if sys.version_info[0] >= 3:
- assert_equal(repr(np.array('café', np.unicode_)),
+ assert_equal(repr(np.array('café', '<U4')),
"array('café', dtype='<U4')")
else:
- assert_equal(repr(np.array(u'café', np.unicode_)),
+ assert_equal(repr(np.array(u'café', '<U4')),
"array(u'caf\\xe9', dtype='<U4')")
assert_equal(str(np.array('test', np.str_)), 'test')
@@ -427,21 +434,30 @@ class TestPrintOptions(object):
def test_sign_spacing(self):
a = np.arange(4.)
b = np.array([1.234e9])
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array(1.)')
assert_equal(repr(b), 'array([1.234e+09])')
assert_equal(repr(np.array([0.])), 'array([0.])')
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.12345679+1.12345679j])")
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign=' ')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
assert_equal(repr(np.array(1.)), 'array( 1.)')
assert_equal(repr(b), 'array([ 1.234e+09])')
+ assert_equal(repr(c),
+ "array([ 1. +1.j , 1.12345679+1.12345679j])")
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
np.set_printoptions(sign='+')
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
assert_equal(repr(np.array(1.)), 'array(+1.)')
assert_equal(repr(b), 'array([+1.234e+09])')
+ assert_equal(repr(c),
+ "array([+1. +1.j , +1.12345679+1.12345679j])")
np.set_printoptions(legacy='1.13')
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
@@ -449,6 +465,10 @@ class TestPrintOptions(object):
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
assert_equal(repr(np.array(1.)), 'array(1.0)')
assert_equal(repr(np.array([0.])), 'array([ 0.])')
+ assert_equal(repr(c),
+ "array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
+ # gh-10383
+ assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
@@ -458,7 +478,7 @@ class TestPrintOptions(object):
repr(np.array([1e4, 0.1], dtype='f2'))
def test_sign_spacing_structured(self):
- a = np.ones(2, dtype='f,f')
+ a = np.ones(2, dtype='<f,<f')
assert_equal(repr(a),
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
assert_equal(repr(a[0]), "(1., 1.)")
@@ -472,6 +492,7 @@ class TestPrintOptions(object):
0.0862072768214508, 0.39112753029631175],
dtype=np.float64)
z = np.arange(6, dtype=np.float16)/10
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
# also make sure 1e23 is right (is between two fp numbers)
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
@@ -497,6 +518,8 @@ class TestPrintOptions(object):
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
" 1.e+24])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.123456789+1.123456789j])")
# maxprec mode, precision=8
np.set_printoptions(floatmode='maxprec', precision=8)
@@ -511,6 +534,8 @@ class TestPrintOptions(object):
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1. +1.j , 1.12345679+1.12345679j])")
# fixed mode, precision=4
np.set_printoptions(floatmode='fixed', precision=4)
@@ -525,6 +550,8 @@ class TestPrintOptions(object):
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
+ assert_equal(repr(c),
+ "array([1.0000+1.0000j, 1.1235+1.1235j])")
# for larger precision, representation error becomes more apparent:
np.set_printoptions(floatmode='fixed', precision=8)
assert_equal(repr(z),
@@ -544,6 +571,8 @@ class TestPrintOptions(object):
assert_equal(repr(w[::5]),
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
+ assert_equal(repr(c),
+ "array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
def test_legacy_mode_scalars(self):
# in legacy mode, str of floats get truncated, and complex scalars
@@ -715,5 +744,37 @@ def test_unicode_object_array():
assert_equal(repr(x), expected)
+class TestContextManager(object):
+ def test_ctx_mgr(self):
+ # test that context manager actuall works
+ with np.printoptions(precision=2):
+ s = str(np.array([2.0]) / 3)
+ assert_equal(s, '[0.67]')
+
+ def test_ctx_mgr_restores(self):
+ # test that print options are actually restrored
+ opts = np.get_printoptions()
+ with np.printoptions(precision=opts['precision'] - 1,
+ linewidth=opts['linewidth'] - 4):
+ pass
+ assert_equal(np.get_printoptions(), opts)
+
+ def test_ctx_mgr_exceptions(self):
+ # test that print options are restored even if an exeption is raised
+ opts = np.get_printoptions()
+ try:
+ with np.printoptions(precision=2, linewidth=11):
+ raise ValueError
+ except ValueError:
+ pass
+ assert_equal(np.get_printoptions(), opts)
+
+ def test_ctx_mgr_as_smth(self):
+ opts = {"precision": 2}
+ with np.printoptions(**opts) as ctx:
+ saved_opts = ctx.copy()
+ assert_equal({k: saved_opts[k] for k in opts}, opts)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index b48983e2e..110ae378b 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -40,7 +40,7 @@ class TestBuiltin(object):
assert_(dt.byteorder != dt2.byteorder, "bogus test")
assert_dtype_equal(dt, dt2)
else:
- self.assertTrue(dt.byteorder != dt3.byteorder, "bogus test")
+ assert_(dt.byteorder != dt3.byteorder, "bogus test")
assert_dtype_equal(dt, dt3)
def test_equivalent_dtype_hashing(self):
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index da83bb8c4..9bd85fdb9 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -481,6 +481,25 @@ class TestEinSum(object):
r = np.arange(4).reshape(2, 2) + 7
assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
+ # singleton dimensions broadcast (gh-10343)
+ p = np.ones((10,2))
+ q = np.ones((1,2))
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+ np.einsum('ij,ij->j', p, q, optimize=False))
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
+ [10.] * 2)
+
+ p = np.ones((1, 5))
+ q = np.ones((5, 5))
+ for optimize in (True, False):
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
+ optimize=optimize),
+ np.einsum("...ij,...jk->...ik", p, q,
+ optimize=optimize))
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
+ optimize=optimize),
+ np.full((1, 5), 5))
+
def test_einsum_sums_int8(self):
self.check_einsum_sums('i1')
@@ -538,6 +557,13 @@ class TestEinSum(object):
assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
+ # Regression test for issue #10369 (test unicode inputs with Python 2)
+ assert_equal(np.einsum(u'ij...,j...->i...', a, b), [[[2], [2]]])
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
+ assert_equal(np.einsum(u'...i,...i', [1, 2, 3], [2, 3, 4]), 20)
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
+ optimize=u'greedy'), 20)
+
# The iterator had an issue with buffering this reduction
a = np.ones((5, 12, 4, 2, 3), np.int64)
b = np.ones((5, 12, 11), np.int64)
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index 53d56b5e7..9c17ed210 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -94,7 +94,7 @@ def test_overlapping_assignments():
srcidx = tuple([a[0] for a in ind])
dstidx = tuple([a[1] for a in ind])
- yield _check_assignment, srcidx, dstidx
+ _check_assignment(srcidx, dstidx)
@dec.slow
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index e90d2180a..d65bb17e6 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -1157,9 +1157,11 @@ class TestStructured(object):
def test_multiindex_titles(self):
a = np.zeros(4, dtype=[(('a', 'b'), 'i'), ('c', 'i'), ('d', 'i')])
assert_raises(KeyError, lambda : a[['a','c']])
- assert_raises(KeyError, lambda : a[['b','b']])
+ assert_raises(KeyError, lambda : a[['a','a']])
+ assert_raises(ValueError, lambda : a[['b','b']]) # field exists, but repeated
a[['b','c']] # no exception
+
class TestBool(object):
def test_test_interning(self):
a0 = np.bool_(0)
@@ -2633,6 +2635,10 @@ class TestMethods(object):
assert_equal(a.diagonal(0), [0, 5, 10])
assert_equal(a.diagonal(1), [1, 6, 11])
assert_equal(a.diagonal(-1), [4, 9])
+ assert_raises(np.AxisError, a.diagonal, axis1=0, axis2=5)
+ assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=0)
+ assert_raises(np.AxisError, a.diagonal, axis1=5, axis2=5)
+ assert_raises(ValueError, a.diagonal, axis1=1, axis2=1)
b = np.arange(8).reshape((2, 2, 2))
assert_equal(b.diagonal(), [[0, 6], [1, 7]])
@@ -2646,6 +2652,7 @@ class TestMethods(object):
# Order of axis argument doesn't matter:
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
+
def test_diagonal_view_notwriteable(self):
# this test is only for 1.9, the diagonal view will be
# writeable in 1.10.
@@ -2862,10 +2869,10 @@ class TestMethods(object):
assert_(a.flags['OWNDATA'])
b = a.copy()
# check exceptions
- assert_raises(ValueError, a.swapaxes, -5, 0)
- assert_raises(ValueError, a.swapaxes, 4, 0)
- assert_raises(ValueError, a.swapaxes, 0, -5)
- assert_raises(ValueError, a.swapaxes, 0, 4)
+ assert_raises(np.AxisError, a.swapaxes, -5, 0)
+ assert_raises(np.AxisError, a.swapaxes, 4, 0)
+ assert_raises(np.AxisError, a.swapaxes, 0, -5)
+ assert_raises(np.AxisError, a.swapaxes, 0, 4)
for i in range(-4, 4):
for j in range(-4, 4):
@@ -6454,6 +6461,19 @@ class TestNewBufferProtocol(object):
shape, strides = get_buffer_info(arr, ['C_CONTIGUOUS'])
assert_(strides[-1] == 8)
+ def test_out_of_order_fields(self):
+ dt = np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['one', 'two'],
+ offsets=[4, 0],
+ itemsize=8
+ ))
+
+ # overlapping fields cannot be represented by PEP3118
+ arr = np.empty(1, dt)
+ with assert_raises(ValueError):
+ memoryview(arr)
+
class TestArrayAttributeDeletion(object):
diff --git a/numpy/core/tests/test_print.py b/numpy/core/tests/test_print.py
index 4b5c5f81f..6ebb4733c 100644
--- a/numpy/core/tests/test_print.py
+++ b/numpy/core/tests/test_print.py
@@ -40,7 +40,7 @@ def test_float_types():
"""
for t in [np.float32, np.double, np.longdouble]:
- yield check_float_type, t
+ check_float_type(t)
def check_nan_inf_float(tp):
for x in [np.inf, -np.inf, np.nan]:
@@ -56,7 +56,7 @@ def test_nan_inf_float():
"""
for t in [np.float32, np.double, np.longdouble]:
- yield check_nan_inf_float, t
+ check_nan_inf_float(t)
def check_complex_type(tp):
for x in [0, 1, -1, 1e20]:
@@ -84,7 +84,7 @@ def test_complex_types():
"""
for t in [np.complex64, np.cdouble, np.clongdouble]:
- yield check_complex_type, t
+ check_complex_type(t)
def test_complex_inf_nan():
"""Check inf/nan formatting of complex types."""
@@ -108,7 +108,7 @@ def test_complex_inf_nan():
}
for tp in [np.complex64, np.cdouble, np.clongdouble]:
for c, s in TESTS.items():
- yield _check_complex_inf_nan, c, s, tp
+ _check_complex_inf_nan(c, s, tp)
def _check_complex_inf_nan(c, s, dtype):
assert_equal(str(dtype(c)), s)
@@ -164,12 +164,12 @@ def check_complex_type_print(tp):
def test_float_type_print():
"""Check formatting when using print """
for t in [np.float32, np.double, np.longdouble]:
- yield check_float_type_print, t
+ check_float_type_print(t)
def test_complex_type_print():
"""Check formatting when using print """
for t in [np.complex64, np.cdouble, np.clongdouble]:
- yield check_complex_type_print, t
+ check_complex_type_print(t)
def test_scalar_format():
"""Test the str.format method with NumPy scalar types"""
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 73cfe3570..d5423b1f1 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -4,6 +4,7 @@ import sys
import collections
import pickle
import warnings
+import textwrap
from os import path
import numpy as np
@@ -103,7 +104,7 @@ class TestFromrecords(object):
def test_recarray_repr(self):
a = np.array([(1, 0.1), (2, 0.2)],
- dtype=[('foo', int), ('bar', float)])
+ dtype=[('foo', '<i4'), ('bar', '<f8')])
a = np.rec.array(a)
assert_equal(
repr(a),
@@ -112,6 +113,31 @@ class TestFromrecords(object):
dtype=[('foo', '<i4'), ('bar', '<f8')])""")
)
+ # make sure non-structured dtypes also show up as rec.array
+ a = np.array(np.ones(4, dtype='f8'))
+ assert_(repr(np.rec.array(a)).startswith('rec.array'))
+
+ # check that the 'np.record' part of the dtype isn't shown
+ a = np.rec.array(np.ones(3, dtype='i4,i4'))
+ assert_equal(repr(a).find('numpy.record'), -1)
+ a = np.rec.array(np.ones(3, dtype='i4'))
+ assert_(repr(a).find('dtype=int32') != -1)
+
+ def test_0d_recarray_repr(self):
+ arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]')
+ assert_equal(repr(arr_0d), textwrap.dedent("""\
+ rec.array((1, 2., '2003'),
+ dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])"""))
+
+ record = arr_0d[()]
+ assert_equal(repr(record), "(1, 2., '2003')")
+ # 1.13 converted to python scalars before the repr
+ try:
+ np.set_printoptions(legacy='1.13')
+ assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))')
+ finally:
+ np.set_printoptions(legacy=False)
+
def test_recarray_from_repr(self):
a = np.array([(1,'ABC'), (2, "DEF")],
dtype=[('foo', int), ('bar', 'S4')])
@@ -197,17 +223,6 @@ class TestFromrecords(object):
assert_equal(arr2.dtype.type, arr.dtype.type)
assert_equal(type(arr2), type(arr))
- def test_recarray_repr(self):
- # make sure non-structured dtypes also show up as rec.array
- a = np.array(np.ones(4, dtype='f8'))
- assert_(repr(np.rec.array(a)).startswith('rec.array'))
-
- # check that the 'np.record' part of the dtype isn't shown
- a = np.rec.array(np.ones(3, dtype='i4,i4'))
- assert_equal(repr(a).find('numpy.record'), -1)
- a = np.rec.array(np.ones(3, dtype='i4'))
- assert_(repr(a).find('dtype=int32') != -1)
-
def test_recarray_from_names(self):
ra = np.rec.array([
(1, 'abc', 3.7000002861022949, 0),
@@ -340,6 +355,19 @@ class TestRecord(object):
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
+ def test_out_of_order_fields(self):
+ # names in the same order, padding added to descr
+ x = self.data[['col1', 'col2']]
+ assert_equal(x.dtype.names, ('col1', 'col2'))
+ assert_equal(x.dtype.descr,
+ [('col1', '<i4'), ('col2', '<i4'), ('', '|V4')])
+
+ # names change order to match indexing, as of 1.14 - descr can't
+ # represent that
+ y = self.data[['col2', 'col1']]
+ assert_equal(y.dtype.names, ('col2', 'col1'))
+ assert_raises(ValueError, lambda: y.dtype.descr)
+
def test_pickle_1(self):
# Issue #1529
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 50824da41..7d0be9cf7 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -4,6 +4,7 @@ import sys
import warnings
import itertools
import operator
+import platform
import numpy as np
from numpy.testing import (
@@ -420,6 +421,7 @@ class TestConversion(object):
assert_raises(OverflowError, x.__int__)
assert_equal(len(sup.log), 1)
+ @dec.knownfailureif(platform.machine().startswith("ppc64"))
@dec.skipif(np.finfo(np.double) == np.finfo(np.longdouble))
def test_int_from_huge_longdouble(self):
# Produce a longdouble that would overflow a double,
@@ -538,7 +540,7 @@ class TestRepr(object):
# long double test cannot work, because eval goes through a python
# float
for t in [np.float32, np.float64]:
- yield self._test_type_repr, t
+ self._test_type_repr(t)
if not IS_PYPY:
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 93764e7b7..ac97b8b0d 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -2335,53 +2335,53 @@ class TestComplexFunctions(object):
def test_branch_cuts(self):
# check branch cuts and continuity on them
- yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True
- yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True
- yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True
+ _check_branch_cut(np.log, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True)
+ _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True)
+ _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True)
- yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True
- yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True
- yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True
+ _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True)
- yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True
- yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True
- yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True
+ _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True)
+ _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True)
+ _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True)
# check against bogus branch cuts: assert continuity between quadrants
- yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1
- yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1
- yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1
+ _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1)
+ _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1)
+ _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1)
- yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1
- yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1
- yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1
+ _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1)
+ _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1)
+ _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1)
def test_branch_cuts_complex64(self):
# check branch cuts and continuity on them
- yield _check_branch_cut, np.log, -0.5, 1j, 1, -1, True, np.complex64
- yield _check_branch_cut, np.log2, -0.5, 1j, 1, -1, True, np.complex64
- yield _check_branch_cut, np.log10, -0.5, 1j, 1, -1, True, np.complex64
- yield _check_branch_cut, np.log1p, -1.5, 1j, 1, -1, True, np.complex64
- yield _check_branch_cut, np.sqrt, -0.5, 1j, 1, -1, True, np.complex64
+ _check_branch_cut(np.log, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log2, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log10, -0.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.log1p, -1.5, 1j, 1, -1, True, np.complex64)
+ _check_branch_cut(np.sqrt, -0.5, 1j, 1, -1, True, np.complex64)
- yield _check_branch_cut, np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
- yield _check_branch_cut, np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
- yield _check_branch_cut, np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
+ _check_branch_cut(np.arcsin, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arccos, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arctan, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
- yield _check_branch_cut, np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64
- yield _check_branch_cut, np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64
- yield _check_branch_cut, np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64
+ _check_branch_cut(np.arcsinh, [0-2j, 2j], [1, 1], -1, 1, True, np.complex64)
+ _check_branch_cut(np.arccosh, [ -1, 0.5], [1j, 1j], 1, -1, True, np.complex64)
+ _check_branch_cut(np.arctanh, [ -2, 2], [1j, 1j], 1, -1, True, np.complex64)
# check against bogus branch cuts: assert continuity between quadrants
- yield _check_branch_cut, np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
- yield _check_branch_cut, np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64
- yield _check_branch_cut, np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64
+ _check_branch_cut(np.arcsin, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arccos, [0-2j, 2j], [ 1, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arctan, [ -2, 2], [1j, 1j], 1, 1, False, np.complex64)
- yield _check_branch_cut, np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64
- yield _check_branch_cut, np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64
- yield _check_branch_cut, np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64
+ _check_branch_cut(np.arcsinh, [ -2, 2, 0], [1j, 1j, 1], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arccosh, [0-2j, 2j, 2], [1, 1, 1j], 1, 1, False, np.complex64)
+ _check_branch_cut(np.arctanh, [0-2j, 2j, 0], [1, 1, 1j], 1, 1, False, np.complex64)
def test_against_cmath(self):
import cmath
@@ -2489,7 +2489,7 @@ class TestComplexFunctions(object):
def test_loss_of_precision(self):
for dtype in [np.complex64, np.complex_]:
- yield self.check_loss_of_precision, dtype
+ self.check_loss_of_precision(dtype)
@dec.knownfailureif(is_longdouble_finfo_bogus(), "Bogus long double finfo")
def test_loss_of_precision_longcomplex(self):
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index a7368a7ae..66d4ed58d 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -359,7 +359,7 @@ int main (void)
decl : dict
for every (key, value), the declaration in the value will be
used for function in key. If a function is not in the
- dictionay, no declaration will be used.
+ dictionary, no declaration will be used.
call : dict
for every item (f, value), if the value is True, a call will be
done to the function f.
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
index a42611051..4a8746236 100644
--- a/numpy/distutils/conv_template.py
+++ b/numpy/distutils/conv_template.py
@@ -315,8 +315,7 @@ def unique_key(adict):
return newkey
-if __name__ == "__main__":
-
+def main():
try:
file = sys.argv[1]
except IndexError:
@@ -335,3 +334,6 @@ if __name__ == "__main__":
e = get_exception()
raise ValueError("In %s loop at %s" % (file, e))
outfile.write(writestr)
+
+if __name__ == "__main__":
+ main()
diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py
index b19c7cc0b..8f587eab9 100644
--- a/numpy/distutils/from_template.py
+++ b/numpy/distutils/from_template.py
@@ -238,8 +238,7 @@ _special_names = find_repl_patterns('''
<ctypereal=float,double,\\0,\\1>
''')
-if __name__ == "__main__":
-
+def main():
try:
file = sys.argv[1]
except IndexError:
@@ -254,3 +253,6 @@ if __name__ == "__main__":
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
+
+if __name__ == "__main__":
+ main()
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 38edf4691..1d08942f6 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -2304,7 +2304,8 @@ if os.path.isdir(extra_dll_dir) and sys.platform == 'win32':
windll.kernel32.SetDefaultDllDirectories(0x1000)
except AttributeError:
def _AddDllDirectory(dll_directory):
- os.environ["PATH"] += os.pathsep + dll_directory
+ os.environ.setdefault('PATH', '')
+ os.environ['PATH'] += os.pathsep + dll_directory
_AddDllDirectory(extra_dll_dir)
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index c34278868..467e31cea 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -441,7 +441,7 @@ The signature of ``__array_ufunc__`` is::
function. This includes any ``out`` arguments, which are always
contained in a tuple.
-A typical implementation would convert any inputs or ouputs that are
+A typical implementation would convert any inputs or outputs that are
instances of one's own class, pass everything on to a superclass using
``super()``, and finally return the results after possible
back-conversion. An example, taken from the test case
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py
index 776ec3471..cd6149c9a 100644
--- a/numpy/f2py/tests/test_array_from_pyobj.py
+++ b/numpy/f2py/tests/test_array_from_pyobj.py
@@ -141,7 +141,7 @@ class Type(object):
dtype0 = name
name = None
for n, i in typeinfo.items():
- if isinstance(i, tuple) and dtype0.type is i[-1]:
+ if not isinstance(i, type) and dtype0.type is i.type:
name = n
break
obj = cls._type_cache.get(name.upper(), None)
@@ -154,11 +154,12 @@ class Type(object):
def _init(self, name):
self.NAME = name.upper()
+ info = typeinfo[self.NAME]
self.type_num = getattr(wrap, 'NPY_' + self.NAME)
- assert_equal(self.type_num, typeinfo[self.NAME][1])
- self.dtype = typeinfo[self.NAME][-1]
- self.elsize = typeinfo[self.NAME][2] / 8
- self.dtypechar = typeinfo[self.NAME][0]
+ assert_equal(self.type_num, info.num)
+ self.dtype = info.type
+ self.elsize = info.bits / 8
+ self.dtypechar = info.char
def cast_types(self):
return [self.__class__(_m) for _m in _cast_dict[self.NAME]]
@@ -167,28 +168,28 @@ class Type(object):
return [self.__class__(_m) for _m in _type_names]
def smaller_types(self):
- bits = typeinfo[self.NAME][3]
+ bits = typeinfo[self.NAME].alignment
types = []
for name in _type_names:
- if typeinfo[name][3] < bits:
+ if typeinfo[name].alignment < bits:
types.append(Type(name))
return types
def equal_types(self):
- bits = typeinfo[self.NAME][3]
+ bits = typeinfo[self.NAME].alignment
types = []
for name in _type_names:
if name == self.NAME:
continue
- if typeinfo[name][3] == bits:
+ if typeinfo[name].alignment == bits:
types.append(Type(name))
return types
def larger_types(self):
- bits = typeinfo[self.NAME][3]
+ bits = typeinfo[self.NAME].alignment
types = []
for name in _type_names:
- if typeinfo[name][3] > bits:
+ if typeinfo[name].alignment > bits:
types.append(Type(name))
return types
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index 0856d6759..1a1266e12 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -6,11 +6,8 @@ from __future__ import division, absolute_import, print_function
import collections
import threading
-
from numpy.compat import integer_types
-from numpy.core import (
- asarray, concatenate, arange, take, integer, empty
- )
+from numpy.core import integer, empty, arange, asarray, roll
# Created by Pearu Peterson, September 2002
@@ -63,19 +60,16 @@ def fftshift(x, axes=None):
[-1., -3., -2.]])
"""
- tmp = asarray(x)
- ndim = tmp.ndim
+ x = asarray(x)
if axes is None:
- axes = list(range(ndim))
+ axes = tuple(range(x.ndim))
+ shift = [dim // 2 for dim in x.shape]
elif isinstance(axes, integer_types):
- axes = (axes,)
- y = tmp
- for k in axes:
- n = tmp.shape[k]
- p2 = (n+1)//2
- mylist = concatenate((arange(p2, n), arange(p2)))
- y = take(y, mylist, k)
- return y
+ shift = x.shape[axes] // 2
+ else:
+ shift = [x.shape[ax] // 2 for ax in axes]
+
+ return roll(x, shift, axes)
def ifftshift(x, axes=None):
@@ -112,19 +106,16 @@ def ifftshift(x, axes=None):
[-3., -2., -1.]])
"""
- tmp = asarray(x)
- ndim = tmp.ndim
+ x = asarray(x)
if axes is None:
- axes = list(range(ndim))
+ axes = tuple(range(x.ndim))
+ shift = [-(dim // 2) for dim in x.shape]
elif isinstance(axes, integer_types):
- axes = (axes,)
- y = tmp
- for k in axes:
- n = tmp.shape[k]
- p2 = n-(n+1)//2
- mylist = concatenate((arange(p2, n), arange(p2)))
- y = take(y, mylist, k)
- return y
+ shift = -(x.shape[axes] // 2)
+ else:
+ shift = [-(x.shape[ax] // 2) for ax in axes]
+
+ return roll(x, shift, axes)
def fftfreq(n, d=1.0):
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index f02edf7cc..4a19b8c60 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -4,13 +4,9 @@ Copied from fftpack.helper by Pearu Peterson, October 2005
"""
from __future__ import division, absolute_import, print_function
-
import numpy as np
-from numpy.testing import (
- run_module_suite, assert_array_almost_equal, assert_equal,
- )
-from numpy import fft
-from numpy import pi
+from numpy.testing import run_module_suite, assert_array_almost_equal, assert_equal
+from numpy import fft, pi
from numpy.fft.helper import _FFTCache
@@ -36,10 +32,108 @@ class TestFFTShift(object):
shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]
assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)
assert_array_almost_equal(fft.fftshift(freqs, axes=0),
- fft.fftshift(freqs, axes=(0,)))
+ fft.fftshift(freqs, axes=(0,)))
assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)
assert_array_almost_equal(fft.ifftshift(shifted, axes=0),
- fft.ifftshift(shifted, axes=(0,)))
+ fft.ifftshift(shifted, axes=(0,)))
+
+ assert_array_almost_equal(fft.fftshift(freqs), shifted)
+ assert_array_almost_equal(fft.ifftshift(shifted), freqs)
+
+ def test_uneven_dims(self):
+ """ Test 2D input, which has uneven dimension sizes """
+ freqs = [
+ [0, 1],
+ [2, 3],
+ [4, 5]
+ ]
+
+ # shift in dimension 0
+ shift_dim0 = [
+ [4, 5],
+ [0, 1],
+ [2, 3]
+ ]
+ assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)
+ assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)
+ assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)
+ assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)
+
+ # shift in dimension 1
+ shift_dim1 = [
+ [1, 0],
+ [3, 2],
+ [5, 4]
+ ]
+ assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)
+ assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)
+
+ # shift in both dimensions
+ shift_dim_both = [
+ [5, 4],
+ [1, 0],
+ [3, 2]
+ ]
+ assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)
+ assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)
+
+ # axes=None (default) shift in all dimensions
+ assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)
+ assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)
+ assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)
+
+ def test_equal_to_original(self):
+ """ Test that the new (>=v1.15) implementation (see #10073) is equal to the original (<=v1.14) """
+ from numpy.compat import integer_types
+ from numpy.core import asarray, concatenate, arange, take
+
+ def original_fftshift(x, axes=None):
+ """ How fftshift was implemented in v1.14"""
+ tmp = asarray(x)
+ ndim = tmp.ndim
+ if axes is None:
+ axes = list(range(ndim))
+ elif isinstance(axes, integer_types):
+ axes = (axes,)
+ y = tmp
+ for k in axes:
+ n = tmp.shape[k]
+ p2 = (n + 1) // 2
+ mylist = concatenate((arange(p2, n), arange(p2)))
+ y = take(y, mylist, k)
+ return y
+
+ def original_ifftshift(x, axes=None):
+ """ How ifftshift was implemented in v1.14 """
+ tmp = asarray(x)
+ ndim = tmp.ndim
+ if axes is None:
+ axes = list(range(ndim))
+ elif isinstance(axes, integer_types):
+ axes = (axes,)
+ y = tmp
+ for k in axes:
+ n = tmp.shape[k]
+ p2 = n - (n + 1) // 2
+ mylist = concatenate((arange(p2, n), arange(p2)))
+ y = take(y, mylist, k)
+ return y
+
+ # create possible 2d array combinations and try all possible keywords
+ # compare output to original functions
+ for i in range(16):
+ for j in range(16):
+ for axes_keyword in [0, 1, None, (0,), (0, 1)]:
+ inp = np.random.rand(i, j)
+
+ assert_array_almost_equal(fft.fftshift(inp, axes_keyword),
+ original_fftshift(inp, axes_keyword))
+
+ assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),
+ original_ifftshift(inp, axes_keyword))
class TestFFTFreq(object):
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index 153b4af65..cdc354a02 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -1186,7 +1186,7 @@ def pad(array, pad_width, mode, **kwargs):
reflect_type : {'even', 'odd'}, optional
Used in 'reflect', and 'symmetric'. The 'even' style is the
default with an unaltered reflection around the edge value. For
- the 'odd' style, the extented part of the array is created by
+ the 'odd' style, the extended part of the array is created by
subtracting the reflected values from two times the edge value.
Returns
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index a9426cdf3..7b103ef3e 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -110,16 +110,25 @@ def ediff1d(ary, to_end=None, to_begin=None):
return result
+def _unpack_tuple(x):
+ """ Unpacks one-element tuples for use as return values """
+ if len(x) == 1:
+ return x[0]
+ else:
+ return x
+
+
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
- outputs in addition to the unique elements: the indices of the input array
- that give the unique values, the indices of the unique array that
- reconstruct the input array, and the number of times each unique value
- comes up in the input array.
+ outputs in addition to the unique elements:
+
+ * the indices of the input array that give the unique values
+ * the indices of the unique array that reconstruct the input array
+ * the number of times each unique value comes up in the input array
Parameters
----------
@@ -135,13 +144,16 @@ def unique(ar, return_index=False, return_inverse=False,
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
+
.. versionadded:: 1.9.0
+
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened beforehand.
Otherwise, duplicate items will be removed along the provided axis,
with all the other axes belonging to the each of the unique elements.
Object arrays or structured arrays that contain objects are not
supported if the `axis` kwarg is used.
+
.. versionadded:: 1.13.0
@@ -159,6 +171,7 @@ def unique(ar, return_index=False, return_inverse=False,
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
+
.. versionadded:: 1.9.0
See Also
@@ -207,11 +220,15 @@ def unique(ar, return_index=False, return_inverse=False,
"""
ar = np.asanyarray(ar)
if axis is None:
- return _unique1d(ar, return_index, return_inverse, return_counts)
- if not (-ar.ndim <= axis < ar.ndim):
- raise ValueError('Invalid axis kwarg specified for unique')
+ ret = _unique1d(ar, return_index, return_inverse, return_counts)
+ return _unpack_tuple(ret)
+
+ try:
+ ar = np.swapaxes(ar, axis, 0)
+ except np.AxisError:
+ # this removes the "axis1" or "axis2" prefix from the error message
+ raise np.AxisError(axis, ar.ndim)
- ar = np.swapaxes(ar, axis, 0)
orig_shape, orig_dtype = ar.shape, ar.dtype
# Must reshape to a contiguous 2D array for this to work...
ar = ar.reshape(orig_shape[0], -1)
@@ -241,11 +258,9 @@ def unique(ar, return_index=False, return_inverse=False,
output = _unique1d(consolidated, return_index,
return_inverse, return_counts)
- if not (return_index or return_inverse or return_counts):
- return reshape_uniq(output)
- else:
- uniq = reshape_uniq(output[0])
- return (uniq,) + output[1:]
+ output = (reshape_uniq(output[0]),) + output[1:]
+ return _unpack_tuple(output)
+
def _unique1d(ar, return_index=False, return_inverse=False,
return_counts=False):
@@ -255,20 +270,6 @@ def _unique1d(ar, return_index=False, return_inverse=False,
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
- optional_returns = optional_indices or return_counts
-
- if ar.size == 0:
- if not optional_returns:
- ret = ar
- else:
- ret = (ar,)
- if return_index:
- ret += (np.empty(0, np.intp),)
- if return_inverse:
- ret += (np.empty(0, np.intp),)
- if return_counts:
- ret += (np.empty(0, np.intp),)
- return ret
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
@@ -276,24 +277,24 @@ def _unique1d(ar, return_index=False, return_inverse=False,
else:
ar.sort()
aux = ar
- flag = np.concatenate(([True], aux[1:] != aux[:-1]))
-
- if not optional_returns:
- ret = aux[flag]
- else:
- ret = (aux[flag],)
- if return_index:
- ret += (perm[flag],)
- if return_inverse:
- iflag = np.cumsum(flag) - 1
- inv_idx = np.empty(ar.shape, dtype=np.intp)
- inv_idx[perm] = iflag
- ret += (inv_idx,)
- if return_counts:
- idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
- ret += (np.diff(idx),)
+ mask = np.empty(aux.shape, dtype=np.bool_)
+ mask[:1] = True
+ mask[1:] = aux[1:] != aux[:-1]
+
+ ret = (aux[mask],)
+ if return_index:
+ ret += (perm[mask],)
+ if return_inverse:
+ imask = np.cumsum(mask) - 1
+ inv_idx = np.empty(mask.shape, dtype=np.intp)
+ inv_idx[perm] = imask
+ ret += (inv_idx,)
+ if return_counts:
+ idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
+ ret += (np.diff(idx),)
return ret
+
def intersect1d(ar1, ar2, assume_unique=False):
"""
Find the intersection of two arrays.
@@ -614,7 +615,7 @@ def union1d(ar1, ar2):
>>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
array([1, 2, 3, 4, 6])
"""
- return unique(np.concatenate((ar1, ar2)))
+ return unique(np.concatenate((ar1, ar2), axis=None))
def setdiff1d(ar1, ar2, assume_unique=False):
"""
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 84af2afc8..363bb2101 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -454,7 +454,9 @@ def _filter_header(s):
tokens = []
last_token_was_number = False
- for token in tokenize.generate_tokens(StringIO(asstr(s)).read):
+ # adding newline as python 2.7.5 workaround
+ string = asstr(s) + "\n"
+ for token in tokenize.generate_tokens(StringIO(string).readline):
token_type = token[0]
token_string = token[1]
if (last_token_was_number and
@@ -464,7 +466,8 @@ def _filter_header(s):
else:
tokens.append(token)
last_token_was_number = (token_type == tokenize.NUMBER)
- return tokenize.untokenize(tokens)
+ # removing newline (see above) as python 2.7.5 workaround
+ return tokenize.untokenize(tokens)[:-1]
def _read_array_header(fp, version):
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 979bc13d0..391c47a06 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1236,7 +1236,8 @@ def interp(x, xp, fp, left=None, right=None, period=None):
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
- Complex interpolation
+ Complex interpolation:
+
>>> x = [1.5, 4.0]
>>> xp = [2,3,5]
>>> fp = [1.0j, 0, 2+3j]
@@ -3528,7 +3529,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
else:
zerod = False
- # prepare a for partioning
+ # prepare a for partitioning
if overwrite_input:
if axis is None:
ap = a.ravel()
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 4b2c9d817..16e363d7c 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -198,8 +198,8 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
a : array_like
Array containing numbers whose minimum is desired. If `a` is not an
array, a conversion is attempted.
- axis : int, optional
- Axis along which the minimum is computed. The default is to compute
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the minimum is computed. The default is to compute
the minimum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
@@ -306,8 +306,8 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
a : array_like
Array containing numbers whose maximum is desired. If `a` is not an
array, a conversion is attempted.
- axis : int, optional
- Axis along which the maximum is computed. The default is to compute
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the maximum is computed. The default is to compute
the maximum of the flattened array.
out : ndarray, optional
Alternate output array in which to place the result. The default
@@ -505,8 +505,8 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
a : array_like
Array containing numbers whose sum is desired. If `a` is not an
array, a conversion is attempted.
- axis : int, optional
- Axis along which the sum is computed. The default is to compute the
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the sum is computed. The default is to compute the
sum of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
@@ -596,8 +596,8 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
a : array_like
Array containing numbers whose product is desired. If `a` is not an
array, a conversion is attempted.
- axis : int, optional
- Axis along which the product is computed. The default is to compute
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the product is computed. The default is to compute
the product of the flattened array.
dtype : data-type, optional
The type of the returned array and of the accumulator in which the
@@ -791,8 +791,8 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
- axis : int, optional
- Axis along which the means are computed. The default is to compute
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : data-type, optional
Type to use in computing the mean. For integer inputs, the default
@@ -1217,8 +1217,8 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
- axis : int, optional
- Axis along which the variance is computed. The default is to compute
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
@@ -1359,8 +1359,8 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
----------
a : array_like
Calculate the standard deviation of the non-NaN values.
- axis : int, optional
- Axis along which the standard deviation is computed. The default is
+ axis : {int, tuple of int, None}, optional
+ Axis or axes along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 9ee0aaaae..096f1a3a4 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -797,22 +797,23 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
converters : dict, optional
- A dictionary mapping column number to a function that will convert
- that column to a float. E.g., if column 0 is a date string:
- ``converters = {0: datestr2num}``. Converters can also be used to
- provide a default value for missing data (but see also `genfromtxt`):
- ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
+ A dictionary mapping column number to a function that will parse the
+ column string into the desired value. E.g., if column 0 is a date
+ string: ``converters = {0: datestr2num}``. Converters can also be
+ used to provide a default value for missing data (but see also
+ `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
+ Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
- usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
+ ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionchanged:: 1.11.0
When a single column has to be read it is possible to use
an integer instead of a tuple. E.g ``usecols = 3`` reads the
- fourth column the same way as `usecols = (3,)`` would.
+ fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
@@ -827,7 +828,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
Encoding used to decode the inputfile. Does not apply to input streams.
The special value 'bytes' enables backward compatibility workarounds
that ensures you receive byte arrays as results if possible and passes
- latin1 encoded strings to converters. Override this value to receive
+ 'latin1' encoded strings to converters. Override this value to receive
unicode arrays and pass strings as input to converters. If set to None
the system default is used. The default value is 'bytes'.
@@ -2049,7 +2050,6 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
strcolidx = [i for (i, v) in enumerate(column_types)
if v == np.unicode_]
- type_str = np.unicode_
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
warnings.warn(
@@ -2065,33 +2065,37 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
try:
data = [encode_unicode_cols(r) for r in data]
- type_str = np.bytes_
except UnicodeEncodeError:
pass
+ else:
+ for i in strcolidx:
+ column_types[i] = np.bytes_
+ # Update string types to be the right length
+ sized_column_types = column_types[:]
+ for i, col_type in enumerate(column_types):
+ if np.issubdtype(col_type, np.character):
+ n_chars = max(len(row[i]) for row in data)
+ sized_column_types[i] = (col_type, n_chars)
- # ... and take the largest number of chars.
- for i in strcolidx:
- max_line_length = max(len(row[i]) for row in data)
- column_types[i] = np.dtype((type_str, max_line_length))
- #
if names is None:
- # If the dtype is uniform, don't define names, else use ''
- base = set([c.type for c in converters if c._checked])
+ # If the dtype is uniform (before sizing strings)
+ base = set([
+ c_type
+ for c, c_type in zip(converters, column_types)
+ if c._checked])
if len(base) == 1:
- if strcolidx:
- (ddtype, mdtype) = (type_str, bool)
- else:
- (ddtype, mdtype) = (list(base)[0], bool)
+ uniform_type, = base
+ (ddtype, mdtype) = (uniform_type, bool)
else:
ddtype = [(defaultfmt % i, dt)
- for (i, dt) in enumerate(column_types)]
+ for (i, dt) in enumerate(sized_column_types)]
if usemask:
mdtype = [(defaultfmt % i, bool)
- for (i, dt) in enumerate(column_types)]
+ for (i, dt) in enumerate(sized_column_types)]
else:
- ddtype = list(zip(names, column_types))
- mdtype = list(zip(names, [bool] * len(column_types)))
+ ddtype = list(zip(names, sized_column_types))
+ mdtype = list(zip(names, [bool] * len(sized_column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index f49b7e295..41b5e2f64 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -897,7 +897,7 @@ def polydiv(u, v):
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
- r = u.copy()
+ r = u.astype(w.dtype)
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index b4787838d..17415d8fe 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -247,6 +247,14 @@ class TestSetOps(object):
c = union1d(a, b)
assert_array_equal(c, ec)
+ # Tests gh-10340, arguments to union1d should be
+ # flattened if they are not already 1D
+ x = np.array([[0, 1, 2], [3, 4, 5]])
+ y = np.array([0, 1, 2, 3, 4])
+ ez = np.array([0, 1, 2, 3, 4, 5])
+ z = union1d(x, y)
+ assert_array_equal(z, ez)
+
assert_array_equal([], union1d([], []))
def test_setdiff1d(self):
@@ -401,8 +409,8 @@ class TestUnique(object):
assert_raises(TypeError, self._run_axis_tests,
[('a', int), ('b', object)])
- assert_raises(ValueError, unique, np.arange(10), axis=2)
- assert_raises(ValueError, unique, np.arange(10), axis=-2)
+ assert_raises(np.AxisError, unique, np.arange(10), axis=2)
+ assert_raises(np.AxisError, unique, np.arange(10), axis=-2)
def test_unique_axis_list(self):
msg = "Unique failed on list of lists"
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 2d2b4cea2..d3bd2cef7 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -454,20 +454,20 @@ def assert_equal_(o1, o2):
def test_roundtrip():
for arr in basic_arrays + record_arrays:
arr2 = roundtrip(arr)
- yield assert_array_equal, arr, arr2
+ assert_array_equal(arr, arr2)
def test_roundtrip_randsize():
for arr in basic_arrays + record_arrays:
if arr.dtype != object:
arr2 = roundtrip_randsize(arr)
- yield assert_array_equal, arr, arr2
+ assert_array_equal(arr, arr2)
def test_roundtrip_truncated():
for arr in basic_arrays:
if arr.dtype != object:
- yield assert_raises, ValueError, roundtrip_truncated, arr
+ assert_raises(ValueError, roundtrip_truncated, arr)
def test_long_str():
@@ -508,7 +508,7 @@ def test_memmap_roundtrip():
fp = open(mfn, 'rb')
memmap_bytes = fp.read()
fp.close()
- yield assert_equal_, normal_bytes, memmap_bytes
+ assert_equal_(normal_bytes, memmap_bytes)
# Check that reading the file using memmap works.
ma = format.open_memmap(nfn, mode='r')
@@ -728,13 +728,13 @@ def test_read_magic():
def test_read_magic_bad_magic():
for magic in malformed_magic:
f = BytesIO(magic)
- yield raises(ValueError)(format.read_magic), f
+ assert_raises(ValueError, format.read_array, f)
def test_read_version_1_0_bad_magic():
for magic in bad_version_magic + malformed_magic:
f = BytesIO(magic)
- yield raises(ValueError)(format.read_array), f
+ assert_raises(ValueError, format.read_array, f)
def test_bad_magic_args():
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index b5e06dad0..0520ce580 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -245,7 +245,7 @@ class TestIndexExpression(object):
class TestIx_(object):
def test_regression_1(self):
- # Test empty inputs create ouputs of indexing type, gh-5804
+ # Test empty inputs create outputs of indexing type, gh-5804
# Test both lists and arrays
for func in (range, np.arange):
a, = np.ix_(func(0))
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 75a8e4968..d05fcd543 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -376,7 +376,7 @@ class TestSaveTxt(object):
lines = c.readlines()
assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])
- # Specify delimiter, should be overiden
+ # Specify delimiter, should be overridden
c = BytesIO()
np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
c.seek(0)
@@ -1096,7 +1096,7 @@ class TestFromTxt(LoadTxtBase):
assert_equal(test, control)
def test_array(self):
- # Test outputing a standard ndarray
+ # Test outputting a standard ndarray
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
@@ -2056,6 +2056,13 @@ M 33 21.99
assert_(isinstance(test, np.recarray))
assert_equal(test, control)
+ #gh-10394
+ data = TextIO('color\n"red"\n"blue"')
+ test = np.recfromcsv(data, converters={0: lambda x: x.strip(b'\"')})
+ control = np.array([('red',), ('blue',)], dtype=[('color', (bytes, 4))])
+ assert_equal(test.dtype, control.dtype)
+ assert_equal(test, control)
+
def test_max_rows(self):
# Test the `max_rows` keyword argument.
data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
@@ -2226,7 +2233,7 @@ class TestPathUsage(object):
@dec.skipif(Path is None, "No pathlib.Path")
def test_ndfromtxt(self):
- # Test outputing a standard ndarray
+ # Test outputting a standard ndarray
with temppath(suffix='.txt') as path:
path = Path(path)
with path.open('w') as f:
@@ -2292,7 +2299,7 @@ def test_gzip_load():
def test_gzip_loadtxt():
- # Thanks to another windows brokeness, we can't use
+ # Thanks to another windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
# reopened by another open call. So we first put the gzipped string
# of the test reference array, write it to a securely opened file,
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 9a4650825..03915cead 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -222,6 +222,14 @@ class TestDocs(object):
assert_equal(p == p2, False)
assert_equal(p != p2, True)
+ def test_polydiv(self):
+ b = np.poly1d([2, 6, 6, 1])
+ a = np.poly1d([-1j, (1+2j), -(2+1j), 1])
+ q, r = np.polydiv(b, a)
+ assert_equal(q.coeffs.dtype, np.complex128)
+ assert_equal(r.coeffs.dtype, np.complex128)
+ assert_equal(q*a + r, b)
+
def test_poly_coeffs_immutable(self):
""" Coefficients should not be modifiable """
p = np.poly1d([1, 2, 3])
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 8945b61ea..ce8ef2f15 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -359,6 +359,7 @@ class TestNanToNum(object):
assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+ assert_equal(type(vals), np.ndarray)
# perform the same test but in-place
with np.errstate(divide='ignore', invalid='ignore'):
@@ -369,16 +370,27 @@ class TestNanToNum(object):
assert_all(vals[0] < -1e10) and assert_all(np.isfinite(vals[0]))
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
+ assert_equal(type(vals), np.ndarray)
+
+ def test_array(self):
+ vals = nan_to_num([1])
+ assert_array_equal(vals, np.array([1], int))
+ assert_equal(type(vals), np.ndarray)
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
- vals = nan_to_num([1])
- assert_array_equal(vals, np.array([1], int))
+ assert_equal(type(vals), np.int_)
+
+ def test_float(self):
+ vals = nan_to_num(1.0)
+ assert_all(vals == 1.0)
+ assert_equal(type(vals), np.float_)
def test_complex_good(self):
vals = nan_to_num(1+1j)
assert_all(vals == 1+1j)
+ assert_equal(type(vals), np.complex_)
def test_complex_bad(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -387,6 +399,7 @@ class TestNanToNum(object):
vals = nan_to_num(v)
# !! This is actually (unexpectedly) zero
assert_all(np.isfinite(vals))
+ assert_equal(type(vals), np.complex_)
def test_complex_bad2(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -394,6 +407,7 @@ class TestNanToNum(object):
v += np.array(-1+1.j)/0.
vals = nan_to_num(v)
assert_all(np.isfinite(vals))
+ assert_equal(type(vals), np.complex_)
# Fixme
#assert_all(vals.imag > 1e10) and assert_all(np.isfinite(vals))
# !! This is actually (unexpectedly) positive
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 5c7528d4f..1664e6ebb 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -215,7 +215,7 @@ def iscomplex(x):
if issubclass(ax.dtype.type, _nx.complexfloating):
return ax.imag != 0
res = zeros(ax.shape, bool)
- return +res # convet to array-scalar if needed
+ return +res # convert to array-scalar if needed
def isreal(x):
"""
@@ -330,7 +330,7 @@ def _getmaxmin(t):
def nan_to_num(x, copy=True):
"""
- Replace nan with zero and inf with large finite numbers.
+ Replace NaN with zero and infinity with large finite numbers.
If `x` is inexact, NaN is replaced by zero, and infinity and -infinity
replaced by the respectively largest and most negative finite floating
@@ -343,7 +343,7 @@ def nan_to_num(x, copy=True):
Parameters
----------
- x : array_like
+ x : scalar or array_like
Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
@@ -374,6 +374,12 @@ def nan_to_num(x, copy=True):
Examples
--------
+ >>> np.nan_to_num(np.inf)
+ 1.7976931348623157e+308
+ >>> np.nan_to_num(-np.inf)
+ -1.7976931348623157e+308
+ >>> np.nan_to_num(np.nan)
+ 0.0
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000,
@@ -386,20 +392,21 @@ def nan_to_num(x, copy=True):
"""
x = _nx.array(x, subok=True, copy=copy)
xtype = x.dtype.type
+
+ isscalar = (x.ndim == 0)
+
if not issubclass(xtype, _nx.inexact):
- return x
+ return x[()] if isscalar else x
iscomplex = issubclass(xtype, _nx.complexfloating)
- isscalar = (x.ndim == 0)
- x = x[None] if isscalar else x
dest = (x.real, x.imag) if iscomplex else (x,)
maxf, minf = _getmaxmin(x.real.dtype)
for d in dest:
_nx.copyto(d, 0.0, where=isnan(d))
_nx.copyto(d, maxf, where=isposinf(d))
_nx.copyto(d, minf, where=isneginf(d))
- return x[0] if isscalar else x
+ return x[()] if isscalar else x
#-----------------------------------------------------------------------------
@@ -579,7 +586,7 @@ def common_type(*arrays):
an integer array, the minimum precision type that is returned is a
64-bit floating point dtype.
- All input arrays except int64 and uint64 can be safely cast to the
+ All input arrays except int64 and uint64 can be safely cast to the
returned dtype without loss of information.
Parameters
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index e18eda0fb..1ecd334af 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -707,7 +707,7 @@ def lookfor(what, module=None, import_modules=True, regenerate=False,
"""
Do a keyword search on docstrings.
- A list of of objects that matched the search is displayed,
+ A list of objects that matched the search is displayed,
sorted by relevance. All given keywords need to be found in the
docstring for it to be returned as a result, but the order does
not matter.
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 8bc1b14d3..d7d67a91f 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -1835,7 +1835,7 @@ def det(a):
See Also
--------
- slogdet : Another way to representing the determinant, more suitable
+ slogdet : Another way to represent the determinant, more suitable
for large matrices where underflow/overflow may occur.
Notes
@@ -2121,6 +2121,9 @@ def norm(x, ord=None, axis=None, keepdims=False):
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
is 1-D) or a matrix norm (when `x` is 2-D) is returned.
+
+ .. versionadded:: 1.8.0
+
keepdims : bool, optional
If this is set to True, the axes which are normed over are left in the
result as dimensions with size one. With this option the result will
@@ -2277,7 +2280,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
return abs(x).min(axis=axis, keepdims=keepdims)
elif ord == 0:
# Zero norm
- return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
+ return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)
elif ord == 1:
# special case for speedup
return add.reduce(abs(x), axis=axis, keepdims=keepdims)
@@ -2292,7 +2295,9 @@ def norm(x, ord=None, axis=None, keepdims=False):
raise ValueError("Invalid norm order for vectors.")
absx = abs(x)
absx **= ord
- return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
+ ret = add.reduce(absx, axis=axis, keepdims=keepdims)
+ ret **= (1 / ord)
+ return ret
elif len(axis) == 2:
row_axis, col_axis = axis
row_axis = normalize_axis_index(row_axis, nd)
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 36b99b522..3c30982a7 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -483,35 +483,30 @@ static void init_constants(void)
*/
-/* this struct contains information about how to linearize in a local buffer
- a matrix so that it can be used by blas functions.
- All strides are specified in number of elements (similar to what blas
- expects)
-
- dst_row_strides: number of elements between different row. Matrix is
- considered row-major
- dst_column_strides: number of elements between different columns in the
- destination buffer
- rows: number of rows of the matrix
- columns: number of columns of the matrix
- src_row_strides: strides needed to access the next row in the source matrix
- src_column_strides: strides needed to access the next column in the source
- matrix
+/*
+ * this struct contains information about how to linearize a matrix in a local
+ * buffer so that it can be used by blas functions. All strides are specified
+ * in bytes and are converted to elements later in type specific functions.
+ *
+ * rows: number of rows in the matrix
+ * columns: number of columns in the matrix
+ * row_strides: the number bytes between consecutive rows.
+ * column_strides: the number of bytes between consecutive columns.
*/
typedef struct linearize_data_struct
{
- size_t rows;
- size_t columns;
- ptrdiff_t row_strides;
- ptrdiff_t column_strides;
+ npy_intp rows;
+ npy_intp columns;
+ npy_intp row_strides;
+ npy_intp column_strides;
} LINEARIZE_DATA_t;
static NPY_INLINE void
init_linearize_data(LINEARIZE_DATA_t *lin_data,
- int rows,
- int columns,
- ptrdiff_t row_strides,
- ptrdiff_t column_strides)
+ npy_intp rows,
+ npy_intp columns,
+ npy_intp row_strides,
+ npy_intp column_strides)
{
lin_data->rows = rows;
lin_data->columns = columns;
@@ -1159,9 +1154,7 @@ static void
if (tmp_buff) {
LINEARIZE_DATA_t lin_data;
/* swapped steps to get matrix in FORTRAN order */
- init_linearize_data(&lin_data, m, m,
- (ptrdiff_t)steps[1],
- (ptrdiff_t)steps[0]);
+ init_linearize_data(&lin_data, m, m, steps[1], steps[0]);
BEGIN_OUTER_LOOP_3
linearize_@TYPE@_matrix(tmp_buff, args[0], &lin_data);
@TYPE@_slogdet_single_element(m,
@@ -1206,15 +1199,13 @@ static void
@typ@ sign;
@basetyp@ logdet;
/* swapped steps to get matrix in FORTRAN order */
- init_linearize_data(&lin_data, m, m,
- (ptrdiff_t)steps[1],
- (ptrdiff_t)steps[0]);
+ init_linearize_data(&lin_data, m, m, steps[1], steps[0]);
BEGIN_OUTER_LOOP_2
linearize_@TYPE@_matrix(tmp_buff, args[0], &lin_data);
@TYPE@_slogdet_single_element(m,
(void*)tmp_buff,
- (fortran_int*)(tmp_buff+matrix_size),
+ (fortran_int*)(tmp_buff + matrix_size),
&sign,
&logdet);
*(@typ@ *)args[1] = @TYPE@_det_from_slogdet(sign, logdet);
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index 69f01b819..9223c5705 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -3029,18 +3029,16 @@ class MaskedArray(ndarray):
if context is not None:
result._mask = result._mask.copy()
- (func, args, _) = context
- m = reduce(mask_or, [getmaskarray(arg) for arg in args])
+ func, args, out_i = context
+ # args sometimes contains outputs (gh-10459), which we don't want
+ input_args = args[:func.nin]
+ m = reduce(mask_or, [getmaskarray(arg) for arg in input_args])
# Get the domain mask
domain = ufunc_domain.get(func, None)
if domain is not None:
# Take the domain, and make sure it's a ndarray
- if len(args) > 2:
- with np.errstate(divide='ignore', invalid='ignore'):
- d = filled(reduce(domain, args), True)
- else:
- with np.errstate(divide='ignore', invalid='ignore'):
- d = filled(domain(*args), True)
+ with np.errstate(divide='ignore', invalid='ignore'):
+ d = filled(domain(*input_args), True)
if d.any():
# Fill the result where the domain is wrong
@@ -4285,7 +4283,7 @@ class MaskedArray(ndarray):
Convert to long.
"""
if self.size > 1:
- raise TypeError("Only length-1 arrays can be conveted "
+ raise TypeError("Only length-1 arrays can be converted "
"to Python scalars")
elif self._mask:
raise MaskError('Cannot convert masked element to a Python long.')
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 360d50d8a..99f5234d1 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -1209,7 +1209,7 @@ def union1d(ar1, ar2):
numpy.union1d : Equivalent function for ndarrays.
"""
- return unique(ma.concatenate((ar1, ar2)))
+ return unique(ma.concatenate((ar1, ar2), axis=None))
def setdiff1d(ar1, ar2, assume_unique=False):
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 8c631d95d..4c6bb2b42 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -5057,6 +5057,31 @@ def test_ufunc_with_output():
assert_(y is x)
+def test_ufunc_with_out_varied():
+ """ Test that masked arrays are immune to gh-10459 """
+ # the mask of the output should not affect the result, however it is passed
+ a = array([ 1, 2, 3], mask=[1, 0, 0])
+ b = array([10, 20, 30], mask=[1, 0, 0])
+ out = array([ 0, 0, 0], mask=[0, 0, 1])
+ expected = array([11, 22, 33], mask=[1, 0, 0])
+
+ out_pos = out.copy()
+ res_pos = np.add(a, b, out_pos)
+
+ out_kw = out.copy()
+ res_kw = np.add(a, b, out=out_kw)
+
+ out_tup = out.copy()
+ res_tup = np.add(a, b, out=(out_tup,))
+
+ assert_equal(res_kw.mask, expected.mask)
+ assert_equal(res_kw.data, expected.data)
+ assert_equal(res_tup.mask, expected.mask)
+ assert_equal(res_tup.data, expected.data)
+ assert_equal(res_pos.mask, expected.mask)
+ assert_equal(res_pos.data, expected.data)
+
+
def test_astype():
descr = [('v', int, 3), ('x', [('y', float)])]
x = array([
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 7687514fa..d1c1aa63e 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -1510,6 +1510,14 @@ class TestArraySetOps(object):
test = union1d(a, b)
control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
assert_equal(test, control)
+
+ # Tests gh-10340, arguments to union1d should be
+ # flattened if they are not already 1D
+ x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]])
+ y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1])
+ ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1])
+ z = union1d(x, y)
+ assert_equal(z, ez)
#
assert_array_equal([], union1d([], []))
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index a71e5b549..adbf30234 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -36,7 +36,7 @@ Misc Functions
--------------
- `polyfromroots` -- create a polynomial with specified roots.
- `polyroots` -- find the roots of a polynomial.
-- `polyvalfromroots` -- evalute a polynomial at given points from roots.
+- `polyvalfromroots` -- evaluate a polynomial at given points from roots.
- `polyvander` -- Vandermonde-like matrix for powers.
- `polyvander2d` -- Vandermonde-like matrix for 2D power series.
- `polyvander3d` -- Vandermonde-like matrix for 3D power series.
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 6ada4d997..4546a0184 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -231,7 +231,7 @@ class TestRandint(object):
res = hashlib.md5(val.view(np.int8)).hexdigest()
assert_(tgt[np.dtype(dt).name] == res)
- # bools do not depend on endianess
+ # bools do not depend on endianness
np.random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
diff --git a/numpy/testing/pytest_tools/utils.py b/numpy/testing/pytest_tools/utils.py
index 19982ec54..8a0eb8be3 100644
--- a/numpy/testing/pytest_tools/utils.py
+++ b/numpy/testing/pytest_tools/utils.py
@@ -1219,16 +1219,9 @@ def assert_raises(exception_class, fn=None, *args, **kwargs):
if fn is not None:
pytest.raises(exception_class, fn, *args,**kwargs)
else:
- @contextlib.contextmanager
- def assert_raises_context():
- try:
- yield
- except BaseException as raised_exception:
- assert isinstance(raised_exception, exception_class)
- else:
- raise ValueError('Function did not raise an exception')
+ assert not kwargs
- return assert_raises_context()
+ return pytest.raises(exception_class)
def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py
index ca651c874..4481d76ef 100644
--- a/numpy/tests/test_reloading.py
+++ b/numpy/tests/test_reloading.py
@@ -1,8 +1,9 @@
from __future__ import division, absolute_import, print_function
import sys
+import pickle
-from numpy.testing import assert_raises, assert_, run_module_suite
+from numpy.testing import assert_raises, assert_, assert_equal, run_module_suite
if sys.version_info[:2] >= (3, 4):
from importlib import reload
@@ -29,6 +30,11 @@ def test_numpy_reloading():
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
+def test_novalue():
+ import numpy as np
+ assert_equal(repr(np._NoValue), '<no value>')
+ assert_(pickle.loads(pickle.dumps(np._NoValue)) is np._NoValue)
+
if __name__ == "__main__":
run_module_suite()
diff --git a/setup.py b/setup.py
index 248dcc823..eb172bf2b 100755
--- a/setup.py
+++ b/setup.py
@@ -352,13 +352,21 @@ def setup_package():
long_description = "\n".join(DOCLINES[2:]),
url = "http://www.numpy.org",
author = "Travis E. Oliphant et al.",
- download_url = "http://sourceforge.net/projects/numpy/files/NumPy/",
+ download_url = "https://pypi.python.org/pypi/numpy",
license = 'BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
cmdclass={"sdist": sdist_checked},
python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
+ zip_safe=False,
+ entry_points={
+ 'console_scripts': [
+ 'f2py = numpy.f2py.__main__:main',
+ 'conv-template = numpy.distutils.conv_template:main',
+ 'from-template = numpy.distutils.conv_template:main',
+ ]
+ },
)
if "--force" in sys.argv:
diff --git a/tools/announce.py b/tools/changelog.py
index 05ea8cb36..84e046c5f 100755
--- a/tools/announce.py
+++ b/tools/changelog.py
@@ -1,10 +1,10 @@
#!/usr/bin/env python
# -*- encoding:utf-8 -*-
"""
-Script to generate contribor and pull request lists
+Script to generate contributor and pull request lists
This script generates contributor and pull request lists for release
-announcements using Github v3 protocol. Use requires an authentication token in
+changelogs using Github v3 protocol. Use requires an authentication token in
order to have sufficient bandwidth, you can get one following the directions at
`<https://help.github.com/articles/creating-an-access-token-for-command-line-use/>_
Don't add any scope, as the default is read access to public information. The
@@ -28,9 +28,9 @@ Some code was copied from scipy `tools/gh_list.py` and `tools/authors.py`.
Examples
--------
-From the bash command line with $GITHUB token.
+From the bash command line with $GITHUB token::
- $ ./tools/announce $GITHUB v1.11.0..v1.11.1 > announce.rst
+ $ ./tools/announce $GITHUB v1.13.0..v1.14.0 > 1.14.0-changelog.rst
"""
from __future__ import print_function, division
diff --git a/tools/swig/numpy.i b/tools/swig/numpy.i
index b8fdaeb1f..36bb55c98 100644
--- a/tools/swig/numpy.i
+++ b/tools/swig/numpy.i
@@ -80,6 +80,7 @@
%#define array_data(a) (((PyArrayObject*)a)->data)
%#define array_descr(a) (((PyArrayObject*)a)->descr)
%#define array_flags(a) (((PyArrayObject*)a)->flags)
+%#define array_clearflags(a,f) (((PyArrayObject*)a)->flags) &= ~f
%#define array_enableflags(a,f) (((PyArrayObject*)a)->flags) = f
%#define array_is_fortran(a) (PyArray_ISFORTRAN((PyArrayObject*)a))
%#else
@@ -94,6 +95,7 @@
%#define array_descr(a) PyArray_DESCR((PyArrayObject*)a)
%#define array_flags(a) PyArray_FLAGS((PyArrayObject*)a)
%#define array_enableflags(a,f) PyArray_ENABLEFLAGS((PyArrayObject*)a,f)
+%#define array_clearflags(a,f) PyArray_CLEARFLAGS((PyArrayObject*)a,f)
%#define array_is_fortran(a) (PyArray_IS_F_CONTIGUOUS((PyArrayObject*)a))
%#endif
%#define array_is_contiguous(a) (PyArray_ISCONTIGUOUS((PyArrayObject*)a))
@@ -485,7 +487,7 @@
{
int i;
int success = 1;
- int len;
+ size_t len;
char desired_dims[255] = "[";
char s[255];
char actual_dims[255] = "[";
@@ -538,7 +540,13 @@
int i;
npy_intp * strides = array_strides(ary);
if (array_is_fortran(ary)) return success;
+ int n_non_one = 0;
/* Set the Fortran ordered flag */
+ const npy_intp *dims = array_dimensions(ary);
+ for (i=0; i < nd; ++i)
+ n_non_one += (dims[i] != 1) ? 1 : 0;
+ if (n_non_one > 1)
+ array_clearflags(ary,NPY_ARRAY_CARRAY);
array_enableflags(ary,NPY_ARRAY_FARRAY);
/* Recompute the strides */
strides[0] = strides[nd-1];
@@ -3139,6 +3147,15 @@
%numpy_typemaps(unsigned long long, NPY_ULONGLONG, int)
%numpy_typemaps(float , NPY_FLOAT , int)
%numpy_typemaps(double , NPY_DOUBLE , int)
+%numpy_typemaps(int8_t , NPY_INT8 , int)
+%numpy_typemaps(int16_t , NPY_INT16 , int)
+%numpy_typemaps(int32_t , NPY_INT32 , int)
+%numpy_typemaps(int64_t , NPY_INT64 , int)
+%numpy_typemaps(uint8_t , NPY_UINT8 , int)
+%numpy_typemaps(uint16_t , NPY_UINT16 , int)
+%numpy_typemaps(uint32_t , NPY_UINT32 , int)
+%numpy_typemaps(uint64_t , NPY_UINT64 , int)
+
/* ***************************************************************
* The follow macro expansion does not work, because C++ bool is 4
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index ca9f236a2..bd9f79c22 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -89,14 +89,13 @@ setup_chroot()
sudo chroot $DIR bash -c "apt-get update"
# faster operation with preloaded eatmydata
- sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes eatmydata"
+ sudo chroot $DIR bash -c "apt-get install -qq -y eatmydata"
echo '/usr/$LIB/libeatmydata.so' | \
sudo tee -a $DIR/etc/ld.so.preload
# install needed packages
- sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes \
- libatlas-dev libatlas-base-dev gfortran \
- python-dev python-nose python-pip cython"
+ sudo chroot $DIR bash -c "apt-get install -qq -y \
+ libatlas-base-dev gfortran python-dev python-nose python-pip cython"
}
run_test()