140 files changed, 3036 insertions, 3544 deletions
diff --git a/.circleci/config.yml b/.circleci/config.yml index e0fb28c99..2a3d82677 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -20,7 +20,7 @@ jobs: command: | python3 -m venv venv . venv/bin/activate - pip install cython sphinx==1.7.9 matplotlib + pip install cython sphinx==1.8.5 matplotlib sudo apt-get update sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex @@ -39,14 +39,14 @@ jobs: . venv/bin/activate cd doc git submodule update --init - make html + SPHINXOPTS=-q make -e html - run: name: build neps command: | . venv/bin/activate cd doc/neps - make html + SPHINXOPTS=-q make -e html - store_artifacts: path: doc/build/html/ @@ -195,6 +195,7 @@ Tim Teichmann <t.teichmann@dashdos.com> tteichmann <t.teichmann@dashdos.com> Tim Teichmann <t.teichmann@dashdos.com> tteichmann <44259103+tteichmann@users.noreply.github.com> Tom Boyd <pezcore@users.noreply.github.com> pezcore <pezcore@users.noreply.github.com> Tom Poole <t.b.poole@gmail.com> tpoole <t.b.poole@gmail.com> +Tony LaTorre <tlatorre@uchicago.edu> tlatorre <tlatorre@uchicago.edu> Travis Oliphant <travis@continuum.io> Travis E. Oliphant <teoliphant@gmail.com> Travis Oliphant <travis@continuum.io> Travis Oliphant <oliphant@enthought.com> Valentin Haenel <valentin@haenel.co> Valentin Haenel <valentin.haenel@gmx.de> diff --git a/.travis.yml b/.travis.yml index 7498e1d66..061d8de1a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -69,6 +69,8 @@ matrix: env: # for matrix annotation only - PPC64_LE=1 + # use POWER8 OpenBLAS build, not system ATLAS + - ATLAS=None before_install: - ./tools/travis-before-install.sh diff --git a/MANIFEST.in b/MANIFEST.in index e15e0e58a..647e2f704 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -11,15 +11,18 @@ include site.cfg.example include numpy/random/mtrand/generate_mtrand_c.py recursive-include numpy/random/mtrand *.pyx *.pxd # Add build support that should go in sdist, but not go in bdist/be installed +# Note that sub-directories that don't have __init__ are apparently not +# included by 'recursive-include', so list those separately +recursive-include numpy * recursive-include numpy/_build_utils * -recursive-include numpy/linalg/lapack_lite *.c *.h +recursive-include numpy/linalg/lapack_lite * include runtests.py include tox.ini pytest.ini .coveragerc recursive-include tools * # Add sdist files whose use depends on local configuration. 
include numpy/core/src/common/cblasfuncs.c include numpy/core/src/common/python_xerbla.c -# Adding scons build related files not found by distutils +# Adding build related files not found by distutils recursive-include numpy/core/code_generators *.py *.txt recursive-include numpy/core *.in *.h # Add documentation and benchmarks: we don't use add_data_dir since we do not diff --git a/azure-pipelines.yml b/azure-pipelines.yml index f964fffaa..4e8a8d654 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -28,9 +28,11 @@ jobs: F77=gfortran-5 F90=gfortran-5 \ CFLAGS='-UNDEBUG -std=c99' python3 runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml" displayName: 'Run 32-bit Ubuntu Docker Build / Tests' + continueOnError: true - task: PublishTestResults@2 inputs: testResultsFiles: '**/test-*.xml' + failTaskOnFailedTests: true testRunTitle: 'Publish test results for Python 3.6-32 bit full Linux' - job: macOS pool: @@ -59,21 +61,33 @@ jobs: # two C compilers, but with homebrew looks like we're # now stuck getting the full gcc toolchain instead of # just pulling in gfortran - - script: HOMEBREW_NO_AUTO_UPDATE=1 brew install gcc + - script: | + # same version of gfortran as the wheel builds + brew install gcc49 + # manually link critical gfortran libraries + ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libgfortran.3.dylib /usr/local/lib/libgfortran.3.dylib + ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libquadmath.0.dylib /usr/local/lib/libquadmath.0.dylib + # manually symlink gfortran-4.9 to plain gfortran + # for f2py + ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran displayName: 'make gfortran available on mac os vm' + # use the pre-built openblas binary that most closely + # matches our MacOS wheel builds -- currently based + # primarily on file size / name details + - script: | + wget "https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.3-186-g701ea883-macosx_10_9_intel-gf_1becaaa.tar.gz" + tar -zxvf openblas-v0.3.3-186-g701ea883-macosx_10_9_intel-gf_1becaaa.tar.gz + # manually link to appropriate system paths + cp ./usr/local/lib/* /usr/local/lib/ + cp ./usr/local/include/* /usr/local/include/ + displayName: 'install pre-built openblas' - script: python -m pip install --upgrade pip setuptools wheel displayName: 'Install tools' - - script: python -m pip install cython nose pytz pytest pickle5 vulture docutils sphinx numpydoc matplotlib + - script: python -m pip install cython nose pytz pytest pickle5 vulture docutils sphinx==1.8.5 numpydoc matplotlib displayName: 'Install dependencies; some are optional to avoid test skips' - script: /bin/bash -c "! vulture . 
--min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'" displayName: 'Check for unreachable code paths in Python modules' - # NOTE: init_dgelsd failed init issue with current ACCELERATE / - # LAPACK configuration on Azure macos image; at the time of writing - # this plagues homebrew / macports NumPy builds, but we will - # circumvent for now by aggressively disabling acceleration for - # macos NumPy builds / tests; ACCELERATE=None on its own is not - # sufficient - # also, might as well prefer usage of clang over gcc proper + # prefer usage of clang over gcc proper # to match likely scenario on many user mac machines - script: python setup.py build -j 4 install displayName: 'Build NumPy' @@ -87,9 +101,11 @@ jobs: displayName: 'Run Refguide Check' - script: python runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml displayName: 'Run Full NumPy Test Suite' + continueOnError: true - task: PublishTestResults@2 inputs: testResultsFiles: '**/test-*.xml' + failTaskOnFailedTests: true testRunTitle: 'Publish test results for Python 3.6 64-bit full Mac OS' - job: Windows pool: @@ -188,7 +204,9 @@ jobs: displayName: 'For gh-12667; Windows DLL resolution' - script: python runtests.py -n --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml displayName: 'Run NumPy Test Suite' + continueOnError: true - task: PublishTestResults@2 inputs: testResultsFiles: '**/test-*.xml' + failTaskOnFailedTests: true testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows' diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py index 07ada2bbb..7ea8c39b0 100644 --- a/benchmarks/benchmarks/bench_function_base.py +++ b/benchmarks/benchmarks/bench_function_base.py @@ -199,6 +199,7 @@ class Sort(Benchmark): ['quick', 'merge', 'heap'], ['float64', 'int64', 'uint64'], [ + ('random',), ('ordered',), ('reversed',), ('uniform',), diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py index 879f9b69e..439cd422f 100644 --- a/benchmarks/benchmarks/bench_io.py +++ b/benchmarks/benchmarks/bench_io.py @@ -66,7 +66,8 @@ class Savez(Benchmark): self.squares = get_squares() def time_vb_savez_squares(self): - np.savez('tmp.npz', self.squares) + np.savez('tmp.npz', **self.squares) + class LoadtxtCSVComments(Benchmark): # benchmarks for np.loadtxt comment handling diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt index 515fc871b..42aa9561d 100644 --- a/doc/DISTUTILS.rst.txt +++ b/doc/DISTUTILS.rst.txt @@ -300,8 +300,68 @@ in writing setup scripts: Template files -------------- -XXX: Describe how files with extensions ``.f.src``, ``.pyf.src``, -``.c.src``, etc. are pre-processed by the ``build_src`` command. +NumPy Distutils preprocesses C source files (extension: :file:`.c.src`) written +in a custom templating language to generate C code. The :c:data:`@` symbol is +used to wrap macro-style variables to enable a string substitution mechanism +that might describe (for instance) a set of data types. + +As a more detailed scenario, a loop in the NumPy C source code may +have a :c:data:`@TYPE@` variable, targeted for string substitution, +which is preprocessed to a number of otherwise identical loops with +several strings such as :c:data:`INT, LONG, UINT, ULONG`. The :c:data:`@TYPE@` +style syntax thus reduces code duplication and maintenance burden by +mimicking languages that have generic type support.
By convention, +and as required by the preprocessor, generically typed blocks are preceded +by comment blocks that enumerate the intended string substitutions. + +The template language blocks are delimited by :c:data:`/**begin repeat` +and :c:data:`/**end repeat**/` lines, which may also be nested using +consecutively numbered delimiting lines such as :c:data:`/**begin repeat1` +and :c:data:`/**end repeat1**/`. String replacement specifications are started +and terminated using :c:data:`#`. This may be clearer in the following +template source example:: + + /* TIMEDELTA to non-float types */ + + /**begin repeat + * + * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG, + * LONGLONG, ULONGLONG, DATETIME, + * TIMEDELTA# + * #totype = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint, + * npy_long, npy_ulong, npy_longlong, npy_ulonglong, + * npy_datetime, npy_timedelta# + */ + + /**begin repeat1 + * + * #FROMTYPE = TIMEDELTA# + * #fromtype = npy_timedelta# + */ + static void + @FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n, + void *NPY_UNUSED(aip), void *NPY_UNUSED(aop)) + { + const @fromtype@ *ip = input; + @totype@ *op = output; + + while (n--) { + *op++ = (@totype@)*ip++; + } + } + /**end repeat1**/ + + /**end repeat**/ + +The preprocessing of generically typed C source files (whether in NumPy +proper or in any third party package using NumPy Distutils) is performed +by `conv_template.py`_. +The type specific C files generated (extension: :file:`.c`) by these modules +during the build process are ready to be compiled. This form +of generic typing is also supported for C header files (preprocessed to +produce :file:`.h` files). + +.. _conv_template.py: https://github.com/numpy/numpy/blob/master/numpy/distutils/conv_template.py Useful functions in ``numpy.distutils.misc_util`` ------------------------------------------------- diff --git a/doc/Makefile b/doc/Makefile index 38ce20c40..c70111385 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,10 +1,13 @@ # Makefile for Sphinx documentation # -# This needs to be major.minor, just "3" doesn't work - it will result in +# PYVER needs to be major.minor, just "3" doesn't work - it will result in # issues with the amendments to PYTHONPATH and install paths (see DIST_VARS). -# If you're using a different Python version, just change the number here. -PYVER = 3.6 + +# Use explicit "version_info" indexing since make cannot handle colon characters, and +# evaluate it now to allow easier debugging when printing the variable + +PYVER:=$(shell python3 -c 'from sys import version_info as v; print("{0}.{1}".format(v[0], v[1]))') PYTHON = python$(PYVER) # You can set these variables from the command line. @@ -17,7 +20,8 @@ FILES= # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source +ALLSPHINXOPTS = -WT --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \ + $(SPHINXOPTS) source .PHONY: help clean html web pickle htmlhelp latex changes linkcheck \ dist dist-build gitwash-update diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt index daf82aaaa..8169ea38a 100644 --- a/doc/TESTS.rst.txt +++ b/doc/TESTS.rst.txt @@ -37,10 +37,9 @@ or from the command line:: $ python runtests.py -SciPy uses the testing framework from NumPy (specifically -:ref:`numpy-testing`), so all the SciPy examples shown here are also -applicable to NumPy.
NumPy's full test suite can be run as -follows:: +SciPy uses the testing framework from :mod:`numpy.testing`, so all +the SciPy examples shown here are also applicable to NumPy. NumPy's full test +suite can be run as follows:: >>> import numpy >>> numpy.test() diff --git a/doc/changelog/1.16.2-changelog.rst b/doc/changelog/1.16.2-changelog.rst new file mode 100644 index 000000000..3cf0cc566 --- /dev/null +++ b/doc/changelog/1.16.2-changelog.rst @@ -0,0 +1,25 @@ + +Contributors +============ + +A total of 5 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Eric Wieser +* Matti Picus +* Tyler Reddy +* Tony LaTorre + + + +Pull requests merged +==================== + +A total of 7 pull requests were merged for this release. + +* `#12909 <https://github.com/numpy/numpy/pull/12909>`__: TST: fix vmImage dispatch in Azure +* `#12923 <https://github.com/numpy/numpy/pull/12923>`__: MAINT: remove complicated test of multiarray import failure mode +* `#13020 <https://github.com/numpy/numpy/pull/13020>`__: BUG: fix signed zero behavior in npy_divmod +* `#13026 <https://github.com/numpy/numpy/pull/13026>`__: MAINT: Add functions to parse shell-strings in the platform-native... +* `#13028 <https://github.com/numpy/numpy/pull/13028>`__: BUG: Fix regression in parsing of F90 and F77 environment variables +* `#13038 <https://github.com/numpy/numpy/pull/13038>`__: BUG: parse shell escaping in extra_compile_args and extra_link_args +* `#13041 <https://github.com/numpy/numpy/pull/13041>`__: BLD: Windows absolute path DLL loading diff --git a/doc/release/1.16.0-notes.rst b/doc/release/1.16.0-notes.rst index 341d5f715..1034d6e6c 100644 --- a/doc/release/1.16.0-notes.rst +++ b/doc/release/1.16.0-notes.rst @@ -176,7 +176,7 @@ of: * :c:member:`PyUFuncObject.core_dim_flags` * :c:member:`PyUFuncObject.core_dim_sizes` * :c:member:`PyUFuncObject.identity_value` -* :c:function:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity` +* :c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity` New Features @@ -407,7 +407,7 @@ Additionally, `logaddexp` now has an identity of ``-inf``, allowing it to be called on empty sequences, where previously it could not be. This is possible thanks to the new -:c:function:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`, which allows +:c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`, which allows arbitrary values to be used as identities now. Improved conversion from ctypes objects diff --git a/doc/release/1.16.2-notes.rst b/doc/release/1.16.2-notes.rst new file mode 100644 index 000000000..62b90dc40 --- /dev/null +++ b/doc/release/1.16.2-notes.rst @@ -0,0 +1,70 @@ +========================== +NumPy 1.16.2 Release Notes +========================== + +NumPy 1.16.2 is a quick release fixing several problems encountered on Windows. +The Python versions supported are 2.7 and 3.5-3.7. The Windows problems +addressed are: + +- DLL load problems for NumPy wheels on Windows, +- distutils command line parsing on Windows. + +There is also a regression fix correcting signed zeros produced by divmod; see +below for details. + +Downstream developers building this release should use Cython >= 0.29.2 and, if +using OpenBLAS, OpenBLAS > v0.3.4. + +If you are installing using pip, you may encounter a problem where older +versions of NumPy that pip did not delete become mixed with the +current version, resulting in an ``ImportError``.
That problem is particularly +common on Debian-derived distributions due to a modified pip. The fix is to +make sure all previous NumPy versions installed by pip have been removed. See +`#12736 <https://github.com/numpy/numpy/issues/12736>`__ for discussion of the +issue. + + +Compatibility notes +=================== + +Signed zero when using divmod +----------------------------- +Starting in version 1.12.0, numpy incorrectly returned a negatively signed zero +when using the ``divmod`` and ``floor_divide`` functions when the result was +zero. For example:: + + >>> np.zeros(10)//1 + array([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0.]) + +With this release, the result is correctly returned as a positively signed +zero:: + + >>> np.zeros(10)//1 + array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) + + +Contributors +============ + +A total of 5 people contributed to this release. People with a "+" by their +names contributed a patch for the first time. + +* Charles Harris +* Eric Wieser +* Matti Picus +* Tyler Reddy +* Tony LaTorre + + + + +Pull requests merged +==================== + +A total of 7 pull requests were merged for this release. + +* `#12909 <https://github.com/numpy/numpy/pull/12909>`__: TST: fix vmImage dispatch in Azure +* `#12923 <https://github.com/numpy/numpy/pull/12923>`__: MAINT: remove complicated test of multiarray import failure mode +* `#13020 <https://github.com/numpy/numpy/pull/13020>`__: BUG: fix signed zero behavior in npy_divmod +* `#13026 <https://github.com/numpy/numpy/pull/13026>`__: MAINT: Add functions to parse shell-strings in the platform-native... +* `#13028 <https://github.com/numpy/numpy/pull/13028>`__: BUG: Fix regression in parsing of F90 and F77 environment variables +* `#13038 <https://github.com/numpy/numpy/pull/13038>`__: BUG: parse shell escaping in extra_compile_args and extra_link_args +* `#13041 <https://github.com/numpy/numpy/pull/13041>`__: BLD: Windows absolute path DLL loading diff --git a/doc/release/1.17.0-notes.rst b/doc/release/1.17.0-notes.rst index d8ece4244..1155449a7 100644 --- a/doc/release/1.17.0-notes.rst +++ b/doc/release/1.17.0-notes.rst @@ -18,6 +18,21 @@ New functions Deprecations ============ +``np.polynomial`` functions warn when passed ``float`` in place of ``int`` +-------------------------------------------------------------------------- +Previously, functions in this module would accept ``float`` values provided they +were integral (``1.0``, ``2.0``, etc). For consistency with the rest of numpy, +doing so is now deprecated, and in future will raise a ``TypeError``. + +Similarly, passing a float like ``0.5`` in place of an integer will now raise a +``TypeError`` instead of the previous ``ValueError``. + +Deprecate ``numpy.distutils.exec_command`` and ``numpy.distutils.temp_file_name`` +--------------------------------------------------------------------------------- +The internal use of these functions has been refactored and there are better +alternatives. Replace ``exec_command`` with `subprocess.Popen` and +``temp_file_name`` with `tempfile.mkstemp`. + Future Changes ============== @@ -38,7 +53,6 @@ now be rounded up instead of down, changing the last bit (ULP) of the result. Signed zero when using divmod ----------------------------- - Starting in version 1.12.0, numpy incorrectly returned a negatively signed zero when using the ``divmod`` and ``floor_divide`` functions when the result was zero.
For example:: @@ -52,6 +66,18 @@ zero:: >>> np.zeros(10)//1 array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]) +Do not look up ``__buffer__`` attribute in `numpy.frombuffer` +------------------------------------------------------------ +Looking up the ``__buffer__`` attribute in `numpy.frombuffer` was undocumented and +non-functional. This code was removed. If needed, use +``frombuffer(memoryview(obj), ...)`` instead. + +``out`` is buffered for memory overlaps in ``np.take``, ``np.choose``, ``np.put`` +-------------------------------------------------------------------------------- +If the ``out`` argument to these functions is provided and has memory overlap with +the other arguments, it is now buffered to avoid order-dependent behavior. + + C API changes ============= @@ -88,7 +114,6 @@ available bits add zero padding. Negative counts trim bits off the end instead of counting from the beginning. None counts implement the existing behavior of unpacking everything. - ``np.linalg.svd`` and ``np.linalg.pinv`` can be faster on hermitian inputs -------------------------------------------------------------------------- These functions now accept a ``hermitian`` argument, matching the one added @@ -99,6 +124,10 @@ divmod operation is now supported for two ``timedelta64`` operands The divmod operator now handles two ``np.timedelta64`` operands, with type signature mm->qm. +New mode "empty" for ``np.pad`` +------------------------------- +This mode pads an array to a desired shape without initializing the new +entries. Improvements ============ @@ -113,7 +142,6 @@ tolerances. Replacement of the `fftpack`-based FFT module by the `pocketfft` library ------------------------------------------------------------------------ - Both implementations have the same ancestor (Fortran77 `FFTPACK` by Paul N. Swarztrauber), but `pocketfft` contains additional modifications which improve both accuracy and performance in some circumstances. For FFT lengths @@ -152,18 +180,35 @@ but with this change, you can do:: thereby saving a level of indentation +Improve performance of ``np.pad`` +--------------------------------- +The performance of the function has been improved for most cases by filling in +a preallocated array with the desired padded shape instead of using +concatenation. + ``np.interp`` handles infinities more robustly ---------------------------------------------- In some cases where ``np.interp`` would previously return ``np.nan``, it now returns an appropriate infinity. +Specialized ``np.isnan``, ``np.isinf``, and ``np.isfinite`` ufuncs for bool and int types +----------------------------------------------------------------------------------------- +The boolean and integer types are incapable of storing ``np.nan`` and ``np.inf`` values, +which allows us to provide specialized ufuncs that are up to 250x faster than the previous +approach. + +New keywords added to ``np.nan_to_num`` +--------------------------------------- +``np.nan_to_num`` now accepts keywords ``nan``, ``posinf`` and ``neginf`` allowing the +user to define the value to replace the ``nan``, positive and negative ``np.inf`` values +respectively. + Changes ======= ``median`` and ``percentile`` family of functions no longer warn about ``nan`` ------------------------------------------------------------------------------ - `numpy.median`, `numpy.percentile`, and `numpy.quantile` used to emit a ``RuntimeWarning`` when encountering a `numpy.nan`. Since they return the ``nan`` value, the warning is redundant and has been removed.
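As a minimal sketch of the new ``np.nan_to_num`` keywords described above (assuming NumPy >= 1.17; the replacement values are arbitrary and chosen only for illustration)::

    >>> np.nan_to_num(np.array([np.nan, np.inf, -np.inf]), nan=0.0, posinf=255, neginf=-255)
    array([   0.,  255., -255.])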
@@ -180,4 +225,8 @@ of NumPy functions on non-NumPy arrays, as described in `NEP 18`_. The feature was available for testing with NumPy 1.16 if appropriate environment variables are set, but is now always enabled. +``__array_interface__`` offset now works as documented +------------------------------------------------------ +The interface may use an ``offset`` value that was previously mistakenly ignored. + .. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html diff --git a/doc/scipy-sphinx-theme b/doc/scipy-sphinx-theme -Subproject d990ab9134199f6496b9ac8567f10791f04a720 +Subproject f0d96ae2bf3b010ce53adadde1e38997497a513 diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst index f9b438bfd..445ce3204 100644 --- a/doc/source/dev/development_environment.rst +++ b/doc/source/dev/development_environment.rst @@ -127,6 +127,9 @@ the interpreter, tests can be run like this:: >>> np.test('full') # Also run tests marked as slow >>> np.test('full', verbose=2) # Additionally print test name/file + An example of a successful test run: + ``4686 passed, 362 skipped, 9 xfailed, 5 warnings in 213.99 seconds`` + Or a similar way from the command line:: $ python -c "import numpy as np; np.test()" diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst index 825b93b53..43aff1931 100644 --- a/doc/source/dev/index.rst +++ b/doc/source/dev/index.rst @@ -3,11 +3,12 @@ Contributing to NumPy ##################### .. toctree:: - :maxdepth: 3 + :maxdepth: 2 conduct/code_of_conduct gitwash/index development_environment + ../benchmarking style_guide releasing governance/index diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst index cdf490c37..98d1b88ba 100644 --- a/doc/source/docs/howto_build_docs.rst +++ b/doc/source/docs/howto_build_docs.rst @@ -5,7 +5,7 @@ Building the NumPy API and reference docs ========================================= We currently use Sphinx_ for generating the API and reference -documentation for NumPy. You will need Sphinx 1.0.1 or newer. +documentation for NumPy. You will need Sphinx 1.8.3 or newer. If you only want to get the documentation, note that pre-built versions can be found at diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst index f2072263f..b55feb247 100644 --- a/doc/source/reference/arrays.dtypes.rst +++ b/doc/source/reference/arrays.dtypes.rst @@ -14,7 +14,7 @@ following aspects of the data: 1. Type of the data (integer, float, Python object, etc.) 2. Size of the data (how many bytes is in *e.g.* the integer) 3. Byte order of the data (:term:`little-endian` or :term:`big-endian`) -4. If the data type is :term:`structured`, an aggregate of other +4. If the data type is a :term:`structured data type`, an aggregate of other data types, (*e.g.*, describing an array item consisting of an integer and a float), @@ -42,7 +42,7 @@ needed in NumPy. pair: dtype; field Structured data types are formed by creating a data type whose -:term:`fields` contain other data types. Each field has a name by +:term:`fields <field>` contain other data types. Each field has a name by which it can be :ref:`accessed <arrays.indexing.fields>`. The parent data type should be of sufficient size to contain all its fields; the parent is nearly always based on the :class:`void` type which allows @@ -145,7 +145,7 @@ Array-scalar types This is true for their sub-classes as well.
Note that not all data-type information can be supplied with a - type-object: for example, :term:`flexible` data-types have + type-object: for example, `flexible` data-types have a default *itemsize* of 0, and require an explicitly given size to be useful. @@ -511,7 +511,7 @@ Endianness of this data: dtype.byteorder -Information about sub-data-types in a :term:`structured` data type: +Information about sub-data-types in a :term:`structured data type`: .. autosummary:: :toctree: generated/ diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst index 3a319ecca..0c0c8dff6 100644 --- a/doc/source/reference/arrays.indexing.rst +++ b/doc/source/reference/arrays.indexing.rst @@ -57,6 +57,17 @@ interpreted as counting from the end of the array (*i.e.*, if All arrays generated by basic slicing are always :term:`views <view>` of the original array. +.. note:: + + NumPy slicing creates a :term:`view` instead of a copy, unlike slicing of + builtin Python sequences such as string, tuple and list. + Care must be taken when a small portion is extracted + from a large array that is no longer needed afterwards, + because the extracted portion holds a reference to the large + original array, whose memory will not be released until + all arrays derived from it are garbage-collected. In such cases an + explicit ``copy()`` is recommended. + The standard rules of sequence slicing apply to basic slicing on a per-dimension basis (including using a step index). Some useful concepts to remember include: diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst index 98a811d35..5e685f25c 100644 --- a/doc/source/reference/arrays.ndarray.rst +++ b/doc/source/reference/arrays.ndarray.rst @@ -82,10 +82,12 @@ Indexing arrays Arrays can be indexed using an extended Python slicing syntax, ``array[selection]``. Similar syntax is also used for accessing -fields in a :ref:`structured array <arrays.dtypes.field>`. +fields in a :term:`structured data type`. .. seealso:: :ref:`Array Indexing <arrays.indexing>`. +.. _memory-layout: + Internal memory layout of an ndarray ==================================== @@ -127,7 +129,7 @@ strided scheme, and correspond to memory that can be *addressed* by the strides: where :math:`d_j` `= self.shape[j]`. Both the C and Fortran orders are :term:`contiguous`, *i.e.,* -:term:`single-segment`, memory layouts, in which every part of the +single-segment, memory layouts, in which every part of the memory block can be accessed by some combination of the indices. While a C-style and Fortran-style contiguous array, which has the corresponding @@ -143,14 +145,15 @@ different. This can happen in two cases: considered C-style and Fortran-style contiguous. Point 1. means that ``self`` and ``self.squeeze()`` always have the same -contiguity and :term:`aligned` flags value. This also means that even a high -dimensional array could be C-style and Fortran-style contiguous at the same -time. +contiguity and ``aligned`` flags value. This also means +that even a high dimensional array could be C-style and Fortran-style +contiguous at the same time. .. index:: aligned An array is considered aligned if the memory offsets for all elements and the -base offset itself is a multiple of `self.itemsize`. +base offset itself is a multiple of `self.itemsize`. Understanding +`memory-alignment` leads to better performance on most hardware.
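The contiguity and alignment properties described above can be inspected at runtime through the ``flags`` attribute; a minimal sketch assuming standard NumPy behavior (not part of this patch)::

    >>> x = np.arange(12).reshape(3, 4)
    >>> x.flags['C_CONTIGUOUS'], x.flags['F_CONTIGUOUS'], x.flags['ALIGNED']
    (True, False, True)
    >>> x.T.flags['F_CONTIGUOUS']   # the transpose of a C-contiguous array
    True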
.. note:: @@ -441,7 +444,7 @@ Each of the arithmetic operations (``+``, ``-``, ``*``, ``/``, ``//``, ``%``, ``divmod()``, ``**`` or ``pow()``, ``<<``, ``>>``, ``&``, ``^``, ``|``, ``~``) and the comparisons (``==``, ``<``, ``>``, ``<=``, ``>=``, ``!=``) is equivalent to the corresponding -:term:`universal function` (or :term:`ufunc` for short) in NumPy. For +universal function (or :term:`ufunc` for short) in NumPy. For more information, see the section on :ref:`Universal Functions <ufuncs>`. diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst index 9c4f05f75..d27d61e2c 100644 --- a/doc/source/reference/arrays.scalars.rst +++ b/doc/source/reference/arrays.scalars.rst @@ -177,7 +177,7 @@ Any Python object: .. note:: - The data actually stored in :term:`object arrays <object array>` + The data actually stored in object arrays (*i.e.*, arrays having dtype :class:`object_`) are references to Python objects, not the objects themselves. Hence, object arrays behave more like usual Python :class:`lists <list>`, in the sense @@ -188,8 +188,10 @@ Any Python object: on item access, but instead returns the actual object that the array item refers to. -The following data types are :term:`flexible`. They have no predefined -size: the data they describe can be of different length in different +.. index:: flexible + +The following data types are **flexible**: they have no predefined +size and the data they describe can be of different length in different arrays. (In the character codes ``#`` is an integer denoting how many elements the data type consists of.) diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst index 44d09a9fe..7d6942f75 100644 --- a/doc/source/reference/c-api.array.rst +++ b/doc/source/reference/c-api.array.rst @@ -219,7 +219,7 @@ From scratch If *data* is ``NULL``, then new uninitialized memory will be allocated and *flags* can be non-zero to indicate a Fortran-style contiguous array. Use - :c:ref:`PyArray_FILLWBYTE` to initialze the memory. + :c:func:`PyArray_FILLWBYTE` to initialize the memory. If *data* is not ``NULL``, then it is assumed to point to the memory to be used for the array and the *flags* argument is used as the @@ -510,6 +510,11 @@ From other objects :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| :c:data:`NPY_ARRAY_ALIGNED` + .. c:var:: NPY_ARRAY_OUT_ARRAY + + :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \| + :c:data:`NPY_ARRAY_WRITEABLE` + .. c:var:: NPY_ARRAY_OUT_FARRAY + :c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \| @@ -573,8 +578,9 @@ From other objects return NULL; } if (arr == NULL) { + /* ... validate/change dtype, validate flags, ndim, etc ... - // Could make custom strides here too + Could make custom strides here too */ arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim, dims, NULL, fortran ? NPY_ARRAY_F_CONTIGUOUS : 0, @@ -588,10 +594,14 @@ From other objects } } else { + /* ... in this case the other parameters weren't filled, just validate and possibly copy arr itself ... + */ } + /* ... use arr ... + */ .. c:function:: PyObject* PyArray_CheckFromAny( \ PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, \ @@ -784,7 +794,7 @@ From other objects PyObject* obj, int typenum, int requirements) Combination of :c:func:`PyArray_FROM_OF` and :c:func:`PyArray_FROM_OT` - allowing both a *typenum* and a *flags* argument to be provided.. + allowing both a *typenum* and a *flags* argument to be provided. ..
c:function:: PyObject* PyArray_FROMANY( \ PyObject* obj, int typenum, int min, int max, int requirements) @@ -816,17 +826,17 @@ Dealing with types General check of Python Type ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -.. c:function:: PyArray_Check(op) +.. c:function:: PyArray_Check(PyObject *op) Evaluates true if *op* is a Python object whose type is a sub-type of :c:data:`PyArray_Type`. -.. c:function:: PyArray_CheckExact(op) +.. c:function:: PyArray_CheckExact(PyObject *op) Evaluates true if *op* is a Python object with type :c:data:`PyArray_Type`. -.. c:function:: PyArray_HasArrayInterface(op, out) +.. c:function:: PyArray_HasArrayInterface(PyObject *op, PyObject *out) If ``op`` implements any part of the array interface, then ``out`` will contain a new reference to the newly created ndarray using @@ -2660,22 +2670,22 @@ cost of a slight overhead. .. code-block:: c - PyArrayIterObject \*iter; - PyArrayNeighborhoodIterObject \*neigh_iter; + PyArrayIterObject *iter; + PyArrayNeighborhoodIterObject *neigh_iter; iter = PyArray_IterNew(x); - //For a 3x3 kernel + /*For a 3x3 kernel */ bounds = {-1, 1, -1, 1}; neigh_iter = (PyArrayNeighborhoodIterObject*)PyArrayNeighborhoodIter_New( iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL); for(i = 0; i < iter->size; ++i) { for (j = 0; j < neigh_iter->size; ++j) { - // Walk around the item currently pointed by iter->dataptr + /* Walk around the item currently pointed by iter->dataptr */ PyArrayNeighborhoodIter_Next(neigh_iter); } - // Move to the next point of iter + /* Move to the next point of iter */ PyArrayIter_Next(iter); PyArrayNeighborhoodIter_Reset(neigh_iter); } @@ -3256,19 +3266,19 @@ Memory management Macros to allocate, free, and reallocate memory. These macros are used internally to create arrays. -.. c:function:: npy_intp* PyDimMem_NEW(nd) +.. c:function:: npy_intp* PyDimMem_NEW(int nd) -.. c:function:: PyDimMem_FREE(npy_intp* ptr) +.. c:function:: PyDimMem_FREE(char* ptr) -.. c:function:: npy_intp* PyDimMem_RENEW(npy_intp* ptr, npy_intp newnd) +.. c:function:: npy_intp* PyDimMem_RENEW(void* ptr, size_t newnd) Macros to allocate, free, and reallocate dimension and strides memory. -.. c:function:: PyArray_malloc(nbytes) +.. c:function:: void* PyArray_malloc(size_t nbytes) -.. c:function:: PyArray_free(ptr) +.. c:function:: PyArray_free(void* ptr) -.. c:function:: PyArray_realloc(ptr, nbytes) +.. c:function:: void* PyArray_realloc(npy_intp* ptr, size_t nbytes) These macros use different memory allocators, depending on the constant :c:data:`NPY_USE_PYMEM`. The system malloc is used when @@ -3470,31 +3480,31 @@ Other constants Miscellaneous Macros ^^^^^^^^^^^^^^^^^^^^ -.. c:function:: PyArray_SAMESHAPE(a1, a2) +.. c:function:: PyArray_SAMESHAPE(PyArrayObject *a1, PyArrayObject *a2) Evaluates as True if arrays *a1* and *a2* have the same shape. -.. c:function:: PyArray_MAX(a,b) +.. c:macro:: PyArray_MAX(a,b) Returns the maximum of *a* and *b*. If (*a*) or (*b*) are expressions they are evaluated twice. -.. c:function:: PyArray_MIN(a,b) +.. c:macro:: PyArray_MIN(a,b) Returns the minimum of *a* and *b*. If (*a*) or (*b*) are expressions they are evaluated twice. -.. c:function:: PyArray_CLT(a,b) +.. c:macro:: PyArray_CLT(a,b) -.. c:function:: PyArray_CGT(a,b) +.. c:macro:: PyArray_CGT(a,b) -.. c:function:: PyArray_CLE(a,b) +.. c:macro:: PyArray_CLE(a,b) -.. c:function:: PyArray_CGE(a,b) +.. c:macro:: PyArray_CGE(a,b) -.. c:function:: PyArray_CEQ(a,b) +.. c:macro:: PyArray_CEQ(a,b) -.. c:function:: PyArray_CNE(a,b) +.. 
c:macro:: PyArray_CNE(a,b) Implements the complex comparisons between two complex numbers (structures with a real and imag member) using NumPy's definition diff --git a/doc/source/reference/c-api.dtype.rst b/doc/source/reference/c-api.dtype.rst index 9ac46b284..72e908861 100644 --- a/doc/source/reference/c-api.dtype.rst +++ b/doc/source/reference/c-api.dtype.rst @@ -308,13 +308,45 @@ to the front of the integer name. (unsigned) char -.. c:type:: npy_(u)short +.. c:type:: npy_short - (unsigned) short + short -.. c:type:: npy_(u)int +.. c:type:: npy_ushort - (unsigned) int + unsigned short + +.. c:type:: npy_uint + + unsigned int + +.. c:type:: npy_int + + int + +.. c:type:: npy_int16 + + 16-bit integer + +.. c:type:: npy_uint16 + + 16-bit unsigned integer + +.. c:type:: npy_int32 + + 32-bit integer + +.. c:type:: npy_uint32 + + 32-bit unsigned integer + +.. c:type:: npy_int64 + + 64-bit integer + +.. c:type:: npy_uint64 + + 64-bit unsigned integer .. c:type:: npy_(u)long @@ -324,22 +356,31 @@ to the front of the integer name. (unsigned long long int) -.. c:type:: npy_(u)intp +.. c:type:: npy_intp - (unsigned) Py_intptr_t (an integer that is the size of a pointer on + Py_intptr_t (an integer that is the size of a pointer on + the platform). + +.. c:type:: npy_uintp + + unsigned Py_intptr_t (an integer that is the size of a pointer on the platform). (Complex) Floating point ^^^^^^^^^^^^^^^^^^^^^^^^ +.. c:type:: npy_half + + 16-bit float + .. c:type:: npy_(c)float - float + 32-bit float .. c:type:: npy_(c)double - double + 64-bit double .. c:type:: npy_(c)longdouble diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api.types-and-structures.rst index f04d65ee1..f411ebc44 100644 --- a/doc/source/reference/c-api.types-and-structures.rst +++ b/doc/source/reference/c-api.types-and-structures.rst @@ -203,14 +203,17 @@ PyArrayDescr_Type char kind; char type; char byteorder; - char unused; - int flags; + char flags; int type_num; int elsize; int alignment; PyArray_ArrayDescr *subarray; PyObject *fields; + PyObject *names; PyArray_ArrFuncs *f; + PyObject *metadata; + NpyAuxData *c_metadata; + npy_hash_t hash; } PyArray_Descr; .. c:member:: PyTypeObject *PyArray_Descr.typeobj @@ -242,7 +245,7 @@ PyArrayDescr_Type endian), '=' (native), '\|' (irrelevant, ignore). All builtin data- types have byteorder '='. -.. c:member:: int PyArray_Descr.flags +.. c:member:: char PyArray_Descr.flags A data-type bit-flag that determines if the data-type exhibits object- array like behavior. Each bit in this member is a flag which are named @@ -377,6 +380,11 @@ PyArrayDescr_Type normally a Python string. These tuples are placed in this dictionary keyed by name (and also title if given). +.. c:member:: PyObject *PyArray_Descr.names + + An ordered tuple of field names. It is NULL if no field is + defined. + .. c:member:: PyArray_ArrFuncs *PyArray_Descr.f A pointer to a structure containing functions that the type needs @@ -384,6 +392,20 @@ PyArrayDescr_Type thing as the universal functions (ufuncs) described later. Their signatures can vary arbitrarily. +.. c:member:: PyObject *PyArray_Descr.metadata + + Metadata about this dtype. + +.. c:member:: NpyAuxData *PyArray_Descr.c_metadata + + Metadata specific to the C implementation + of the particular dtype. Added for NumPy 1.7.0. + +.. c:member:: Npy_hash_t *PyArray_Descr.hash + + Currently unused. Reserved for future use in caching + hash values. + .. c:type:: PyArray_ArrFuncs Functions implementing internal features. 
Not all of these diff --git a/doc/source/reference/maskedarray.baseclass.rst b/doc/source/reference/maskedarray.baseclass.rst index 17a0a940d..204ebfe08 100644 --- a/doc/source/reference/maskedarray.baseclass.rst +++ b/doc/source/reference/maskedarray.baseclass.rst @@ -49,11 +49,11 @@ The :class:`MaskedArray` class .. class:: MaskedArray - A subclass of :class:`~numpy.ndarray` designed to manipulate numerical arrays with missing data. +A subclass of :class:`~numpy.ndarray` designed to manipulate numerical arrays with missing data. - An instance of :class:`MaskedArray` can be thought as the combination of several elements: +An instance of :class:`MaskedArray` can be thought as the combination of several elements: * The :attr:`~MaskedArray.data`, as a regular :class:`numpy.ndarray` of any shape or datatype (the data). * A boolean :attr:`~numpy.ma.MaskedArray.mask` with the same shape as the data, where a ``True`` value indicates that the corresponding element of the data is invalid. @@ -62,89 +62,26 @@ The :class:`MaskedArray` class +.. _ma-attributes: + Attributes and properties of masked arrays ------------------------------------------ .. seealso:: :ref:`Array Attributes <arrays.ndarray.attributes>` +.. autoattribute:: MaskedArray.data -.. attribute:: MaskedArray.data - - Returns the underlying data, as a view of the masked array. - If the underlying data is a subclass of :class:`numpy.ndarray`, it is - returned as such. - - >>> x = ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) - >>> x.data - matrix([[1, 2], - [3, 4]]) - - The type of the data can be accessed through the :attr:`baseclass` - attribute. - -.. attribute:: MaskedArray.mask - - Returns the underlying mask, as an array with the same shape and structure - as the data, but where all fields are atomically booleans. - A value of ``True`` indicates an invalid entry. - - -.. attribute:: MaskedArray.recordmask - - Returns the mask of the array if it has no named fields. For structured - arrays, returns a ndarray of booleans where entries are ``True`` if **all** - the fields are masked, ``False`` otherwise:: - - >>> x = ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], - ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], - ... dtype=[('a', int), ('b', int)]) - >>> x.recordmask - array([False, False, True, False, False]) - - -.. attribute:: MaskedArray.fill_value - - Returns the value used to fill the invalid entries of a masked array. - The value is either a scalar (if the masked array has no named fields), - or a 0-D ndarray with the same :attr:`dtype` as the masked array if it has - named fields. - - The default filling value depends on the datatype of the array: - - ======== ======== - datatype default - ======== ======== - bool True - int 999999 - float 1.e20 - complex 1.e20+0j - object '?' - string 'N/A' - ======== ======== - - - -.. attribute:: MaskedArray.baseclass - - Returns the class of the underlying data. - - >>> x = ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 0], [1, 0]]) - >>> x.baseclass - <class 'numpy.matrixlib.defmatrix.matrix'> - - -.. attribute:: MaskedArray.sharedmask +.. autoattribute:: MaskedArray.mask - Returns whether the mask of the array is shared between several masked arrays. - If this is the case, any modification to the mask of one array will be - propagated to the others. +.. autoattribute:: MaskedArray.recordmask +.. autoattribute:: MaskedArray.fill_value -.. attribute:: MaskedArray.hardmask +.. 
autoattribute:: MaskedArray.baseclass - Returns whether the mask is hard (``True``) or soft (``False``). - When the mask is hard, masked entries cannot be unmasked. +.. autoattribute:: MaskedArray.sharedmask +.. autoattribute:: MaskedArray.hardmask As :class:`MaskedArray` is a subclass of :class:`~numpy.ndarray`, a masked array also inherits all the attributes and properties of a :class:`~numpy.ndarray` instance. diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst index 15f2ba0a4..491bb6bff 100644 --- a/doc/source/reference/routines.ma.rst +++ b/doc/source/reference/routines.ma.rst @@ -68,9 +68,6 @@ Inspecting the array ma.is_masked ma.is_mask - ma.MaskedArray.data - ma.MaskedArray.mask - ma.MaskedArray.recordmask ma.MaskedArray.all ma.MaskedArray.any @@ -80,6 +77,12 @@ Inspecting the array ma.size +.. autosummary:: + + ma.MaskedArray.data + ma.MaskedArray.mask + ma.MaskedArray.recordmask + _____ Manipulating a MaskedArray @@ -285,8 +288,10 @@ Filling a masked array ma.MaskedArray.get_fill_value ma.MaskedArray.set_fill_value - ma.MaskedArray.fill_value +.. autosummary:: + + ma.MaskedArray.fill_value _____ diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst index 77c046768..c676dec07 100644 --- a/doc/source/reference/routines.testing.rst +++ b/doc/source/reference/routines.testing.rst @@ -1,5 +1,3 @@ -.. _numpy-testing: - .. module:: numpy.testing Test Support (:mod:`numpy.testing`) diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst index 3cc956887..e2981b40e 100644 --- a/doc/source/reference/ufuncs.rst +++ b/doc/source/reference/ufuncs.rst @@ -16,7 +16,7 @@ A universal function (or :term:`ufunc` for short) is a function that operates on :class:`ndarrays <ndarray>` in an element-by-element fashion, supporting :ref:`array broadcasting <ufuncs.broadcasting>`, :ref:`type casting <ufuncs.casting>`, and several other standard features. That -is, a ufunc is a ":term:`vectorized`" wrapper for a function that +is, a ufunc is a ":term:`vectorized <vectorization>`" wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs. @@ -59,7 +59,7 @@ understood by four rules: entry in that dimension will be used for all calculations along that dimension. In other words, the stepping machinery of the :term:`ufunc` will simply not step along that dimension (the - :term:`stride` will be 0 for that dimension). + :ref:`stride <memory-layout>` will be 0 for that dimension). Broadcasting is used throughout NumPy to decide how to handle disparately shaped arrays; for example, all arithmetic operations (``+``, @@ -70,7 +70,7 @@ arrays before operation. .. index:: broadcastable -A set of arrays is called ":term:`broadcastable`" to the same shape if +A set of arrays is called "broadcastable" to the same shape if the above rules produce a valid result, *i.e.*, one of the following is true: diff --git a/doc/source/release.rst b/doc/source/release.rst index e9334cb34..8c8af6834 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -3,6 +3,7 @@ Release Notes ************* .. include:: ../release/1.17.0-notes.rst +.. include:: ../release/1.16.2-notes.rst .. include:: ../release/1.16.1-notes.rst .. include:: ../release/1.16.0-notes.rst .. 
include:: ../release/1.15.4-notes.rst diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst index 9738168d2..b70036c89 100644 --- a/doc/source/user/c-info.how-to-extend.rst +++ b/doc/source/user/c-info.how-to-extend.rst @@ -362,8 +362,7 @@ specific builtin data-type ( *e.g.* float), while specifying a particular set of requirements ( *e.g.* contiguous, aligned, and writeable). The syntax is -.. c:function:: PyObject *PyArray_FROM_OTF( \ - PyObject* obj, int typenum, int requirements) +:c:func:`PyArray_FROM_OTF` Return an ndarray from any Python object, *obj*, that can be converted to an array. The number of dimensions in the returned @@ -446,31 +445,23 @@ writeable). The syntax is flags most commonly needed are :c:data:`NPY_ARRAY_IN_ARRAY`, :c:data:`NPY_OUT_ARRAY`, and :c:data:`NPY_ARRAY_INOUT_ARRAY`: - .. c:var:: NPY_ARRAY_IN_ARRAY + :c:data:`NPY_ARRAY_IN_ARRAY` - Equivalent to :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| - :c:data:`NPY_ARRAY_ALIGNED`. This combination of flags is useful - for arrays that must be in C-contiguous order and aligned. - These kinds of arrays are usually input arrays for some - algorithm. + This flag is useful for arrays that must be in C-contiguous + order and aligned. These kinds of arrays are usually input + arrays for some algorithm. - .. c:var:: NPY_ARRAY_OUT_ARRAY + :c:data:`NPY_ARRAY_OUT_ARRAY` - Equivalent to :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| - :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE`. This - combination of flags is useful to specify an array that is + This flag is useful to specify an array that is in C-contiguous order, is aligned, and can be written to as well. Such an array is usually returned as output (although normally such output arrays are created from scratch). - .. c:var:: NPY_ARRAY_INOUT_ARRAY + :c:data:`NPY_ARRAY_INOUT_ARRAY` - Equivalent to :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| - :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \| - :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \| - :c:data:`NPY_ARRAY_UPDATEIFCOPY`. This combination of flags is - useful to specify an array that will be used for both + This flag is useful to specify an array that will be used for both input and output. :c:func:`PyArray_ResolveWritebackIfCopy` must be called before :func:`Py_DECREF` at the end of the interface routine to write back the temporary data @@ -487,16 +478,16 @@ writeable). The syntax is Other useful flags that can be OR'd as additional requirements are: - .. c:var:: NPY_ARRAY_FORCECAST + :c:data:`NPY_ARRAY_FORCECAST` Cast to the desired type, even if it can't be done without losing information. - .. c:var:: NPY_ARRAY_ENSURECOPY + :c:data:`NPY_ARRAY_ENSURECOPY` Make sure the resulting array is a copy of the original. - .. c:var:: NPY_ARRAY_ENSUREARRAY + :c:data:`NPY_ARRAY_ENSUREARRAY` Make sure the resulting object is an actual ndarray and not a sub- class. @@ -523,7 +514,7 @@ creation functions go through this heavily re-used code. Because of its flexibility, it can be somewhat confusing to use. As a result, simpler forms exist that are easier to use. -.. c:function:: PyObject *PyArray_SimpleNew(int nd, npy_intp* dims, int typenum) +:c:func:`PyArray_SimpleNew` This function allocates new memory and places it in an ndarray with *nd* dimensions whose shape is determined by the array of @@ -535,8 +526,7 @@ simpler forms exist that are easier to use. memory for the array can be set to zero if desired using :c:func:`PyArray_FILLWBYTE` (return_object, 0). -.. 
c:function:: PyObject *PyArray_SimpleNewFromData( \ - int nd, npy_intp* dims, int typenum, void* data) +:c:func:`PyArray_SimpleNewFromData` Sometimes, you want to wrap memory allocated elsewhere into an ndarray object for downstream use. This routine makes it diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 16ee48c5e..e53d1ca45 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -436,7 +436,7 @@ Linear Algebra Equivalents ``a`` * - ``rand(3,4)`` - - ``random.rand(3,4)`` + - ``random.rand(3,4)`` or ``random.random_sample((3, 4))`` - random 3x4 matrix * - ``linspace(1,3,4)`` diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst index 5ef8b145f..262f53c1c 100644 --- a/doc/source/user/quickstart.rst +++ b/doc/source/user/quickstart.rst @@ -884,6 +884,17 @@ The ``copy`` method makes a complete copy of the array and its data. [ 8, 10, 10, 11]]) +Sometimes ``copy`` should be called after slicing if the original array is not required anymore. +For example, suppose ``a`` is a huge intermediate result and the final result ``b`` only contains +a small fraction of ``a``, a deep copy should be made when constructing ``b`` with slicing:: + + >>> a = np.arange(int(1e8)) + >>> b = a[:100].copy() + >>> del a # the memory of ``a`` can be released. + +If ``b = a[:100]`` is used instead, ``a`` is referenced by ``b`` and will persist in memory +even if ``del a`` is executed. + Functions and Methods Overview ------------------------------ diff --git a/doc/source/user/whatisnumpy.rst b/doc/source/user/whatisnumpy.rst index d47c105ba..abaa2bfed 100644 --- a/doc/source/user/whatisnumpy.rst +++ b/doc/source/user/whatisnumpy.rst @@ -91,6 +91,11 @@ idiom is even simpler! This last example illustrates two of NumPy's features which are the basis of much of its power: vectorization and broadcasting. +.. _whatis-vectorization: + +Why is NumPy Fast? +------------------ + Vectorization describes the absence of any explicit looping, indexing, etc., in the code - these things are taking place, of course, just "behind the scenes" in optimized, pre-compiled C code. Vectorized @@ -120,8 +125,13 @@ the shape of the larger in such a way that the resulting broadcast is unambiguous. For detailed "rules" of broadcasting see `numpy.doc.broadcasting`. +Who Else Uses NumPy? +-------------------- + NumPy fully supports an object-oriented approach, starting, once again, with `ndarray`. For example, `ndarray` is a class, possessing numerous methods and attributes. Many of its methods are mirrored by functions in the outer-most NumPy namespace, allowing the programmer -to code in whichever paradigm they prefer. +to code in whichever paradigm they prefer. This flexibility has allowed the +NumPy array dialect and NumPy `ndarray` class to become the *de-facto* language +of multi-dimensional data interchange used in Python. 
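To make the vectorization and broadcasting discussion above concrete, a minimal sketch assuming standard NumPy semantics (the arrays are illustrative only, not text from this patch)::

    >>> a = np.array([[0.0], [10.0], [20.0]])   # shape (3, 1)
    >>> b = np.array([1.0, 2.0, 3.0])           # shape (3,)
    >>> a + b                                   # broadcast to shape (3, 3)
    array([[ 1.,  2.,  3.],
           [11., 12., 13.],
           [21., 22., 23.]])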
diff --git a/doc/sphinxext b/doc/sphinxext -Subproject de21addd6560576151938a4bc59543d55ce6f08 +Subproject a482f66913c1079d7439770f0119b55376bb1b8 diff --git a/numpy/_build_utils/src/apple_sgemv_fix.c b/numpy/_build_utils/src/apple_sgemv_fix.c index c33c68992..b1dbeb681 100644 --- a/numpy/_build_utils/src/apple_sgemv_fix.c +++ b/numpy/_build_utils/src/apple_sgemv_fix.c @@ -102,7 +102,7 @@ using_mavericks() __attribute__((destructor)) static void unloadlib(void) { - if (veclib) dlclose(veclib); + if (veclib) dlclose(veclib); } __attribute__((constructor)) @@ -224,30 +224,30 @@ void cblas_sgemv(const enum CBLAS_ORDER order, const float *X, const int incX, const float beta, float *Y, const int incY) { - char TA; - if (order == CblasColMajor) - { - if (TransA == CblasNoTrans) TA = 'N'; - else if (TransA == CblasTrans) TA = 'T'; - else if (TransA == CblasConjTrans) TA = 'C'; - else - { - cblas_xerbla(2, "cblas_sgemv","Illegal TransA setting, %d\n", TransA); - } - sgemv_(&TA, &M, &N, &alpha, A, &lda, X, &incX, &beta, Y, &incY); - } - else if (order == CblasRowMajor) - { - if (TransA == CblasNoTrans) TA = 'T'; - else if (TransA == CblasTrans) TA = 'N'; - else if (TransA == CblasConjTrans) TA = 'N'; - else - { - cblas_xerbla(2, "cblas_sgemv", "Illegal TransA setting, %d\n", TransA); - return; - } - sgemv_(&TA, &N, &M, &alpha, A, &lda, X, &incX, &beta, Y, &incY); - } - else - cblas_xerbla(1, "cblas_sgemv", "Illegal Order setting, %d\n", order); + char TA; + if (order == CblasColMajor) + { + if (TransA == CblasNoTrans) TA = 'N'; + else if (TransA == CblasTrans) TA = 'T'; + else if (TransA == CblasConjTrans) TA = 'C'; + else + { + cblas_xerbla(2, "cblas_sgemv","Illegal TransA setting, %d\n", TransA); + } + sgemv_(&TA, &M, &N, &alpha, A, &lda, X, &incX, &beta, Y, &incY); + } + else if (order == CblasRowMajor) + { + if (TransA == CblasNoTrans) TA = 'T'; + else if (TransA == CblasTrans) TA = 'N'; + else if (TransA == CblasConjTrans) TA = 'N'; + else + { + cblas_xerbla(2, "cblas_sgemv", "Illegal TransA setting, %d\n", TransA); + return; + } + sgemv_(&TA, &N, &M, &alpha, A, &lda, X, &incX, &beta, Y, &incY); + } + else + cblas_xerbla(1, "cblas_sgemv", "Illegal Order setting, %d\n", order); } diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py index 141a21fed..c9ed9d52c 100644 --- a/numpy/compat/py3k.py +++ b/numpy/compat/py3k.py @@ -11,6 +11,7 @@ __all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar', 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike'] import sys +import os try: from pathlib import Path, PurePath except ImportError: @@ -173,7 +174,6 @@ else: """ import imp - import os if info is None: path = os.path.dirname(fn) fo, fn, info = imp.find_module(name, [path]) @@ -195,7 +195,6 @@ else: # Backport os.fs_path, os.PathLike, and PurePath.__fspath__ if sys.version_info[:2] >= (3, 6): - import os os_fspath = os.fspath os_PathLike = os.PathLike else: diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 2f5a48ed8..cd2a6ce4e 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -161,62 +161,63 @@ add_newdoc('numpy.core', 'nditer', ---------- op : ndarray or sequence of array_like The array(s) to iterate over. + flags : sequence of str, optional - Flags to control the behavior of the iterator. + Flags to control the behavior of the iterator. - * "buffered" enables buffering when required. - * "c_index" causes a C-order index to be tracked. - * "f_index" causes a Fortran-order index to be tracked. 
- * "multi_index" causes a multi-index, or a tuple of indices + * ``buffered`` enables buffering when required. + * ``c_index`` causes a C-order index to be tracked. + * ``f_index`` causes a Fortran-order index to be tracked. + * ``multi_index`` causes a multi-index, or a tuple of indices with one per iteration dimension, to be tracked. - * "common_dtype" causes all the operands to be converted to + * ``common_dtype`` causes all the operands to be converted to a common data type, with copying or buffering as necessary. - * "copy_if_overlap" causes the iterator to determine if read + * ``copy_if_overlap`` causes the iterator to determine if read operands have overlap with write operands, and make temporary copies as necessary to avoid overlap. False positives (needless copying) are possible in some cases. - * "delay_bufalloc" delays allocation of the buffers until - a reset() call is made. Allows "allocate" operands to + * ``delay_bufalloc`` delays allocation of the buffers until + a reset() call is made. Allows ``allocate`` operands to be initialized before their values are copied into the buffers. - * "external_loop" causes the `values` given to be + * ``external_loop`` causes the ``values`` given to be one-dimensional arrays with multiple values instead of zero-dimensional arrays. - * "grow_inner" allows the `value` array sizes to be made - larger than the buffer size when both "buffered" and - "external_loop" is used. - * "ranged" allows the iterator to be restricted to a sub-range + * ``grow_inner`` allows the ``value`` array sizes to be made + larger than the buffer size when both ``buffered`` and + ``external_loop`` is used. + * ``ranged`` allows the iterator to be restricted to a sub-range of the iterindex values. - * "refs_ok" enables iteration of reference types, such as + * ``refs_ok`` enables iteration of reference types, such as object arrays. - * "reduce_ok" enables iteration of "readwrite" operands + * ``reduce_ok`` enables iteration of ``readwrite`` operands which are broadcasted, also known as reduction operands. - * "zerosize_ok" allows `itersize` to be zero. + * ``zerosize_ok`` allows `itersize` to be zero. op_flags : list of list of str, optional - This is a list of flags for each operand. At minimum, one of - "readonly", "readwrite", or "writeonly" must be specified. - - * "readonly" indicates the operand will only be read from. - * "readwrite" indicates the operand will be read from and written to. - * "writeonly" indicates the operand will only be written to. - * "no_broadcast" prevents the operand from being broadcasted. - * "contig" forces the operand data to be contiguous. - * "aligned" forces the operand data to be aligned. - * "nbo" forces the operand data to be in native byte order. - * "copy" allows a temporary read-only copy if required. - * "updateifcopy" allows a temporary read-write copy if required. - * "allocate" causes the array to be allocated if it is None - in the `op` parameter. - * "no_subtype" prevents an "allocate" operand from using a subtype. - * "arraymask" indicates that this operand is the mask to use + This is a list of flags for each operand. At minimum, one of + ``readonly``, ``readwrite``, or ``writeonly`` must be specified. + + * ``readonly`` indicates the operand will only be read from. + * ``readwrite`` indicates the operand will be read from and written to. + * ``writeonly`` indicates the operand will only be written to. + * ``no_broadcast`` prevents the operand from being broadcasted. 
+ * ``contig`` forces the operand data to be contiguous. + * ``aligned`` forces the operand data to be aligned. + * ``nbo`` forces the operand data to be in native byte order. + * ``copy`` allows a temporary read-only copy if required. + * ``updateifcopy`` allows a temporary read-write copy if required. + * ``allocate`` causes the array to be allocated if it is None + in the ``op`` parameter. + * ``no_subtype`` prevents an ``allocate`` operand from using a subtype. + * ``arraymask`` indicates that this operand is the mask to use for selecting elements when writing to operands with the 'writemasked' flag set. The iterator does not enforce this, but when writing from a buffer back to the array, it only copies those elements indicated by this mask. - * 'writemasked' indicates that only elements where the chosen - 'arraymask' operand is True will be written to. - * "overlap_assume_elementwise" can be used to mark operands that are + * ``writemasked`` indicates that only elements where the chosen + ``arraymask`` operand is True will be written to. + * ``overlap_assume_elementwise`` can be used to mark operands that are accessed only in the iterator order, to allow less conservative - copying when "copy_if_overlap" is present. + copying when ``copy_if_overlap`` is present. op_dtypes : dtype or tuple of dtype(s), optional The required data type(s) of the operands. If copying or buffering is enabled, the data will be converted to/from their original types. @@ -225,7 +226,7 @@ add_newdoc('numpy.core', 'nditer', Fortran order, 'A' means 'F' order if all the arrays are Fortran contiguous, 'C' order otherwise, and 'K' means as close to the order the array elements appear in memory as possible. This also - affects the element memory order of "allocate" operands, as they + affects the element memory order of ``allocate`` operands, as they are allocated to be compatible with iteration order. Default is 'K'. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional @@ -233,20 +234,20 @@ add_newdoc('numpy.core', 'nditer', or buffering. Setting this to 'unsafe' is not recommended, as it can adversely affect accumulations. - * 'no' means the data types should not be cast at all. - * 'equiv' means only byte-order changes are allowed. - * 'safe' means only casts which can preserve values are allowed. - * 'same_kind' means only safe casts or casts within a kind, - like float64 to float32, are allowed. - * 'unsafe' means any data conversions may be done. + * 'no' means the data types should not be cast at all. + * 'equiv' means only byte-order changes are allowed. + * 'safe' means only casts which can preserve values are allowed. + * 'same_kind' means only safe casts or casts within a kind, + like float64 to float32, are allowed. + * 'unsafe' means any data conversions may be done. op_axes : list of list of ints, optional If provided, is a list of ints or None for each operands. The list of axes for an operand is a mapping from the dimensions of the iterator to the dimensions of the operand. A value of -1 can be placed for entries, causing that dimension to be - treated as "newaxis". + treated as `newaxis`. itershape : tuple of ints, optional - The desired shape of the iterator. This allows "allocate" operands + The desired shape of the iterator. This allows ``allocate`` operands with a dimension mapped by op_axes not corresponding to a dimension of a different operand to get a value not equal to 1 for that dimension. 
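(Illustration, not part of the patch: to ground the flag descriptions documented above, a minimal doctest-style sketch tracking a ``multi_index`` while iterating a throwaway 2x3 array; any array would do.)

    >>> a = np.arange(6).reshape(2, 3)
    >>> it = np.nditer(a, flags=['multi_index'])
    >>> for x in it:
    ...     print(it.multi_index, int(x))
    (0, 0) 0
    (0, 1) 1
    (0, 2) 2
    (1, 0) 3
    (1, 1) 4
    (1, 2) 5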
@@ -263,19 +264,19 @@ add_newdoc('numpy.core', 'nditer', finished : bool Whether the iteration over the operands is finished or not. has_delayed_bufalloc : bool - If True, the iterator was created with the "delay_bufalloc" flag, + If True, the iterator was created with the ``delay_bufalloc`` flag, and no reset() function was called on it yet. has_index : bool - If True, the iterator was created with either the "c_index" or - the "f_index" flag, and the property `index` can be used to + If True, the iterator was created with either the ``c_index`` or + the ``f_index`` flag, and the property `index` can be used to retrieve it. has_multi_index : bool - If True, the iterator was created with the "multi_index" flag, + If True, the iterator was created with the ``multi_index`` flag, and the property `multi_index` can be used to retrieve it. index - When the "c_index" or "f_index" flag was used, this property + When the ``c_index`` or ``f_index`` flag was used, this property provides access to the index. Raises a ValueError if accessed - and `has_index` is False. + and ``has_index`` is False. iterationneedsapi : bool Whether iteration requires access to the Python API, for example if one of the operands is an object array. @@ -288,11 +289,11 @@ add_newdoc('numpy.core', 'nditer', and optimized iterator access pattern. Valid only before the iterator is closed. multi_index - When the "multi_index" flag was used, this property + When the ``multi_index`` flag was used, this property provides access to the index. Raises a ValueError if accessed - accessed and `has_multi_index` is False. + and ``has_multi_index`` is False. ndim : int - The iterator's dimension. + The dimensions of the iterator. nop : int The number of iterator operands. operands : tuple of operand(s) @@ -301,8 +302,8 @@ add_newdoc('numpy.core', 'nditer', shape : tuple of ints Shape tuple, the shape of the iterator. value - Value of `operands` at current iteration. Normally, this is a - tuple of array scalars, but if the flag "external_loop" is used, + Value of ``operands`` at current iteration. Normally, this is a + tuple of array scalars, but if the flag ``external_loop`` is used, it is a tuple of one dimensional arrays. Notes @@ -313,12 +314,12 @@ add_newdoc('numpy.core', 'nditer', The Python exposure supplies two iteration interfaces, one which follows the Python iterator protocol, and another which mirrors the C-style do-while pattern. The native Python approach is better in most cases, but - if you need the iterator's coordinates or index, use the C-style pattern. + if you need the coordinates or index of an iterator, use the C-style pattern. Examples -------- Here is how we might write an ``iter_add`` function, using the - Python iterator protocol:: + Python iterator protocol: >>> def iter_add_py(x, y, out=None): ... addop = np.add @@ -329,7 +330,7 @@ add_newdoc('numpy.core', 'nditer', ... addop(a, b, out=c) ... return it.operands[2] - Here is the same function, but following the C-style pattern:: + Here is the same function, but following the C-style pattern: >>> def iter_add(x, y, out=None): ... addop = np.add @@ -341,7 +342,7 @@ add_newdoc('numpy.core', 'nditer', ... it.iternext() ... return it.operands[2] - Here is an example outer product function:: + Here is an example outer product function: >>> def outer_it(x, y, out=None): ...
mulop = np.multiply @@ -361,7 +362,7 @@ add_newdoc('numpy.core', 'nditer', array([[1, 2, 3], [2, 4, 6]]) - Here is an example function which operates like a "lambda" ufunc:: + Here is an example function which operates like a "lambda" ufunc: >>> def luf(lamdaexpr, *args, **kwargs): ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' @@ -2031,21 +2032,27 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', as well as documented private attributes): .. autoattribute:: numpy.core._internal._ctypes.data + :noindex: .. autoattribute:: numpy.core._internal._ctypes.shape + :noindex: .. autoattribute:: numpy.core._internal._ctypes.strides + :noindex: .. automethod:: numpy.core._internal._ctypes.data_as + :noindex: .. automethod:: numpy.core._internal._ctypes.shape_as + :noindex: .. automethod:: numpy.core._internal._ctypes.strides_as + :noindex: If the ctypes module is not available, then the ctypes attribute of array objects still returns something useful, but ctypes objects are not returned and errors may be raised instead. In particular, - the object will still have the as parameter attribute which will + the object will still have the ``as_parameter`` attribute which will return an integer equal to the data attribute. Examples @@ -2443,8 +2450,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('strides', add_newdoc('numpy.core.multiarray', 'ndarray', ('T', """ - Same as self.transpose(), except that self is returned if - self.ndim < 2. + The transposed array. + + Same as ``self.transpose()``. Examples -------- @@ -2461,6 +2469,10 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('T', >>> x.T array([ 1., 2., 3., 4.]) + See Also + -------- + transpose + """)) @@ -4861,7 +4873,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce', out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with - :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a + ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 @@ -4978,7 +4990,7 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate', out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with - :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a + ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 @@ -5060,7 +5072,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat', out : ndarray, None, or tuple of ndarray and None, optional A location into which the result is stored. If not provided or `None`, a freshly-allocated array is returned. For consistency with - :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a + ``ufunc.__call__``, if given as a keyword, this may be wrapped in a 1-element tuple. .. versionchanged:: 1.13.0 diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py index 5e0105beb..227c08cd6 100644 --- a/numpy/core/_exceptions.py +++ b/numpy/core/_exceptions.py @@ -5,8 +5,6 @@ in python where it's easier. By putting the formatting in `__str__`, we also avoid paying the cost for users who silence the exceptions. 
""" -from numpy.core.overrides import set_module - def _unpack_tuple(tup): if len(tup) == 1: diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index 51362c761..953e7e1b8 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -115,10 +115,11 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # Note that x may not be inexact and that we need it to be an array, # not a scalar. x = asanyarray(arr - arrmean) - if issubclass(arr.dtype.type, nt.complexfloating): - x = um.multiply(x, um.conjugate(x), out=x).real - else: + if issubclass(arr.dtype.type, (nt.floating, nt.integer)): x = um.multiply(x, x, out=x) + else: + x = um.multiply(x, um.conjugate(x), out=x).real + ret = umr_sum(x, axis, dtype, out, keepdims) # Compute degrees of freedom and make sure it is not negative. diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py index 687a8467b..de0bb81fe 100644 --- a/numpy/core/code_generators/generate_umath.py +++ b/numpy/core/code_generators/generate_umath.py @@ -827,7 +827,7 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.isnan'), None, - TD(inexact, out='?'), + TD(nodatetime_or_obj, out='?'), ), 'isnat': Ufunc(1, 1, None, @@ -839,13 +839,13 @@ defdict = { Ufunc(1, 1, None, docstrings.get('numpy.core.umath.isinf'), None, - TD(inexact, out='?'), + TD(nodatetime_or_obj, out='?'), ), 'isfinite': Ufunc(1, 1, None, docstrings.get('numpy.core.umath.isfinite'), None, - TD(inexact, out='?'), + TD(nodatetime_or_obj, out='?'), ), 'signbit': Ufunc(1, 1, None, diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 6dd6982df..7591a7952 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -1313,7 +1313,7 @@ add_newdoc('numpy.core.umath', 'floor_divide', """ Return the largest integer smaller or equal to the division of the inputs. It is equivalent to the Python ``//`` operator and pairs with the - Python ``%`` (`remainder`), function so that ``b = a % b + b * (a // b)`` + Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)`` up to roundoff. Parameters @@ -2607,7 +2607,7 @@ add_newdoc('numpy.core.umath', 'matmul', >>> a = np.array([[1, 0], ... [0, 1]]) - >>> b = np.array([[4, 1], + >>> b = np.array([[4, 1], ... [2, 2]]) >>> np.matmul(a, b) array([[4, 1], diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index 007fc6186..3fd7d14c4 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -29,7 +29,7 @@ from numpy.compat import asbytes, long import numpy __all__ = [ - 'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal', + 'equal', 'not_equal', 'greater_equal', 'less_equal', 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize', 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs', 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace', diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index cdb6c4bed..cb10c3947 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -52,17 +52,20 @@ def _wrapit(obj, method, *args, **kwds): def _wrapfunc(obj, method, *args, **kwds): - try: - return getattr(obj, method)(*args, **kwds) - - # An AttributeError occurs if the object does not have - # such a method in its class. 
+ bound = getattr(obj, method, None) + if bound is None: + return _wrapit(obj, method, *args, **kwds) - # A TypeError occurs if the object does have such a method - # in its class, but its signature is not identical to that - # of NumPy's. This situation has occurred in the case of - # a downstream library like 'pandas'. - except (AttributeError, TypeError): + try: + return bound(*args, **kwds) + except TypeError: + # A TypeError occurs if the object does have such a method in its + # class, but its signature is not identical to that of NumPy's. This + # situation has occurred in the case of a downstream library like + # 'pandas'. + # + # Call _wrapit from within the except clause to ensure a potential + # exception has a traceback chain. return _wrapit(obj, method, *args, **kwds) @@ -127,7 +130,8 @@ def take(a, indices, axis=None, out=None, mode='raise'): input array is used. out : ndarray, optional (Ni..., Nj..., Nk...) If provided, the result will be placed in this array. It should - be of the appropriate shape and dtype. + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. @@ -352,7 +356,8 @@ def choose(a, choices, out=None, mode='raise'): ``choices.shape[0]``) is taken as defining the "sequence". out : array, optional If provided, the result will be inserted into this array. It should - be of the appropriate shape and dtype. + be of the appropriate shape and dtype. Note that `out` is always + buffered if `mode='raise'`; use other modes for better performance. mode : {'raise' (default), 'wrap', 'clip'}, optional Specifies how indices outside `[0, n-1]` will be treated: @@ -509,7 +514,8 @@ def put(a, ind, v, mode='raise'): 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note - that this disables indexing with negative numbers. + that this disables indexing with negative numbers. In 'raise' mode, + if an exception occurs the target array may still be modified. See Also -------- @@ -912,6 +918,7 @@ def sort(a, axis=-1, kind='quicksort', order=None): data types. .. versionadded:: 1.17.0 + Timsort is added for better performance on already or nearly sorted data. On random data timsort is almost identical to mergesort. It is now used for stable sort while quicksort is still the diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index f8800b83e..296213823 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -6,7 +6,7 @@ import operator from . import numeric as _nx from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS, - TooHardError, asanyarray) + TooHardError, asanyarray, ndim) from numpy.core.multiarray import add_docstring from numpy.core import overrides @@ -140,7 +140,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, dtype = dt delta = stop - start - y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * delta.ndim) + y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta)) # In-place multiplication y *= delta/div is faster, but prevents the multiplicant # from overriding what class is produced, and thus prevents, e.g. use of Quantities, # see gh-7142. Hence, we multiply in place only for standard scalar types. 
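(Illustration, not part of the patch: the ``linspace`` hunk above swaps the ``delta.ndim`` attribute lookup for the ``ndim`` function. The apparent reason, inferred from the change rather than stated in it, is that ``stop - start`` can be a plain Python scalar, which has no ``.ndim`` attribute, while ``numpy.ndim`` accepts scalars and array_likes alike.)

    >>> import numpy as np
    >>> np.ndim(3.0 - 1.0)     # plain float: the function form still works
    0
    >>> (3.0 - 1.0).ndim
    Traceback (most recent call last):
      ...
    AttributeError: 'float' object has no attribute 'ndim'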
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py index 544b8b35f..31fa6b9bf 100644 --- a/numpy/core/getlimits.py +++ b/numpy/core/getlimits.py @@ -505,6 +505,7 @@ class iinfo(object): if self.kind not in 'iu': raise ValueError("Invalid integer data type %r." % (self.kind,)) + @property def min(self): """Minimum value of given dtype.""" if self.kind == 'u': @@ -517,8 +518,7 @@ class iinfo(object): iinfo._min_vals[self.key] = val return val - min = property(min) - + @property def max(self): """Maximum value of given dtype.""" try: @@ -531,8 +531,6 @@ class iinfo(object): iinfo._max_vals[self.key] = val return val - max = property(max) - def __str__(self): """String representation.""" fmt = ( diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 386049410..42fee4eb7 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -1203,20 +1203,18 @@ def _tensordot_dispatcher(a, b, axes=None): @array_function_dispatch(_tensordot_dispatcher) def tensordot(a, b, axes=2): """ - Compute tensor dot product along specified axes for arrays >= 1-D. + Compute tensor dot product along specified axes. - Given two tensors (arrays of dimension greater than or equal to one), - `a` and `b`, and an array_like object containing two array_like - objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s - elements (components) over the axes specified by ``a_axes`` and - ``b_axes``. The third argument can be a single non-negative - integer_like scalar, ``N``; if it is such, then the last ``N`` - dimensions of `a` and the first ``N`` dimensions of `b` are summed - over. + Given two tensors, `a` and `b`, and an array_like object containing + two array_like objects, ``(a_axes, b_axes)``, sum the products of + `a`'s and `b`'s elements (components) over the axes specified by + ``a_axes`` and ``b_axes``. The third argument can be a single non-negative + integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions + of `a` and the first ``N`` dimensions of `b` are summed over. Parameters ---------- - a, b : array_like, len(shape) >= 1 + a, b : array_like Tensors to "dot". axes : int or (2,) array_like diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py index 46097f39c..9f91adc83 100644 --- a/numpy/core/overrides.py +++ b/numpy/core/overrides.py @@ -1,7 +1,6 @@ """Implementation of __array_function__ overrides from NEP-18.""" import collections import functools -import os from numpy.core._multiarray_umath import ( add_docstring, implement_array_function, _get_implementing_args) diff --git a/numpy/core/records.py b/numpy/core/records.py index 42aca5b60..2adcdae61 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -610,9 +610,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None, # and determine the formats. 
formats = [] for obj in arrayList: - if not isinstance(obj, ndarray): - raise ValueError("item in the array list must be an ndarray.") - formats.append(obj.dtype.str) + formats.append(obj.dtype) if dtype is not None: descr = sb.dtype(dtype) @@ -768,7 +766,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, >>> r.shape (10,) """ - + if dtype is None and formats is None: raise TypeError("fromfile() needs a 'dtype' or 'formats' argument") diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index 08e07bb66..0eac772e8 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -5,7 +5,6 @@ __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack', import functools import operator -import types import warnings from . import numeric as _nx @@ -347,9 +346,9 @@ def stack(arrays, axis=0, out=None): """ Join a sequence of arrays along a new axis. - The `axis` parameter specifies the index of the new axis in the dimensions - of the result. For example, if ``axis=0`` it will be the first dimension - and if ``axis=-1`` it will be the last dimension. + The ``axis`` parameter specifies the index of the new axis in the + dimensions of the result. For example, if ``axis=0`` it will be the first + dimension and if ``axis=-1`` it will be the last dimension. .. versionadded:: 1.10.0 @@ -357,8 +356,10 @@ def stack(arrays, axis=0, out=None): ---------- arrays : sequence of array_like Each array must have the same shape. + axis : int, optional The axis in the result array along which the input arrays are stacked. + out : ndarray, optional If provided, the destination to place the result. The shape must be correct, matching that of what stack would have returned if no diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c index 02a423e3a..0ac1b01c6 100644 --- a/numpy/core/src/common/array_assign.c +++ b/numpy/core/src/common/array_assign.c @@ -79,7 +79,7 @@ broadcast_error: { Py_DECREF(errmsg); return -1; - } + } } /* See array_assign.h for parameter documentation */ diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c index 6e31fd3f7..7ff33ebd7 100644 --- a/numpy/core/src/multiarray/array_assign_array.c +++ b/numpy/core/src/multiarray/array_assign_array.c @@ -420,14 +420,14 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src, /* A straightforward where-masked assignment */ /* Do the masked assignment with raw array iteration */ - if (raw_array_wheremasked_assign_array( - PyArray_NDIM(dst), PyArray_DIMS(dst), - PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), - PyArray_DESCR(src), PyArray_DATA(src), src_strides, - PyArray_DESCR(wheremask), PyArray_DATA(wheremask), - wheremask_strides) < 0) { - goto fail; - } + if (raw_array_wheremasked_assign_array( + PyArray_NDIM(dst), PyArray_DIMS(dst), + PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst), + PyArray_DESCR(src), PyArray_DATA(src), src_strides, + PyArray_DESCR(wheremask), PyArray_DATA(wheremask), + wheremask_strides) < 0) { + goto fail; + } } if (copied_src) { diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index addb67732..52694d491 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -343,7 +343,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims, typestr = PyDict_GetItemString(ip, "typestr"); #if defined(NPY_PY3K) /* Allow unicode type strings */ - if (PyUnicode_Check(typestr)) { + if (typestr && 
PyUnicode_Check(typestr)) { tmp = PyUnicode_AsASCIIString(typestr); typestr = tmp; } diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c index 2da274658..1e0c1eb5f 100644 --- a/numpy/core/src/multiarray/compiled_base.c +++ b/numpy/core/src/multiarray/compiled_base.c @@ -1198,7 +1198,7 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds) * if "dims" is detected in keywords, then replace it with * the new "shape" argument and continue processing as usual. */ - if (kwds) { + if (kwds) { PyObject *dims_item, *shape_item; dims_item = PyDict_GetItemString(kwds, "dims"); shape_item = PyDict_GetItemString(kwds, "shape"); diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index b2e329d45..42cd31069 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -2484,7 +2484,7 @@ PyArray_FromInterface(PyObject *origin) } #endif /* Get offset number from interface specification */ - attr = PyDict_GetItemString(origin, "offset"); + attr = PyDict_GetItemString(iface, "offset"); if (attr) { npy_longlong num = PyLong_AsLongLong(attr); if (error_converting(num)) { @@ -3693,32 +3693,12 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, Py_DECREF(type); return NULL; } - if (Py_TYPE(buf)->tp_as_buffer == NULL -#if defined(NPY_PY3K) - || Py_TYPE(buf)->tp_as_buffer->bf_getbuffer == NULL -#else - || (Py_TYPE(buf)->tp_as_buffer->bf_getwritebuffer == NULL - && Py_TYPE(buf)->tp_as_buffer->bf_getreadbuffer == NULL) -#endif - ) { - PyObject *newbuf; - newbuf = PyObject_GetAttr(buf, npy_ma_str_buffer); - if (newbuf == NULL) { - Py_DECREF(type); - return NULL; - } - buf = newbuf; - } - else { - Py_INCREF(buf); - } #if defined(NPY_PY3K) if (PyObject_GetBuffer(buf, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) { writeable = 0; PyErr_Clear(); if (PyObject_GetBuffer(buf, &view, PyBUF_SIMPLE) < 0) { - Py_DECREF(buf); Py_DECREF(type); return NULL; } @@ -3738,7 +3718,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, writeable = 0; PyErr_Clear(); if (PyObject_AsReadBuffer(buf, (void *)&data, &ts) == -1) { - Py_DECREF(buf); Py_DECREF(type); return NULL; } @@ -3749,7 +3728,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, PyErr_Format(PyExc_ValueError, "offset must be non-negative and no greater than buffer "\ "length (%" NPY_INTP_FMT ")", (npy_intp)ts); - Py_DECREF(buf); Py_DECREF(type); return NULL; } @@ -3763,7 +3741,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, PyErr_SetString(PyExc_ValueError, "buffer size must be a multiple"\ " of element size"); - Py_DECREF(buf); Py_DECREF(type); return NULL; } @@ -3774,7 +3751,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, PyErr_SetString(PyExc_ValueError, "buffer is smaller than requested"\ " size"); - Py_DECREF(buf); Py_DECREF(type); return NULL; } @@ -3784,7 +3760,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type, &PyArray_Type, type, 1, &n, NULL, data, NPY_ARRAY_DEFAULT, NULL, buf); - Py_DECREF(buf); if (ret == NULL) { return NULL; } diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c index 54d19d993..f1e4feac2 100644 --- a/numpy/core/src/multiarray/datetime.c +++ b/numpy/core/src/multiarray/datetime.c @@ -409,6 +409,26 @@ PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d) } /* + * Computes the python `ret, d = divmod(d, unit)`. 
+ * + * Note that GCC is smart enough at -O2 to eliminate the `if(*d < 0)` branch + * for subsequent calls to this command - it is able to deduce that `*d >= 0`. + */ +static inline +npy_int64 extract_unit(npy_datetime *d, npy_datetime unit) { + assert(unit > 0); + npy_int64 div = *d / unit; + npy_int64 mod = *d % unit; + if (mod < 0) { + mod += unit; + div -= 1; + } + assert(mod >= 0); + *d = mod; + return div; +} + +/* * Converts a datetime based on the given metadata into a datetimestruct */ NPY_NO_EXPORT int @@ -451,14 +471,8 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, break; case NPY_FR_M: - if (dt >= 0) { - out->year = 1970 + dt / 12; - out->month = dt % 12 + 1; - } - else { - out->year = 1969 + (dt + 1) / 12; - out->month = 12 + (dt + 1)% 12; - } + out->year = 1970 + extract_unit(&dt, 12); + out->month = dt + 1; break; case NPY_FR_W: @@ -473,169 +487,100 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta, case NPY_FR_h: perday = 24LL; - if (dt >= 0) { - set_datetimestruct_days(dt / perday, out); - dt = dt % perday; - } - else { - set_datetimestruct_days((dt - (perday-1)) / perday, out); - dt = (perday-1) + (dt + 1) % perday; - } + set_datetimestruct_days(extract_unit(&dt, perday), out); out->hour = (int)dt; break; case NPY_FR_m: perday = 24LL * 60; - if (dt >= 0) { - set_datetimestruct_days(dt / perday, out); - dt = dt % perday; - } - else { - set_datetimestruct_days((dt - (perday-1)) / perday, out); - dt = (perday-1) + (dt + 1) % perday; - } - out->hour = (int)(dt / 60); - out->min = (int)(dt % 60); + set_datetimestruct_days(extract_unit(&dt, perday), out); + out->hour = (int)extract_unit(&dt, 60); + out->min = (int)dt; break; case NPY_FR_s: perday = 24LL * 60 * 60; - if (dt >= 0) { - set_datetimestruct_days(dt / perday, out); - dt = dt % perday; - } - else { - set_datetimestruct_days((dt - (perday-1)) / perday, out); - dt = (perday-1) + (dt + 1) % perday; - } - out->hour = (int)(dt / (60*60)); - out->min = (int)((dt / 60) % 60); - out->sec = (int)(dt % 60); + set_datetimestruct_days(extract_unit(&dt, perday), out); + out->hour = (int)extract_unit(&dt, 60*60); + out->min = (int)extract_unit(&dt, 60); + out->sec = (int)dt; break; case NPY_FR_ms: perday = 24LL * 60 * 60 * 1000; - if (dt >= 0) { - set_datetimestruct_days(dt / perday, out); - dt = dt % perday; - } - else { - set_datetimestruct_days((dt - (perday-1)) / perday, out); - dt = (perday-1) + (dt + 1) % perday; - } - out->hour = (int)(dt / (60*60*1000LL)); - out->min = (int)((dt / (60*1000LL)) % 60); - out->sec = (int)((dt / 1000LL) % 60); - out->us = (int)((dt % 1000LL) * 1000); + set_datetimestruct_days(extract_unit(&dt, perday), out); + out->hour = (int)extract_unit(&dt, 1000LL*60*60); + out->min = (int)extract_unit(&dt, 1000LL*60); + out->sec = (int)extract_unit(&dt, 1000LL); + out->us = (int)(dt * 1000); break; case NPY_FR_us: perday = 24LL * 60LL * 60LL * 1000LL * 1000LL; - - if (dt >= 0) { - set_datetimestruct_days(dt / perday, out); - dt = dt % perday; - } - else { - set_datetimestruct_days((dt - (perday-1)) / perday, out); - dt = (perday-1) + (dt + 1) % perday; - } - out->hour = (int)(dt / (60*60*1000000LL)); - out->min = (int)((dt / (60*1000000LL)) % 60); - out->sec = (int)((dt / 1000000LL) % 60); - out->us = (int)(dt % 1000000LL); + set_datetimestruct_days(extract_unit(&dt, perday), out); + out->hour = (int)extract_unit(&dt, 1000LL*1000*60*60); + out->min = (int)extract_unit(&dt, 1000LL*1000*60); + out->sec = (int)extract_unit(&dt, 1000LL*1000); + out->us = (int)dt; break; 
case NPY_FR_ns: perday = 24LL * 60LL * 60LL * 1000LL * 1000LL * 1000LL; - if (dt >= 0) { - set_datetimestruct_days(dt / perday, out); - dt = dt % perday; - } - else { - set_datetimestruct_days((dt - (perday-1)) / perday, out); - dt = (perday-1) + (dt + 1) % perday; - } - out->hour = (int)(dt / (60*60*1000000000LL)); - out->min = (int)((dt / (60*1000000000LL)) % 60); - out->sec = (int)((dt / 1000000000LL) % 60); - out->us = (int)((dt / 1000LL) % 1000000LL); - out->ps = (int)((dt % 1000LL) * 1000); + set_datetimestruct_days(extract_unit(&dt, perday), out); + out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*60*60); + out->min = (int)extract_unit(&dt, 1000LL*1000*1000*60); + out->sec = (int)extract_unit(&dt, 1000LL*1000*1000); + out->us = (int)extract_unit(&dt, 1000LL); + out->ps = (int)(dt * 1000); break; case NPY_FR_ps: perday = 24LL * 60 * 60 * 1000 * 1000 * 1000 * 1000; - if (dt >= 0) { - set_datetimestruct_days(dt / perday, out); - dt = dt % perday; - } - else { - set_datetimestruct_days((dt - (perday-1)) / perday, out); - dt = (perday-1) + (dt + 1) % perday; - } - out->hour = (int)(dt / (60*60*1000000000000LL)); - out->min = (int)((dt / (60*1000000000000LL)) % 60); - out->sec = (int)((dt / 1000000000000LL) % 60); - out->us = (int)((dt / 1000000LL) % 1000000LL); - out->ps = (int)(dt % 1000000LL); + set_datetimestruct_days(extract_unit(&dt, perday), out); + out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*1000*60*60); + out->min = (int)extract_unit(&dt, 1000LL*1000*1000*1000*60); + out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000); + out->us = (int)extract_unit(&dt, 1000LL*1000); + out->ps = (int)(dt); break; case NPY_FR_fs: /* entire range is only +- 2.6 hours */ - if (dt >= 0) { - out->hour = (int)(dt / (60*60*1000000000000000LL)); - out->min = (int)((dt / (60*1000000000000000LL)) % 60); - out->sec = (int)((dt / 1000000000000000LL) % 60); - out->us = (int)((dt / 1000000000LL) % 1000000LL); - out->ps = (int)((dt / 1000LL) % 1000000LL); - out->as = (int)((dt % 1000LL) * 1000); - } - else { - npy_datetime minutes; - - minutes = dt / (60*1000000000000000LL); - dt = dt % (60*1000000000000000LL); - if (dt < 0) { - dt += (60*1000000000000000LL); - --minutes; - } - /* Offset the negative minutes */ - add_minutes_to_datetimestruct(out, minutes); - out->sec = (int)((dt / 1000000000000000LL) % 60); - out->us = (int)((dt / 1000000000LL) % 1000000LL); - out->ps = (int)((dt / 1000LL) % 1000000LL); - out->as = (int)((dt % 1000LL) * 1000); - } + out->hour = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*60*60); + if (out->hour < 0) { + out->year = 1969; + out->month = 12; + out->day = 31; + out->hour += 24; + assert(out->hour >= 0); + } + out->min = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*60); + out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000); + out->us = (int)extract_unit(&dt, 1000LL*1000*1000); + out->ps = (int)extract_unit(&dt, 1000LL); + out->as = (int)(dt * 1000); break; case NPY_FR_as: /* entire range is only +- 9.2 seconds */ - if (dt >= 0) { - out->sec = (int)((dt / 1000000000000000000LL) % 60); - out->us = (int)((dt / 1000000000000LL) % 1000000LL); - out->ps = (int)((dt / 1000000LL) % 1000000LL); - out->as = (int)(dt % 1000000LL); - } - else { - npy_datetime seconds; - - seconds = dt / 1000000000000000000LL; - dt = dt % 1000000000000000000LL; - if (dt < 0) { - dt += 1000000000000000000LL; - --seconds; - } - /* Offset the negative seconds */ - add_seconds_to_datetimestruct(out, seconds); - out->us = (int)((dt / 1000000000000LL) % 1000000LL); - out->ps = 
(int)((dt / 1000000LL) % 1000000LL); - out->as = (int)(dt % 1000000LL); - } + out->sec = (int)extract_unit(&dt, 1000LL*1000*1000*1000*1000*1000); + if (out->sec < 0) { + out->year = 1969; + out->month = 12; + out->day = 31; + out->hour = 23; + out->min = 59; + out->sec += 60; + assert(out->sec >= 0); + } + out->us = (int)extract_unit(&dt, 1000LL*1000*1000*1000); + out->ps = (int)extract_unit(&dt, 1000LL*1000); + out->as = (int)dt; break; default: @@ -2468,6 +2413,9 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj, return -1; } *out = PyLong_AsLongLong(obj); + if (error_converting(*out)) { + return -1; + } return 0; } /* Datetime scalar */ @@ -2666,6 +2614,9 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, } *out = PyLong_AsLongLong(obj); + if (error_converting(*out)) { + return -1; + } return 0; } /* Timedelta scalar */ @@ -2853,6 +2804,9 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj, } *out = PyLong_AsLongLong(obj); + if (error_converting(*out)) { + return -1; + } return 0; } else { diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c index 3ab07ad31..a90416a40 100644 --- a/numpy/core/src/multiarray/dtype_transfer.c +++ b/numpy/core/src/multiarray/dtype_transfer.c @@ -2375,7 +2375,7 @@ get_subarray_transfer_function(int aligned, /* Get the subarray shapes and sizes */ if (PyDataType_HASSUBARRAY(src_dtype)) { - if (!(PyArray_IntpConverter(src_dtype->subarray->shape, + if (!(PyArray_IntpConverter(src_dtype->subarray->shape, &src_shape))) { PyErr_SetString(PyExc_ValueError, "invalid subarray shape"); @@ -2385,7 +2385,7 @@ get_subarray_transfer_function(int aligned, src_dtype = src_dtype->subarray->base; } if (PyDataType_HASSUBARRAY(dst_dtype)) { - if (!(PyArray_IntpConverter(dst_dtype->subarray->shape, + if (!(PyArray_IntpConverter(dst_dtype->subarray->shape, &dst_shape))) { npy_free_cache_dim_obj(src_shape); PyErr_SetString(PyExc_ValueError, diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c index 85ea49fb4..e378f1133 100644 --- a/numpy/core/src/multiarray/flagsobject.c +++ b/numpy/core/src/multiarray/flagsobject.c @@ -147,7 +147,7 @@ _UpdateContiguousFlags(PyArrayObject *ap) if (PyArray_STRIDES(ap)[i] != sd) { is_c_contig = 0; break; - } + } /* contiguous, if it got this far */ if (dim == 0) { break; diff --git a/numpy/core/src/multiarray/hashdescr.c b/numpy/core/src/multiarray/hashdescr.c index 8465093b9..0b23b6c21 100644 --- a/numpy/core/src/multiarray/hashdescr.c +++ b/numpy/core/src/multiarray/hashdescr.c @@ -36,17 +36,17 @@ static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l); */ static char _normalize_byteorder(char byteorder) { - switch(byteorder) { - case '=': - if (PyArray_GetEndianness() == NPY_CPU_BIG) { - return '>'; - } - else { - return '<'; - } - default: - return byteorder; - } + switch(byteorder) { + case '=': + if (PyArray_GetEndianness() == NPY_CPU_BIG) { + return '>'; + } + else { + return '<'; + } + default: + return byteorder; + } } /* diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c index 4888224f3..6065c1df4 100644 --- a/numpy/core/src/multiarray/item_selection.c +++ b/numpy/core/src/multiarray/item_selection.c @@ -98,6 +98,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis, goto fail; } + if (arrays_overlap(out, self)) { + flags |= NPY_ARRAY_ENSURECOPY; + } + if (clipmode == NPY_RAISE) { /* * we 
need to make sure and get a copy @@ -261,6 +265,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, npy_intp i, chunk, ni, max_item, nv, tmp; char *src, *dest; int copied = 0; + int overlap = 0; indices = NULL; values = NULL; @@ -274,24 +279,6 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, return NULL; } - if (!PyArray_ISCONTIGUOUS(self)) { - PyArrayObject *obj; - int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY; - - if (clipmode == NPY_RAISE) { - flags |= NPY_ARRAY_ENSURECOPY; - } - Py_INCREF(PyArray_DESCR(self)); - obj = (PyArrayObject *)PyArray_FromArray(self, - PyArray_DESCR(self), flags); - if (obj != self) { - copied = 1; - } - self = obj; - } - max_item = PyArray_SIZE(self); - dest = PyArray_DATA(self); - chunk = PyArray_DESCR(self)->elsize; indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0, NPY_INTP, 0, 0); if (indices == NULL) { @@ -308,6 +295,25 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0, if (nv <= 0) { goto finish; } + + overlap = arrays_overlap(self, values) || arrays_overlap(self, indices); + if (overlap || !PyArray_ISCONTIGUOUS(self)) { + PyArrayObject *obj; + int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY | + NPY_ARRAY_ENSURECOPY; + + Py_INCREF(PyArray_DESCR(self)); + obj = (PyArrayObject *)PyArray_FromArray(self, + PyArray_DESCR(self), flags); + if (obj != self) { + copied = 1; + } + self = obj; + } + max_item = PyArray_SIZE(self); + dest = PyArray_DATA(self); + chunk = PyArray_DESCR(self)->elsize; + if (PyDataType_REFCHK(PyArray_DESCR(self))) { switch(clipmode) { case NPY_RAISE: @@ -434,10 +440,11 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) PyArray_FastPutmaskFunc *func; PyArrayObject *mask, *values; PyArray_Descr *dtype; - npy_intp i, j, chunk, ni, max_item, nv; + npy_intp i, j, chunk, ni, nv; char *src, *dest; npy_bool *mask_data; int copied = 0; + int overlap = 0; mask = NULL; values = NULL; @@ -447,29 +454,14 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) "be an array"); return NULL; } - if (!PyArray_ISCONTIGUOUS(self)) { - PyArrayObject *obj; - - dtype = PyArray_DESCR(self); - Py_INCREF(dtype); - obj = (PyArrayObject *)PyArray_FromArray(self, dtype, - NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY); - if (obj != self) { - copied = 1; - } - self = obj; - } - max_item = PyArray_SIZE(self); - dest = PyArray_DATA(self); - chunk = PyArray_DESCR(self)->elsize; mask = (PyArrayObject *)PyArray_FROM_OTF(mask0, NPY_BOOL, NPY_ARRAY_CARRAY | NPY_ARRAY_FORCECAST); if (mask == NULL) { goto fail; } ni = PyArray_SIZE(mask); - if (ni != max_item) { + if (ni != PyArray_SIZE(self)) { PyErr_SetString(PyExc_ValueError, "putmask: mask and data must be " "the same size"); @@ -491,6 +483,27 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0) } src = PyArray_DATA(values); + overlap = arrays_overlap(self, values) || arrays_overlap(self, mask); + if (overlap || !PyArray_ISCONTIGUOUS(self)) { + int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY; + PyArrayObject *obj; + + if (overlap) { + flags |= NPY_ARRAY_ENSURECOPY; + } + + dtype = PyArray_DESCR(self); + Py_INCREF(dtype); + obj = (PyArrayObject *)PyArray_FromArray(self, dtype, flags); + if (obj != self) { + copied = 1; + } + self = obj; + } + + chunk = PyArray_DESCR(self)->elsize; + dest = PyArray_DATA(self); + if (PyDataType_REFCHK(PyArray_DESCR(self))) { for (i = 0, j = 0; i < ni; i++, j++) { if (j >= nv) { @@ -594,7 +607,8 @@ 
PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis) else { for (j = 0; j < n; j++) { if (counts[j] < 0) { - PyErr_SetString(PyExc_ValueError, "count < 0"); + PyErr_SetString(PyExc_ValueError, + "repeats may not contain negative values."); goto fail; } total += counts[j]; @@ -712,6 +726,13 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out, "choose: invalid shape for output array."); goto fail; } + + for (i = 0; i < n; i++) { + if (arrays_overlap(out, mps[i])) { + flags |= NPY_ARRAY_ENSURECOPY; + } + } + if (clipmode == NPY_RAISE) { /* * we need to make sure and get a copy @@ -2392,7 +2413,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index) npy_intp ind = multi_index[idim]; if (check_and_adjust_index(&ind, shapevalue, idim, NULL) < 0) { - return NULL; + return NULL; } data += ind * strides[idim]; } diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c index a3bc8e742..9fcdc91b2 100644 --- a/numpy/core/src/multiarray/iterators.c +++ b/numpy/core/src/multiarray/iterators.c @@ -539,6 +539,7 @@ iter_subscript(PyArrayIterObject *self, PyObject *ind) char *dptr; int size; PyObject *obj = NULL; + PyObject *new; PyArray_CopySwapFunc *copyswap; if (ind == Py_Ellipsis) { @@ -640,36 +641,36 @@ iter_subscript(PyArrayIterObject *self, PyObject *ind) obj = ind; } - if (PyArray_Check(obj)) { - /* Check for Boolean object */ - if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) { - ret = iter_subscript_Bool(self, (PyArrayObject *)obj); - Py_DECREF(indtype); - } - /* Check for integer array */ - else if (PyArray_ISINTEGER((PyArrayObject *)obj)) { - PyObject *new; - new = PyArray_FromAny(obj, indtype, 0, 0, - NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL); - if (new == NULL) { - goto fail; - } - Py_DECREF(obj); - obj = new; - new = iter_subscript_int(self, (PyArrayObject *)obj); - Py_DECREF(obj); - return new; - } - else { - goto fail; - } + /* Any remaining valid input is an array or has been turned into one */ + if (!PyArray_Check(obj)) { + goto fail; + } + + /* Check for Boolean array */ + if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) { + ret = iter_subscript_Bool(self, (PyArrayObject *)obj); + Py_DECREF(indtype); Py_DECREF(obj); return (PyObject *)ret; } - else { - Py_DECREF(indtype); + + /* Only integer arrays left */ + if (!PyArray_ISINTEGER((PyArrayObject *)obj)) { + goto fail; } + Py_INCREF(indtype); + new = PyArray_FromAny(obj, indtype, 0, 0, + NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL); + if (new == NULL) { + goto fail; + } + Py_DECREF(indtype); + Py_DECREF(obj); + ret = (PyArrayObject *)iter_subscript_int(self, (PyArrayObject *)new); + Py_DECREF(new); + return (PyObject *)ret; + fail: if (!PyErr_Occurred()) { diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index 16bacf1ab..63b2a8842 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -417,32 +417,31 @@ PyArray_GetStridedCopyFn(int aligned, npy_intp src_stride, #if !NPY_USE_UNALIGNED_ACCESS } else { - /* contiguous dst */ - if (itemsize != 0 && dst_stride == itemsize) { - /* contiguous src */ - if (itemsize != 0 && src_stride == itemsize) { - return &_contig_to_contig; - } - /* general src */ - else { - switch (itemsize) { - case 1: - return &_aligned_strided_to_contig_size1; + if (itemsize != 0) { + if (dst_stride == itemsize) { + /* contiguous dst */ + if (src_stride == itemsize) { + /* contiguous 
src, dst */ + return &_contig_to_contig; + } + else { + /* general src */ + switch (itemsize) { + case 1: + return &_aligned_strided_to_contig_size1; /**begin repeat * #elsize = 2, 4, 8, 16# */ - case @elsize@: - return &_strided_to_contig_size@elsize@; + case @elsize@: + return &_strided_to_contig_size@elsize@; /**end repeat**/ + } } - } - return &_strided_to_strided; - } - /* general dst */ - else { - /* contiguous src */ - if (itemsize != 0 && src_stride == itemsize) { + return &_strided_to_strided; + } + else if (src_stride == itemsize) { + /* contiguous src, general dst */ switch (itemsize) { case 1: return &_aligned_contig_to_strided_size1; @@ -456,18 +455,18 @@ PyArray_GetStridedCopyFn(int aligned, npy_intp src_stride, return &_strided_to_strided; } - /* general src */ - else { - switch (itemsize) { - case 1: - return &_aligned_strided_to_strided_size1; + } + else { + /* general src, dst */ + switch (itemsize) { + case 1: + return &_aligned_strided_to_strided_size1; /**begin repeat * #elsize = 2, 4, 8, 16# */ - case @elsize@: - return &_strided_to_strided_size@elsize@; + case @elsize@: + return &_strided_to_strided_size@elsize@; /**end repeat**/ - } } } } @@ -592,7 +591,7 @@ NPY_NO_EXPORT PyArray_StridedUnaryOp * /* contiguous dst */ if (itemsize != 0 && dst_stride == itemsize) { /* contiguous src */ - if (itemsize != 0 && src_stride == itemsize) { + if (src_stride == itemsize) { switch (itemsize) { /**begin repeat1 * #elsize = 2, 4, 8, 16# diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c index 17edd2bbf..10206c03e 100644 --- a/numpy/core/src/multiarray/mapping.c +++ b/numpy/core/src/multiarray/mapping.c @@ -710,26 +710,26 @@ prepare_index(PyArrayObject *self, PyObject *index, * to find the ellipsis value or append an ellipsis if necessary. */ if (used_ndim < PyArray_NDIM(self)) { - if (index_type & HAS_ELLIPSIS) { - indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim; - used_ndim = PyArray_NDIM(self); - new_ndim += indices[ellipsis_pos].value; - } - else { - /* - * There is no ellipsis yet, but it is not a full index - * so we append an ellipsis to the end. - */ - index_type |= HAS_ELLIPSIS; - indices[curr_idx].object = NULL; - indices[curr_idx].type = HAS_ELLIPSIS; - indices[curr_idx].value = PyArray_NDIM(self) - used_ndim; - ellipsis_pos = curr_idx; - - used_ndim = PyArray_NDIM(self); - new_ndim += indices[curr_idx].value; - curr_idx += 1; - } + if (index_type & HAS_ELLIPSIS) { + indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim; + used_ndim = PyArray_NDIM(self); + new_ndim += indices[ellipsis_pos].value; + } + else { + /* + * There is no ellipsis yet, but it is not a full index + * so we append an ellipsis to the end. 
+ */ + index_type |= HAS_ELLIPSIS; + indices[curr_idx].object = NULL; + indices[curr_idx].type = HAS_ELLIPSIS; + indices[curr_idx].value = PyArray_NDIM(self) - used_ndim; + ellipsis_pos = curr_idx; + + used_ndim = PyArray_NDIM(self); + new_ndim += indices[curr_idx].value; + curr_idx += 1; + } } else if (used_ndim > PyArray_NDIM(self)) { PyErr_SetString(PyExc_IndexError, diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index ce6a3870f..1b825d318 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -4492,7 +4492,6 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_prepare = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL; -NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_buffer = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ufunc = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_wrapped = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL; @@ -4509,7 +4508,6 @@ intern_strings(void) npy_ma_str_array_prepare = PyUString_InternFromString("__array_prepare__"); npy_ma_str_array_wrap = PyUString_InternFromString("__array_wrap__"); npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__"); - npy_ma_str_buffer = PyUString_InternFromString("__buffer__"); npy_ma_str_ufunc = PyUString_InternFromString("__array_ufunc__"); npy_ma_str_wrapped = PyUString_InternFromString("__wrapped__"); npy_ma_str_order = PyUString_InternFromString("order"); @@ -4521,7 +4519,7 @@ intern_strings(void) return npy_ma_str_array && npy_ma_str_array_prepare && npy_ma_str_array_wrap && npy_ma_str_array_finalize && - npy_ma_str_buffer && npy_ma_str_ufunc && npy_ma_str_wrapped && + npy_ma_str_ufunc && npy_ma_str_wrapped && npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype && npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2; } diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h index 60a3965c9..6d33c3295 100644 --- a/numpy/core/src/multiarray/multiarraymodule.h +++ b/numpy/core/src/multiarray/multiarraymodule.h @@ -5,7 +5,6 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_prepare; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize; -NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_buffer; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ufunc; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_wrapped; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order; diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c index dc58b3a78..3b3635afe 100644 --- a/numpy/core/src/multiarray/nditer_constr.c +++ b/numpy/core/src/multiarray/nditer_constr.c @@ -2120,8 +2120,8 @@ npyiter_apply_forced_iteration_order(NpyIter *iter, NPY_ORDER order) /* Check that all the array inputs are fortran order */ for (iop = 0; iop < nop; ++iop, ++op) { if (*op && !PyArray_CHKFLAGS(*op, NPY_ARRAY_F_CONTIGUOUS)) { - forder = 0; - break; + forder = 0; + break; } } diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src index d960838c8..3f66b24a4 100644 --- a/numpy/core/src/npymath/ieee754.c.src +++ b/numpy/core/src/npymath/ieee754.c.src @@ -681,7 +681,8 @@ void 
npy_set_floatstatus_invalid(void) fp_raise_xcp(FP_INVALID); } -#elif defined(_MSC_VER) || (defined(__osf__) && defined(__alpha)) +#elif defined(_MSC_VER) || (defined(__osf__) && defined(__alpha)) || \ + defined (__UCLIBC__) || (defined(__arc__) && defined(__GLIBC__)) /* * By using a volatile floating point value, diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src index cf427dad8..7aa07f16d 100644 --- a/numpy/core/src/npymath/npy_math_complex.c.src +++ b/numpy/core/src/npymath/npy_math_complex.c.src @@ -1430,19 +1430,14 @@ npy_casinh@c@(@ctype@ z) #if @precision@ == 1 /* this is sqrt(6*EPS) */ const npy_float SQRT_6_EPSILON = 8.4572793338e-4f; - /* chosen such that pio2_hi + pio2_lo == pio2_hi but causes FE_INEXACT. */ - const volatile npy_float pio2_lo = 7.5497899549e-9f; #endif #if @precision@ == 2 const npy_double SQRT_6_EPSILON = 3.65002414998885671e-08; - const volatile npy_double pio2_lo = 6.1232339957367659e-17; #endif #if @precision@ == 3 const npy_longdouble SQRT_6_EPSILON = 8.0654900873493277169e-10l; - const volatile npy_longdouble pio2_lo = 2.710505431213761085e-20l; #endif const @type@ RECIP_EPSILON = 1.0@c@ / @TEPS@; - const @type@ pio2_hi = NPY_PI_2@c@; @type@ x, y, ax, ay, wx, wy, rx, ry, B, sqrt_A2my2, new_y; npy_int B_is_usable; diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h index 37656dcf5..7a1ed66bc 100644 --- a/numpy/core/src/umath/fast_loop_macros.h +++ b/numpy/core/src/umath/fast_loop_macros.h @@ -64,6 +64,8 @@ #define IS_UNARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \ steps[1] == sizeof(tout)) +#define IS_OUTPUT_CONT(tout) (steps[1] == sizeof(tout)) + #define IS_BINARY_REDUCE ((args[0] == args[2])\ && (steps[0] == steps[2])\ && (steps[0] == 0)) @@ -72,50 +74,52 @@ #define IS_BINARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \ steps[1] == sizeof(tin) && \ steps[2] == sizeof(tout)) + /* binary loop input and output contiguous with first scalar */ #define IS_BINARY_CONT_S1(tin, tout) (steps[0] == 0 && \ steps[1] == sizeof(tin) && \ steps[2] == sizeof(tout)) + /* binary loop input and output contiguous with second scalar */ #define IS_BINARY_CONT_S2(tin, tout) (steps[0] == sizeof(tin) && \ steps[1] == 0 && \ steps[2] == sizeof(tout)) - /* * loop with contiguous specialization * op should be the code working on `tin in` and - * storing the result in `tout * out` + * storing the result in `tout *out` * combine with NPY_GCC_OPT_3 to allow autovectorization * should only be used where its worthwhile to avoid code bloat */ #define BASE_UNARY_LOOP(tin, tout, op) \ UNARY_LOOP { \ const tin in = *(tin *)ip1; \ - tout * out = (tout *)op1; \ + tout *out = (tout *)op1; \ op; \ } -#define UNARY_LOOP_FAST(tin, tout, op) \ + +#define UNARY_LOOP_FAST(tin, tout, op) \ do { \ - /* condition allows compiler to optimize the generic macro */ \ - if (IS_UNARY_CONT(tin, tout)) { \ - if (args[0] == args[1]) { \ - BASE_UNARY_LOOP(tin, tout, op) \ + /* condition allows compiler to optimize the generic macro */ \ + if (IS_UNARY_CONT(tin, tout)) { \ + if (args[0] == args[1]) { \ + BASE_UNARY_LOOP(tin, tout, op) \ + } \ + else { \ + BASE_UNARY_LOOP(tin, tout, op) \ + } \ } \ else { \ BASE_UNARY_LOOP(tin, tout, op) \ } \ } \ - else { \ - BASE_UNARY_LOOP(tin, tout, op) \ - } \ - } \ while (0) /* * loop with contiguous specialization * op should be the code working on `tin in1`, `tin in2` and - * storing the result in `tout * out` + * storing the result in `tout *out` * combine with 
NPY_GCC_OPT_3 to allow autovectorization * should only be used where its worthwhile to avoid code bloat */ @@ -123,9 +127,10 @@ BINARY_LOOP { \ const tin in1 = *(tin *)ip1; \ const tin in2 = *(tin *)ip2; \ - tout * out = (tout *)op1; \ + tout *out = (tout *)op1; \ op; \ } + /* * unfortunately gcc 6/7 regressed and we need to give it additional hints to * vectorize inplace operations (PR80198) @@ -146,59 +151,62 @@ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) { \ const tin in1 = *(tin *)ip1; \ const tin in2 = *(tin *)ip2; \ - tout * out = (tout *)op1; \ + tout *out = (tout *)op1; \ op; \ } + #define BASE_BINARY_LOOP_S(tin, tout, cin, cinp, vin, vinp, op) \ const tin cin = *(tin *)cinp; \ BINARY_LOOP { \ const tin vin = *(tin *)vinp; \ - tout * out = (tout *)op1; \ + tout *out = (tout *)op1; \ op; \ } + /* PR80198 again, scalar works without the pragma */ #define BASE_BINARY_LOOP_S_INP(tin, tout, cin, cinp, vin, vinp, op) \ const tin cin = *(tin *)cinp; \ BINARY_LOOP { \ const tin vin = *(tin *)vinp; \ - tout * out = (tout *)vinp; \ + tout *out = (tout *)vinp; \ op; \ } -#define BINARY_LOOP_FAST(tin, tout, op) \ + +#define BINARY_LOOP_FAST(tin, tout, op) \ do { \ - /* condition allows compiler to optimize the generic macro */ \ - if (IS_BINARY_CONT(tin, tout)) { \ - if (abs_ptrdiff(args[2], args[0]) == 0 && \ - abs_ptrdiff(args[2], args[1]) >= NPY_MAX_SIMD_SIZE) { \ - BASE_BINARY_LOOP_INP(tin, tout, op) \ - } \ - else if (abs_ptrdiff(args[2], args[1]) == 0 && \ - abs_ptrdiff(args[2], args[0]) >= NPY_MAX_SIMD_SIZE) { \ - BASE_BINARY_LOOP_INP(tin, tout, op) \ + /* condition allows compiler to optimize the generic macro */ \ + if (IS_BINARY_CONT(tin, tout)) { \ + if (abs_ptrdiff(args[2], args[0]) == 0 && \ + abs_ptrdiff(args[2], args[1]) >= NPY_MAX_SIMD_SIZE) { \ + BASE_BINARY_LOOP_INP(tin, tout, op) \ + } \ + else if (abs_ptrdiff(args[2], args[1]) == 0 && \ + abs_ptrdiff(args[2], args[0]) >= NPY_MAX_SIMD_SIZE) { \ + BASE_BINARY_LOOP_INP(tin, tout, op) \ + } \ + else { \ + BASE_BINARY_LOOP(tin, tout, op) \ + } \ } \ - else { \ - BASE_BINARY_LOOP(tin, tout, op) \ + else if (IS_BINARY_CONT_S1(tin, tout)) { \ + if (abs_ptrdiff(args[2], args[1]) == 0) { \ + BASE_BINARY_LOOP_S_INP(tin, tout, in1, args[0], in2, ip2, op) \ + } \ + else { \ + BASE_BINARY_LOOP_S(tin, tout, in1, args[0], in2, ip2, op) \ + } \ } \ - } \ - else if (IS_BINARY_CONT_S1(tin, tout)) { \ - if (abs_ptrdiff(args[2], args[1]) == 0) { \ - BASE_BINARY_LOOP_S_INP(tin, tout, in1, args[0], in2, ip2, op) \ + else if (IS_BINARY_CONT_S2(tin, tout)) { \ + if (abs_ptrdiff(args[2], args[0]) == 0) { \ + BASE_BINARY_LOOP_S_INP(tin, tout, in2, args[1], in1, ip1, op) \ + } \ + else { \ + BASE_BINARY_LOOP_S(tin, tout, in2, args[1], in1, ip1, op) \ + }\ } \ else { \ - BASE_BINARY_LOOP_S(tin, tout, in1, args[0], in2, ip2, op) \ - } \ - } \ - else if (IS_BINARY_CONT_S2(tin, tout)) { \ - if (abs_ptrdiff(args[2], args[0]) == 0) { \ - BASE_BINARY_LOOP_S_INP(tin, tout, in2, args[1], in1, ip1, op) \ + BASE_BINARY_LOOP(tin, tout, op) \ } \ - else { \ - BASE_BINARY_LOOP_S(tin, tout, in2, args[1], in1, ip1, op) \ - }\ - } \ - else { \ - BASE_BINARY_LOOP(tin, tout, op) \ - } \ } \ while (0) diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 04e6cbdee..1017bb94a 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -644,6 +644,23 @@ BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN } +/**begin repeat + * #kind = isnan, isinf, isfinite# + * #func = 
npy_isnan, npy_isinf, npy_isfinite# + * #val = NPY_FALSE, NPY_FALSE, NPY_TRUE# + **/ +NPY_NO_EXPORT void +BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) +{ + /* + * The (void)in; suppresses an unused variable warning raised by gcc and allows + * us to re-use this macro even though we do not depend on in + */ + UNARY_LOOP_FAST(npy_bool, npy_bool, (void)in; *out = @val@); +} + +/**end repeat**/ + /* ***************************************************************************** ** INTEGER LOOPS @@ -875,6 +892,22 @@ NPY_NO_EXPORT void } } +/**begin repeat1 + * #kind = isnan, isinf, isfinite# + * #func = npy_isnan, npy_isinf, npy_isfinite# + * #val = NPY_FALSE, NPY_FALSE, NPY_TRUE# + **/ +NPY_NO_EXPORT void +@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) +{ + /* + * The (void)in; suppresses an unused variable warning raised by gcc and allows + * us to re-use this macro even though we do not depend on in + */ + UNARY_LOOP_FAST(@type@, npy_bool, (void)in; *out = @val@); +} +/**end repeat1**/ + /**end repeat**/ /**begin repeat @@ -994,13 +1027,10 @@ NPY_NO_EXPORT void * #c = u,u,u,ul,ull# */ -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void @TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const @type@ in1 = *(@type@ *)ip1; - *((@type@ *)op1) = in1; - } + UNARY_LOOP_FAST(@type@, @type@, *out = in); } NPY_NO_EXPORT NPY_GCC_OPT_3 void @@ -2198,13 +2228,10 @@ HALF_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU } } -NPY_NO_EXPORT void +NPY_NO_EXPORT NPY_GCC_OPT_3 void HALF_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)) { - UNARY_LOOP { - const npy_half in1 = *(npy_half *)ip1; - *((npy_half *)op1) = in1&0x7fffu; - } + UNARY_LOOP_FAST(npy_half, npy_half, *out = in&0x7fffu); } NPY_NO_EXPORT void diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src index 5264a6533..f48319056 100644 --- a/numpy/core/src/umath/loops.h.src +++ b/numpy/core/src/umath/loops.h.src @@ -38,6 +38,13 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED NPY_NO_EXPORT void BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data)); +/**begin repeat + * #kind = isnan, isinf, isfinite# + **/ +NPY_NO_EXPORT void +BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); +/**end repeat**/ + /* ***************************************************************************** ** INTEGER LOOPS @@ -146,6 +153,13 @@ NPY_NO_EXPORT void NPY_NO_EXPORT void @S@@TYPE@_lcm(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); +/**begin repeat2 + * #kind = isnan, isinf, isfinite# + **/ +NPY_NO_EXPORT void +@S@@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func)); +/**end repeat2**/ + /**end repeat1**/ /**end repeat**/ diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index f198a19bd..f5084e6b3 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -563,14 +563,14 @@ _get_size(const char* str) if (stop == str || _is_alpha_underscore(*stop)) { /* not a well formed number */ - return -1; + return -1; } if (size >= NPY_MAX_INTP || size <= NPY_MIN_INTP) { /* len(str) too long */ return -1; } return size; - } +} /* * Return the ending position of a variable name including optional modifier diff 
--git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 23e3ffcfc..6ec474376 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -187,7 +187,7 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args) #else if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc, &PyString_Type, &str)) { - return NULL; + return NULL; } docstr = PyString_AS_STRING(str); #endif diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py index 43d29f42d..91cd2c115 100644 --- a/numpy/core/tests/test_datetime.py +++ b/numpy/core/tests/test_datetime.py @@ -1543,6 +1543,12 @@ class TestDateTime(object): assert_equal(x[0].astype(np.int64), 322689600000000000) + # gh-13062 + with pytest.raises(OverflowError): + np.datetime64(2**64, 'D') + with pytest.raises(OverflowError): + np.timedelta64(2**64, 'D') + def test_datetime_as_string(self): # Check all the units with default string conversion date = '1959-10-13' @@ -2214,6 +2220,44 @@ class TestDateTime(object): assert_raises(RecursionError, obj_arr.astype, 'M8') assert_raises(RecursionError, obj_arr.astype, 'm8') + @pytest.mark.parametrize("time_unit", [ + "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as", + # compound units + "10D", "2M", + ]) + def test_limit_symmetry(self, time_unit): + """ + Dates should have symmetric limits around the unix epoch at +/-np.int64 + """ + epoch = np.datetime64(0, time_unit) + latest = np.datetime64(np.iinfo(np.int64).max, time_unit) + earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit) + + # above should not have overflowed + assert earliest < epoch < latest + + @pytest.mark.parametrize("time_unit", [ + "Y", "M", + pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")), + "D", "h", "m", + "s", "ms", "us", "ns", "ps", "fs", "as", + pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")), + ]) + @pytest.mark.parametrize("sign", [-1, 1]) + def test_limit_str_roundtrip(self, time_unit, sign): + """ + Limits should roundtrip when converted to strings. + + This tests the conversion to and from npy_datetimestruct. + """ + # TODO: add absolute (gold standard) time span limit strings + limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit) + + # Convert to string and back. Explicit unit needed since the day and + # week reprs are not distinguishable. 
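    # Aside (illustrative, not part of the test): str() renders a week-based
    # datetime as a plain date, which parses back with day resolution by
    # default -- hence the explicit unit when converting back:
    #
    #     >>> d = np.datetime64(20, 'W')       # 20 weeks after the epoch
    #     >>> str(d)
    #     '1970-05-21'
    #     >>> np.datetime64(str(d)).dtype      # parses as days by default
    #     dtype('<M8[D]')
    #     >>> np.datetime64(str(d), 'W') == d  # explicit unit restores equality
    #     True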
+ limit_via_str = np.datetime64(str(limit), time_unit) + assert limit_via_str == limit + class TestDateTimeData(object): diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py index 459bacab0..8b820bd75 100644 --- a/numpy/core/tests/test_function_base.py +++ b/numpy/core/tests/test_function_base.py @@ -362,3 +362,9 @@ class TestLinspace(object): assert_(isinstance(y, tuple) and len(y) == 2 and len(y[0]) == num and isnan(y[1]), 'num={0}, endpoint={1}'.format(num, ept)) + + def test_object(self): + start = array(1, dtype='O') + stop = array(2, dtype='O') + y = linspace(start, stop, 3) + assert_array_equal(y, array([1., 1.5, 2.])) diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index 4f64661bd..c45029599 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -1491,6 +1491,11 @@ class TestMethods(object): # gh-12031, caused SEGFAULT assert_raises(TypeError, oned.choose,np.void(0), [oned]) + # gh-6272 check overlap on out + x = np.arange(5) + y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap') + assert_equal(y, np.array([0, 1, 2])) + def test_prod(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] @@ -4377,6 +4382,16 @@ class TestPutmask(object): assert_array_equal(rec['y'], [11, 4]) assert_array_equal(rec['z'], [3, 3]) + def test_overlaps(self): + # gh-6272 check overlap + x = np.array([True, False, True, False]) + np.putmask(x[1:4], [True, True, True], x[:3]) + assert_equal(x, np.array([True, True, False, True])) + + x = np.array([True, False, True, False]) + np.putmask(x[1:4], x[:3], [True, False, True]) + assert_equal(x, np.array([True, True, True, True])) + class TestTake(object): def tst_basic(self, x): @@ -4425,6 +4440,11 @@ class TestTake(object): rec1 = rec.take([1]) assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0) + def test_out_overlap(self): + # gh-6272 check overlap on out + x = np.arange(5) + y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap') + assert_equal(y, np.array([1, 2, 3])) class TestLexsort(object): def test_basic(self): @@ -4873,6 +4893,22 @@ class TestFlat(object): assert_(e.flags.writebackifcopy is False) assert_(f.flags.writebackifcopy is False) + @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts") + def test_refcount(self): + # includes regression test for reference count error gh-13165 + inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None] + indtype = np.dtype(np.intp) + rc_indtype = sys.getrefcount(indtype) + for ind in inds: + rc_ind = sys.getrefcount(ind) + for _ in range(100): + try: + self.a.flat[ind] + except IndexError: + pass + assert_(abs(sys.getrefcount(ind) - rc_ind) < 50) + assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50) + class TestResize(object): def test_basic(self): @@ -7105,6 +7141,19 @@ def test_array_interface_empty_shape(): assert_equal(arr1, arr2) assert_equal(arr1, arr3) +def test_array_interface_offset(): + arr = np.array([1, 2, 3], dtype='int32') + interface = dict(arr.__array_interface__) + interface['data'] = memoryview(arr) + interface['shape'] = (2,) + interface['offset'] = 4 + + + class DummyArray(object): + __array_interface__ = interface + + arr1 = np.asarray(DummyArray()) + assert_equal(arr1, arr[1:]) def test_flat_element_deletion(): it = np.ones(3).flat diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py index 90ac43a56..1822a7adf 100644 --- a/numpy/core/tests/test_numeric.py +++ 
b/numpy/core/tests/test_numeric.py @@ -216,6 +216,9 @@ class TestNonarrayArgs(object): assert_(np.isnan(np.var([]))) assert_(w[0].category is RuntimeWarning) + B = np.array([None, 0]) + B[0] = 1j + assert_almost_equal(np.var(B), 0.25) class TestIsscalar(object): def test_isscalar(self): diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py index f0ec38029..878ef2ac9 100644 --- a/numpy/core/tests/test_records.py +++ b/numpy/core/tests/test_records.py @@ -437,6 +437,13 @@ class TestRecord(object): arr = np.zeros((3,), dtype=[('x', int), ('y', int)]) assert_raises(ValueError, lambda: arr[['nofield']]) + def test_fromarrays_nested_structured_arrays(self): + arrays = [ + np.arange(10), + np.ones(10, dtype=[('a', '<u2'), ('b', '<f4')]), + ] + arr = np.rec.fromarrays(arrays) # ValueError? + def test_find_duplicate(): l1 = [1, 2, 3, 4, 5, 6] diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py index 1ba0cda21..4b551d8aa 100644 --- a/numpy/core/tests/test_regression.py +++ b/numpy/core/tests/test_regression.py @@ -2448,3 +2448,9 @@ class TestRegression(object): for proto in range(2, pickle.HIGHEST_PROTOCOL + 1): dumped = pickle.dumps(arr, protocol=proto) assert_equal(pickle.loads(dumped), arr) + + def test_bad_array_interface(self): + class T(object): + __array_interface__ = {} + + np.array([T()]) diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py index 51bcf2b8d..ebba457e3 100644 --- a/numpy/core/tests/test_scalarmath.py +++ b/numpy/core/tests/test_scalarmath.py @@ -422,7 +422,7 @@ class TestConversion(object): @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble), reason="long double is same as double") - @pytest.mark.skipif(platform.machine().startswith("ppc64"), + @pytest.mark.skipif(platform.machine().startswith("ppc"), reason="IBM double double") def test_int_from_huge_longdouble(self): # Produce a longdouble that would overflow a double, diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py index 478a08397..b6b68d922 100644 --- a/numpy/core/tests/test_ufunc.py +++ b/numpy/core/tests/test_ufunc.py @@ -1915,3 +1915,24 @@ class TestUfunc(object): exc = pytest.raises(TypeError, np.sqrt, None) # minimally check the exception text assert 'loop of ufunc does not support' in str(exc) + + @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + def test_nat_is_not_finite(self, nat): + try: + assert not np.isfinite(nat) + except TypeError: + pass # ok, just not implemented + + @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + def test_nat_is_nan(self, nat): + try: + assert np.isnan(nat) + except TypeError: + pass # ok, just not implemented + + @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')]) + def test_nat_is_not_inf(self, nat): + try: + assert not np.isinf(nat) + except TypeError: + pass # ok, just not implemented diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 02c3bd211..1967a85b6 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -51,7 +51,7 @@ Then, we're ready to call ``foo_func``: """ from __future__ import division, absolute_import, print_function -__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library', +__all__ = ['load_library', 'ndpointer', 'ctypes_load_library', 'c_intp', 'as_ctypes', 'as_array'] import os diff --git a/numpy/distutils/_shell_utils.py b/numpy/distutils/_shell_utils.py index 5ef874900..82abd5f4e 100644 --- 
a/numpy/distutils/_shell_utils.py +++ b/numpy/distutils/_shell_utils.py @@ -24,12 +24,12 @@ class CommandLineParser: @staticmethod def join(argv): """ Join a list of arguments into a command line string """ - raise NotImplemented + raise NotImplementedError @staticmethod def split(cmd): """ Split a command line string into a list of arguments """ - raise NotImplemented + raise NotImplementedError class WindowsParser: diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 552b9566f..14451fa66 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -796,63 +796,3 @@ for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']: if _m is not None: setattr(_m, 'gen_lib_options', gen_lib_options) - -##Fix distutils.util.split_quoted: -# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears -# that removing this fix causes f2py problems on Windows XP (see ticket #723). -# Specifically, on WinXP when gfortran is installed in a directory path, which -# contains spaces, then f2py is unable to find it. -import string -_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace) -_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'") -_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"') -_has_white_re = re.compile(r'\s') -def split_quoted(s): - s = s.strip() - words = [] - pos = 0 - - while s: - m = _wordchars_re.match(s, pos) - end = m.end() - if end == len(s): - words.append(s[:end]) - break - - if s[end] in string.whitespace: # unescaped, unquoted whitespace: now - words.append(s[:end]) # we definitely have a word delimiter - s = s[end:].lstrip() - pos = 0 - - elif s[end] == '\\': # preserve whatever is being escaped; - # will become part of the current word - s = s[:end] + s[end+1:] - pos = end+1 - - else: - if s[end] == "'": # slurp singly-quoted string - m = _squote_re.match(s, end) - elif s[end] == '"': # slurp doubly-quoted string - m = _dquote_re.match(s, end) - else: - raise RuntimeError("this can't happen (bad char '%c')" % s[end]) - - if m is None: - raise ValueError("bad string (mismatched %s quotes?)" % s[end]) - - (beg, end) = m.span() - if _has_white_re.search(s[beg+1:end-1]): - s = s[:beg] + s[beg+1:end-1] + s[end:] - pos = m.end() - 2 - else: - # Keeping quotes when a quoted word does not contain - # white-space. XXX: send a patch to distutils - pos = m.end() - - if pos >= len(s): - words.append(s) - break - - return words -ccompiler.split_quoted = split_quoted -##Fix distutils.util.split_quoted: diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py index ab9d585a5..ef54fb25e 100644 --- a/numpy/distutils/command/build_ext.py +++ b/numpy/distutils/command/build_ext.py @@ -281,8 +281,8 @@ class build_ext (old_build_ext): runtime_lib = os.path.join(self.extra_dll_dir, fn) copy_file(runtime_lib, shared_lib_dir) - def swig_sources(self, sources): - # Do nothing. Swig sources have beed handled in build_src command. + def swig_sources(self, sources, extensions=None): + # Do nothing. Swig sources have been handled in build_src command. 
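        # Aside on the _shell_utils fix earlier in this patch: NotImplemented
        # is a comparison sentinel, not an exception class, so
        # `raise NotImplemented` fails with an unrelated TypeError on
        # Python 3, while NotImplementedError carries the intended message:
        #
        #     >>> raise NotImplemented
        #     Traceback (most recent call last):
        #         ...
        #     TypeError: exceptions must derive from BaseException
        #     >>> raise NotImplementedError("join() must be overridden")
        #     Traceback (most recent call last):
        #         ...
        #     NotImplementedError: join() must be overridden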
return sources def build_extension(self, ext): diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py index 668bc23fe..6d5d305d2 100644 --- a/numpy/distutils/command/build_src.py +++ b/numpy/distutils/command/build_src.py @@ -28,20 +28,14 @@ def subst_vars(target, source, d): """Substitute any occurrence of @foo@ by d['foo'] from source file into target.""" var = re.compile('@([a-zA-Z_]+)@') - fs = open(source, 'r') - try: - ft = open(target, 'w') - try: + with open(source, 'r') as fs: + with open(target, 'w') as ft: for l in fs: m = var.search(l) if m: ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)])) else: ft.write(l) - finally: - ft.close() - finally: - fs.close() class build_src(build_ext.build_ext): @@ -425,9 +419,8 @@ class build_src(build_ext.build_ext): else: log.info("conv_template:> %s" % (target_file)) outstr = process_c_file(source) - fid = open(target_file, 'w') - fid.write(outstr) - fid.close() + with open(target_file, 'w') as fid: + fid.write(outstr) if _header_ext_match(target_file): d = os.path.dirname(target_file) if d not in include_dirs: @@ -723,25 +716,23 @@ _has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search _has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search def get_swig_target(source): - f = open(source, 'r') - result = None - line = f.readline() - if _has_cpp_header(line): - result = 'c++' - if _has_c_header(line): - result = 'c' - f.close() + with open(source, 'r') as f: + result = None + line = f.readline() + if _has_cpp_header(line): + result = 'c++' + if _has_c_header(line): + result = 'c' return result def get_swig_modulename(source): - f = open(source, 'r') - name = None - for line in f: - m = _swig_module_name_match(line) - if m: - name = m.group('name') - break - f.close() + with open(source, 'r') as f: + name = None + for line in f: + m = _swig_module_name_match(line) + if m: + name = m.group('name') + break return name def _find_swig_target(target_dir, name): @@ -760,15 +751,14 @@ _f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]* def get_f2py_modulename(source): name = None - f = open(source) - for line in f: - m = _f2py_module_name_match(line) - if m: - if _f2py_user_module_name_match(line): # skip *__user__* names - continue - name = m.group('name') - break - f.close() + with open(source) as f: + for line in f: + m = _f2py_module_name_match(line) + if m: + if _f2py_user_module_name_match(line): # skip *__user__* names + continue + name = m.group('name') + break return name ########################################## diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py index a1dd47755..c74ae9446 100644 --- a/numpy/distutils/command/install.py +++ b/numpy/distutils/command/install.py @@ -64,16 +64,15 @@ class install(old_install): # bdist_rpm fails when INSTALLED_FILES contains # paths with spaces. Such paths must be enclosed # with double-quotes. 
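        # The recurring rewrite in these distutils hunks, sketched standalone
        # with hypothetical file names: a `with` block closes each file even
        # when an exception escapes, which the removed try/finally pairs did
        # by hand.
        #
        #     with open('source.txt') as fs, open('target.txt', 'w') as ft:
        #         for line in fs:
        #             ft.write(line)   # both files closed on exit, error or not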
- f = open(self.record, 'r') - lines = [] - need_rewrite = False - for l in f: - l = l.rstrip() - if ' ' in l: - need_rewrite = True - l = '"%s"' % (l) - lines.append(l) - f.close() + with open(self.record, 'r') as f: + lines = [] + need_rewrite = False + for l in f: + l = l.rstrip() + if ' ' in l: + need_rewrite = True + l = '"%s"' % (l) + lines.append(l) if need_rewrite: self.execute(write_file, (self.record, lines), diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py index ede347b03..2e7b9e463 100644 --- a/numpy/distutils/exec_command.py +++ b/numpy/distutils/exec_command.py @@ -57,6 +57,7 @@ import os import sys import subprocess import locale +import warnings from numpy.distutils.misc_util import is_sequence, make_temp_file from numpy.distutils import log @@ -105,6 +106,9 @@ def forward_bytes_to_stdout(val): def temp_file_name(): + # 2019-01-30, 1.17 + warnings.warn('temp_file_name is deprecated since NumPy v1.17, use ' + 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1) fo, name = make_temp_file() fo.close() return name @@ -179,24 +183,14 @@ def _update_environment( **env ): for name, value in env.items(): os.environ[name] = value or '' -def _supports_fileno(stream): - """ - Returns True if 'stream' supports the file descriptor and allows fileno(). - """ - if hasattr(stream, 'fileno'): - try: - stream.fileno() - return True - except IOError: - return False - else: - return False - def exec_command(command, execute_in='', use_shell=None, use_tee=None, _with_python = 1, **env ): """ Return (status,output) of executed command. + .. deprecated:: 1.17 + Use subprocess.Popen instead + Parameters ---------- command : str @@ -220,6 +214,9 @@ def exec_command(command, execute_in='', use_shell=None, use_tee=None, Wild cards will not work for non-posix systems or when use_shell=0. 
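    A minimal sketch of the deprecation pattern used above (the names here are
    hypothetical); the updated distutils tests assert the warning fires via
    ``assert_warns``::

        import warnings

        def old_helper():
            # warn, then fall through to the old behaviour
            warnings.warn('old_helper is deprecated, use new_helper instead',
                          DeprecationWarning, stacklevel=2)

        with warnings.catch_warnings(record=True) as caught:
            warnings.simplefilter('always')
            old_helper()
            assert caught[0].category is DeprecationWarning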
""" + # 2019-01-30, 1.17 + warnings.warn('exec_command is deprecated since NumPy v1.17, use ' + 'subprocess.Popen instead', DeprecationWarning, stacklevel=1) log.debug('exec_command(%r,%s)' % (command,\ ','.join(['%s=%r'%kv for kv in env.items()]))) diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py index 4238f35cb..73a5e98e1 100644 --- a/numpy/distutils/fcompiler/environment.py +++ b/numpy/distutils/fcompiler/environment.py @@ -51,13 +51,16 @@ class EnvironmentConfig(object): def _get_var(self, name, conf_desc): hook, envvar, confvar, convert, append = conf_desc + if convert is None: + convert = lambda x: x var = self._hook_handler(name, hook) if envvar is not None: envvar_contents = os.environ.get(envvar) if envvar_contents is not None: + envvar_contents = convert(envvar_contents) if var and append: if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1': - var = var + [envvar_contents] + var.extend(envvar_contents) else: var = envvar_contents if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys(): @@ -70,11 +73,12 @@ class EnvironmentConfig(object): else: var = envvar_contents if confvar is not None and self._conf: - var = self._conf.get(confvar, (None, var))[1] - if convert is not None: - var = convert(var) + if confvar in self._conf: + source, confvar_contents = self._conf[confvar] + var = convert(confvar_contents) return var + def clone(self, hook_handler): ec = self.__class__(distutils_section=self._distutils_section, **self._conf_keys) diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py index c4cb2fca7..70d2132e1 100644 --- a/numpy/distutils/fcompiler/ibm.py +++ b/numpy/distutils/fcompiler/ibm.py @@ -78,15 +78,14 @@ class IBMFCompiler(FCompiler): xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version fo, new_cfg = make_temp_file(suffix='_xlf.cfg') log.info('Creating '+new_cfg) - fi = open(xlf_cfg, 'r') - crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match - for line in fi: - m = crt1_match(line) - if m: - fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) - else: - fo.write(line) - fi.close() + with open(xlf_cfg, 'r') as fi: + crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match + for line in fi: + m = crt1_match(line) + if m: + fo.write('crt = %s/bundle1.o\n' % (m.group('path'))) + else: + fo.write(line) fo.close() opt.append('-F'+new_cfg) return opt diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py index 5ecb104ff..2420798ab 100644 --- a/numpy/distutils/line_endings.py +++ b/numpy/distutils/line_endings.py @@ -19,9 +19,8 @@ def dos2unix(file): newdata = re.sub("\r\n", "\n", data) if newdata != data: print('dos2unix:', file) - f = open(file, "wb") - f.write(newdata) - f.close() + with open(file, "wb") as f: + f.write(newdata) return file else: print(file, 'ok') @@ -53,9 +52,8 @@ def unix2dos(file): newdata = re.sub("\n", "\r\n", newdata) if newdata != data: print('unix2dos:', file) - f = open(file, "wb") - f.write(newdata) - f.close() + with open(file, "wb") as f: + f.write(newdata) return file else: print(file, 'ok') diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py index 67a5f7234..cba84bffa 100644 --- a/numpy/distutils/misc_util.py +++ b/numpy/distutils/misc_util.py @@ -218,15 +218,14 @@ def get_mathlibs(path=None): raise DistutilsError('_numpyconfig.h not found in numpy include ' 'dirs %r' % (dirs,)) - fid = open(config_file) - mathlibs = [] - s = '#define MATHLIB' - for line in fid: - if line.startswith(s): - 
value = line[len(s):].strip() - if value: - mathlibs.extend(value.split(',')) - fid.close() + with open(config_file) as fid: + mathlibs = [] + s = '#define MATHLIB' + for line in fid: + if line.startswith(s): + value = line[len(s):].strip() + if value: + mathlibs.extend(value.split(',')) return mathlibs def minrelpath(path): @@ -443,14 +442,13 @@ def _get_f90_modules(source): if not f90_ext_match(source): return [] modules = [] - f = open(source, 'r') - for line in f: - m = f90_module_name_match(line) - if m: - name = m.group('name') - modules.append(name) - # break # XXX can we assume that there is one module per file? - f.close() + with open(source, 'r') as f: + for line in f: + m = f90_module_name_match(line) + if m: + name = m.group('name') + modules.append(name) + # break # XXX can we assume that there is one module per file? return modules def is_string(s): @@ -1833,67 +1831,53 @@ class Configuration(object): def _get_svn_revision(self, path): """Return path's SVN revision number. """ - revision = None - m = None - cwd = os.getcwd() try: - os.chdir(path or '.') - p = subprocess.Popen(['svnversion'], shell=True, - stdout=subprocess.PIPE, stderr=None, - close_fds=True) - sout = p.stdout - m = re.match(r'(?P<revision>\d+)', sout.read()) - except Exception: + output = subprocess.check_output( + ['svnversion'], shell=True, cwd=path) + except (subprocess.CalledProcessError, OSError): pass - os.chdir(cwd) - if m: - revision = int(m.group('revision')) - return revision + else: + m = re.match(rb'(?P<revision>\d+)', output) + if m: + return int(m.group('revision')) + if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None): entries = njoin(path, '_svn', 'entries') else: entries = njoin(path, '.svn', 'entries') if os.path.isfile(entries): - f = open(entries) - fstr = f.read() - f.close() + with open(entries) as f: + fstr = f.read() if fstr[:5] == '<?xml': # pre 1.4 m = re.search(r'revision="(?P<revision>\d+)"', fstr) if m: - revision = int(m.group('revision')) + return int(m.group('revision')) else: # non-xml entries file --- check to be sure that m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr) if m: - revision = int(m.group('revision')) - return revision + return int(m.group('revision')) + return None def _get_hg_revision(self, path): """Return path's Mercurial revision number. 
""" - revision = None - m = None - cwd = os.getcwd() try: - os.chdir(path or '.') - p = subprocess.Popen(['hg identify --num'], shell=True, - stdout=subprocess.PIPE, stderr=None, - close_fds=True) - sout = p.stdout - m = re.match(r'(?P<revision>\d+)', sout.read()) - except Exception: + output = subprocess.check_output( + ['hg identify --num'], shell=True, cwd=path) + except (subprocess.CalledProcessError, OSError): pass - os.chdir(cwd) - if m: - revision = int(m.group('revision')) - return revision + else: + m = re.match(rb'(?P<revision>\d+)', output) + if m: + return int(m.group('revision')) + branch_fn = njoin(path, '.hg', 'branch') branch_cache_fn = njoin(path, '.hg', 'branch.cache') if os.path.isfile(branch_fn): branch0 = None - f = open(branch_fn) - revision0 = f.read().strip() - f.close() + with open(branch_fn) as f: + revision0 = f.read().strip() branch_map = {} for line in file(branch_cache_fn, 'r'): @@ -1906,8 +1890,9 @@ class Configuration(object): continue branch_map[branch1] = revision1 - revision = branch_map.get(branch0) - return revision + return branch_map.get(branch0) + + return None def get_version(self, version_file=None, version_variable=None): @@ -2005,9 +1990,8 @@ class Configuration(object): if not os.path.isfile(target): version = str(revision) self.info('Creating %s (version=%r)' % (target, version)) - f = open(target, 'w') - f.write('version = %r\n' % (version)) - f.close() + with open(target, 'w') as f: + f.write('version = %r\n' % (version)) def rm_file(f=target,p=self.info): if delete: @@ -2046,9 +2030,8 @@ class Configuration(object): if not os.path.isfile(target): version = str(revision) self.info('Creating %s (version=%r)' % (target, version)) - f = open(target, 'w') - f.write('version = %r\n' % (version)) - f.close() + with open(target, 'w') as f: + f.write('version = %r\n' % (version)) def rm_file(f=target,p=self.info): if delete: @@ -2284,13 +2267,13 @@ def generate_config_py(target): from numpy.distutils.system_info import system_info from distutils.dir_util import mkpath mkpath(os.path.dirname(target)) - f = open(target, 'w') - f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) - f.write('# It contains system_info results at the time of building this package.\n') - f.write('__all__ = ["get_info","show"]\n\n') + with open(target, 'w') as f: + f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0]))) + f.write('# It contains system_info results at the time of building this package.\n') + f.write('__all__ = ["get_info","show"]\n\n') - # For gfortran+msvc combination, extra shared libraries may exist - f.write(""" + # For gfortran+msvc combination, extra shared libraries may exist + f.write(""" import os import sys @@ -2303,9 +2286,9 @@ if sys.platform == 'win32' and os.path.isdir(extra_dll_dir): """) - for k, i in system_info.saved_results.items(): - f.write('%s=%r\n' % (k, i)) - f.write(r''' + for k, i in system_info.saved_results.items(): + f.write('%s=%r\n' % (k, i)) + f.write(r''' def get_info(name): g = globals() return g.get(name, g.get(name + "_info", {})) @@ -2321,9 +2304,8 @@ def show(): if k == "sources" and len(v) > 200: v = v[:60] + " ...\n... 
" + v[-60:] print(" %s = %s" % (k,v)) - ''') + ''') - f.close() return target def msvc_version(compiler): diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 242494331..8a42434ff 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -906,7 +906,6 @@ class fftw_info(system_info): == len(ver_param['includes']): dict_append(info, include_dirs=[d]) flag = 1 - incl_dirs = [d] break if flag: dict_append(info, define_macros=ver_param['macros']) @@ -1058,9 +1057,9 @@ class mkl_info(system_info): for d in paths: dirs = glob(os.path.join(d, 'mkl', '*')) dirs += glob(os.path.join(d, 'mkl*')) - for d in dirs: - if os.path.isdir(os.path.join(d, 'lib')): - return d + for sub_dir in dirs: + if os.path.isdir(os.path.join(sub_dir, 'lib')): + return sub_dir return None def __init__(self): @@ -1690,23 +1689,46 @@ class blas_info(system_info): else: info['include_dirs'] = self.get_include_dirs() if platform.system() == 'Windows': - # The check for windows is needed because has_cblas uses the + # The check for windows is needed because get_cblas_libs uses the # same compiler that was used to compile Python and msvc is # often not installed when mingw is being used. This rough # treatment is not desirable, but windows is tricky. info['language'] = 'f77' # XXX: is it generally true? else: - lib = self.has_cblas(info) + lib = self.get_cblas_libs(info) if lib is not None: info['language'] = 'c' - info['libraries'] = [lib] + info['libraries'] = lib info['define_macros'] = [('HAVE_CBLAS', None)] self.set_info(**info) - def has_cblas(self, info): + def get_cblas_libs(self, info): + """ Check whether we can link with CBLAS interface + + This method will search through several combinations of libraries + to check whether CBLAS is present: + + 1. Libraries in ``info['libraries']``, as is + 2. As 1. but also explicitly adding ``'cblas'`` as a library + 3. As 1. but also explicitly adding ``'blas'`` as a library + 4. Check only library ``'cblas'`` + 5. Check only library ``'blas'`` + + Parameters + ---------- + info : dict + system information dictionary for compilation and linking + + Returns + ------- + libraries : list of str or None + a list of libraries that enables the use of CBLAS interface. + Returns None if not found or a compilation error occurs. + + Since 1.17 returns a list. + """ # primitive cblas check by looking for the header and trying to link # cblas or blas - res = False c = customized_ccompiler() tmpdir = tempfile.mkdtemp() s = """#include <cblas.h> @@ -1725,27 +1747,26 @@ class blas_info(system_info): # check we can compile (find headers) obj = c.compile([src], output_dir=tmpdir, include_dirs=self.get_include_dirs()) + except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): + return None - # check we can link (find library) - # some systems have separate cblas and blas libs. First - # check for cblas lib, and if not present check for blas lib. + # check we can link (find library) + # some systems have separate cblas and blas libs. 
+ for libs in [info['libraries'], ['cblas'] + info['libraries'], + ['blas'] + info['libraries'], ['cblas'], ['blas']]: try: c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=["cblas"], + libraries=libs, library_dirs=info['library_dirs'], extra_postargs=info.get('extra_link_args', [])) - res = "cblas" + return libs + # This breaks the for loop + break except distutils.ccompiler.LinkError: - c.link_executable(obj, os.path.join(tmpdir, "a.out"), - libraries=["blas"], - library_dirs=info['library_dirs'], - extra_postargs=info.get('extra_link_args', [])) - res = "blas" - except distutils.ccompiler.CompileError: - res = None + pass finally: shutil.rmtree(tmpdir) - return res + return None class openblas_info(blas_info): diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py index 8bd265007..37912f5ba 100644 --- a/numpy/distutils/tests/test_exec_command.py +++ b/numpy/distutils/tests/test_exec_command.py @@ -6,7 +6,7 @@ from tempfile import TemporaryFile from numpy.distutils import exec_command from numpy.distutils.exec_command import get_pythonexe -from numpy.testing import tempdir, assert_ +from numpy.testing import tempdir, assert_, assert_warns # In python 3 stdout, stderr are text (unicode compliant) devices, so to # emulate them import StringIO from the io module. @@ -71,27 +71,31 @@ def test_exec_command_stdout(): # Test posix version: with redirect_stdout(StringIO()): with redirect_stderr(TemporaryFile()): - exec_command.exec_command("cd '.'") + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") if os.name == 'posix': # Test general (non-posix) version: with emulate_nonposix(): with redirect_stdout(StringIO()): with redirect_stderr(TemporaryFile()): - exec_command.exec_command("cd '.'") + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") def test_exec_command_stderr(): # Test posix version: with redirect_stdout(TemporaryFile(mode='w+')): with redirect_stderr(StringIO()): - exec_command.exec_command("cd '.'") + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") if os.name == 'posix': # Test general (non-posix) version: with emulate_nonposix(): with redirect_stdout(TemporaryFile()): with redirect_stderr(StringIO()): - exec_command.exec_command("cd '.'") + with assert_warns(DeprecationWarning): + exec_command.exec_command("cd '.'") class TestExecCommand(object): @@ -205,11 +209,12 @@ class TestExecCommand(object): def test_basic(self): with redirect_stdout(StringIO()): with redirect_stderr(StringIO()): - if os.name == "posix": - self.check_posix(use_tee=0) - self.check_posix(use_tee=1) - elif os.name == "nt": - self.check_nt(use_tee=0) - self.check_nt(use_tee=1) - self.check_execute_in(use_tee=0) - self.check_execute_in(use_tee=1) + with assert_warns(DeprecationWarning): + if os.name == "posix": + self.check_posix(use_tee=0) + self.check_posix(use_tee=1) + elif os.name == "nt": + self.check_nt(use_tee=0) + self.check_nt(use_tee=1) + self.check_execute_in(use_tee=0) + self.check_execute_in(use_tee=1) diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py index c87a40ccd..61f5bf4ef 100644 --- a/numpy/doc/basics.py +++ b/numpy/doc/basics.py @@ -260,6 +260,45 @@ identical behaviour between arrays and scalars, irrespective of whether the value is inside an array or not. NumPy scalars also have many of the same methods arrays do. 
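For example (an illustrative doctest, not part of the patch)::

    >>> x = np.float32(1.5)
    >>> x.ndim, x.dtype, x.item()
    (0, dtype('float32'), 1.5)
    >>> x.astype(np.int32)
    1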
+Overflow Errors
+===============
+
+The fixed size of NumPy numeric types may cause overflow errors when a value
+requires more memory than is available in the data type. For example,
+`numpy.power` evaluates ``100 ** 8`` correctly for 64-bit integers,
+but gives 1874919424 (incorrect) for a 32-bit integer.
+
+    >>> np.power(100, 8, dtype=np.int64)
+    10000000000000000
+    >>> np.power(100, 8, dtype=np.int32)
+    1874919424
+
+The behaviour of NumPy and Python integer types differs significantly for
+integer overflows and may confuse users expecting NumPy integers to behave
+similarly to Python's ``int``. Unlike NumPy, the size of Python's ``int`` is
+flexible. This means Python integers may expand to accommodate any integer and
+will not overflow.
+
+NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the minimum and
+maximum values of NumPy integer and floating point types respectively::
+
+    >>> np.iinfo(np.int) # Bounds of the default integer on this system.
+    iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
+    >>> np.iinfo(np.int32) # Bounds of a 32-bit integer
+    iinfo(min=-2147483648, max=2147483647, dtype=int32)
+    >>> np.iinfo(np.int64) # Bounds of a 64-bit integer
+    iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
+
+If 64-bit integers are still too small, the result may be cast to a
+floating point number. Floating point numbers offer a larger, but inexact,
+range of possible values.
+
+    >>> np.power(100, 100, dtype=np.int64) # Incorrect even with 64-bit int
+    0
+    >>> np.power(100, 100, dtype=np.float64)
+    1e+200
+
 Extended Precision
 ==================
diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py
index f9491ed43..7a749c8d5 100644
--- a/numpy/doc/byteswapping.py
+++ b/numpy/doc/byteswapping.py
@@ -31,16 +31,16 @@ Let's say the two integers were in fact 1 and 770. Because 770 = 256 * 3 + 2,
 the 4 bytes in memory would contain respectively: 0, 1, 3, 2. The bytes I have
 loaded from the file would have these contents:
 
->>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2)
->>> big_end_str
-'\\x00\\x01\\x03\\x02'
+>>> big_end_buffer = bytearray([0,1,3,2])
+>>> big_end_buffer
+bytearray(b'\\x00\\x01\\x03\\x02')
 
 We might want to use an ``ndarray`` to access these integers. In that case,
 we can create an array around this memory, and tell numpy that there are
 two integers, and that they are 16 bit and big-endian:
 
 >>> import numpy as np
->>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
+>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer)
 >>> big_end_arr[0]
 1
 >>> big_end_arr[1]
@@ -53,7 +53,7 @@ integer, the dtype string would be ``<u4``. In fact, why don't we try that?
->>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str) +>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_buffer) >>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3 True @@ -97,7 +97,7 @@ Data and dtype endianness don't match, change dtype to match data We make something where they don't match: ->>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str) +>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_buffer) >>> wrong_end_dtype_arr[0] 256 @@ -110,7 +110,7 @@ the correct endianness: Note the array has not changed in memory: ->>> fixed_end_dtype_arr.tobytes() == big_end_str +>>> fixed_end_dtype_arr.tobytes() == big_end_buffer True Data and type endianness don't match, change data to match dtype @@ -126,7 +126,7 @@ that needs a certain byte ordering. Now the array *has* changed in memory: ->>> fixed_end_mem_arr.tobytes() == big_end_str +>>> fixed_end_mem_arr.tobytes() == big_end_buffer False Data and dtype endianness match, swap data and dtype @@ -140,7 +140,7 @@ the previous operations: >>> swapped_end_arr = big_end_arr.byteswap().newbyteorder() >>> swapped_end_arr[0] 1 ->>> swapped_end_arr.tobytes() == big_end_str +>>> swapped_end_arr.tobytes() == big_end_buffer False An easier way of casting the data to a specific dtype and byte ordering @@ -149,7 +149,7 @@ can be achieved with the ndarray astype method: >>> swapped_end_arr = big_end_arr.astype('<i2') >>> swapped_end_arr[0] 1 ->>> swapped_end_arr.tobytes() == big_end_str +>>> swapped_end_arr.tobytes() == big_end_buffer False """ diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py index a3707340d..7d1c9a1d5 100644 --- a/numpy/doc/glossary.py +++ b/numpy/doc/glossary.py @@ -159,7 +159,7 @@ Glossary field In a :term:`structured data type`, each sub-type is called a `field`. - The `field` has a name (a string), a type (any valid :term:`dtype`, and + The `field` has a name (a string), a type (any valid dtype, and an optional `title`. See :ref:`arrays.dtypes` Fortran order @@ -209,6 +209,9 @@ Glossary Key 1: b Key 2: c + itemsize + The size of the dtype element in bytes. + list A Python container that can hold any number of objects or items. The items do not have to be of the same type, and can even be @@ -345,31 +348,31 @@ Glossary Painting the city red! 
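The new ``itemsize`` entry can be checked on any dtype (illustrative
doctest)::

    >>> np.dtype(np.int32).itemsize
    4
    >>> np.dtype('complex128').itemsize
    16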
slice - Used to select only certain elements from a sequence:: + Used to select only certain elements from a sequence: - >>> x = range(5) - >>> x - [0, 1, 2, 3, 4] + >>> x = range(5) + >>> x + [0, 1, 2, 3, 4] - >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) - [1, 2] + >>> x[1:3] # slice from 1 to 3 (excluding 3 itself) + [1, 2] - >>> x[1:5:2] # slice from 1 to 5, but skipping every second element - [1, 3] + >>> x[1:5:2] # slice from 1 to 5, but skipping every second element + [1, 3] - >>> x[::-1] # slice a sequence in reverse - [4, 3, 2, 1, 0] + >>> x[::-1] # slice a sequence in reverse + [4, 3, 2, 1, 0] Arrays may have more than one dimension, each which can be sliced - individually:: + individually: - >>> x = np.array([[1, 2], [3, 4]]) - >>> x - array([[1, 2], - [3, 4]]) + >>> x = np.array([[1, 2], [3, 4]]) + >>> x + array([[1, 2], + [3, 4]]) - >>> x[:, 1] - array([2, 4]) + >>> x[:, 1] + array([2, 4]) structure See :term:`structured data type` @@ -377,6 +380,20 @@ Glossary structured data type A data type composed of other datatypes + subarray data type + A :term:`structured data type` may contain a :term:`ndarray` with its + own dtype and shape: + + >>> dt = np.dtype([('a', np.int32), ('b', np.float32, (3,))]) + >>> np.zeros(3, dtype=dt) + array([(0, [0., 0., 0.]), (0, [0., 0., 0.]), (0, [0., 0., 0.])], + dtype=[('a', '<i4'), ('b', '<f4', (3,))]) + + title + In addition to field names, structured array fields may have an + associated :ref:`title <titles>` which is an alias to the name and is + commonly used for plotting. + tuple A sequence that may contain a variable number of types of any kind. A tuple is immutable, i.e., once constructed it cannot be @@ -413,8 +430,19 @@ Glossary 'alpha' ufunc - Universal function. A fast element-wise array operation. Examples include - ``add``, ``sin`` and ``logical_or``. + Universal function. A fast element-wise, :term:`vectorized + <vectorization>` array operation. Examples include ``add``, ``sin`` and + ``logical_or``. + + vectorization + Optimizing a looping block by specialized code. In a traditional sense, + vectorization performs the same operation on multiple elements with + fixed strides between them via specialized hardware. Compilers know how + to take advantage of well-constructed loops to implement such + optimizations. NumPy uses :ref:`vectorization <whatis-vectorization>` + to mean any optimization via specialized code performing the same + operations on multiple elements, typically achieving speedups by + avoiding some of the overhead in looking up and converting the elements. view An array that does not own its data, but refers to another array's diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py index 087a688bc..f80d6c29e 100644 --- a/numpy/doc/indexing.py +++ b/numpy/doc/indexing.py @@ -93,7 +93,9 @@ well. A few examples illustrates best: :: [21, 24, 27]]) Note that slices of arrays do not copy the internal array data but -only produce new views of the original data. +only produce new views of the original data. This is different from +list or tuple slicing and an explicit ``copy()`` is recommended if +the original data is not required anymore. It is possible to index arrays with other arrays for the purposes of selecting lists of values out of arrays into new arrays. 
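A short doctest illustrating the view semantics the indexing note above calls
out (not part of the patch)::

    >>> a = np.arange(6)
    >>> v = a[1:4]            # a view: no data is copied
    >>> v[0] = 99
    >>> a                     # the change is visible through the original
    array([ 0, 99,  2,  3,  4,  5])
    >>> b = a[1:4].copy()     # an explicit copy decouples the data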
There are diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py index da3a74bd6..c0437dc07 100644 --- a/numpy/doc/structured_arrays.py +++ b/numpy/doc/structured_arrays.py @@ -57,10 +57,10 @@ A structured datatype can be thought of as a sequence of bytes of a certain length (the structure's :term:`itemsize`) which is interpreted as a collection of fields. Each field has a name, a datatype, and a byte offset within the structure. The datatype of a field may be any numpy datatype including other -structured datatypes, and it may also be a :term:`sub-array` which behaves like -an ndarray of a specified shape. The offsets of the fields are arbitrary, and -fields may even overlap. These offsets are usually determined automatically by -numpy, but can also be specified. +structured datatypes, and it may also be a :term:`subarray data type` which +behaves like an ndarray of a specified shape. The offsets of the fields are +arbitrary, and fields may even overlap. These offsets are usually determined +automatically by numpy, but can also be specified. Structured Datatype Creation ---------------------------- @@ -231,7 +231,7 @@ each field's offset is a multiple of its size and that the itemsize is a multiple of the largest field size, and raise an exception if not. If the offsets of the fields and itemsize of a structured array satisfy the -alignment conditions, the array will have the ``ALIGNED`` :ref:`flag +alignment conditions, the array will have the ``ALIGNED`` :attr:`flag <numpy.ndarray.flags>` set. A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an @@ -266,7 +266,7 @@ providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual >>> np.dtype({'name': ('i4', 0, 'my title')}) dtype([(('my title', 'name'), '<i4')]) -The ``dtype.fields`` dictionary will contain :term:`titles` as keys, if any +The ``dtype.fields`` dictionary will contain titles as keys, if any titles are used. This means effectively that a field with a title will be represented twice in the fields dictionary. The tuple values for these fields will also have a third element, the field title. Because of this, and because @@ -431,8 +431,9 @@ array, as follows:: Assignment to the view modifies the original array. The view's fields will be in the order they were indexed. Note that unlike for single-field indexing, the -view's dtype has the same itemsize as the original array, and has fields at the -same offsets as in the original array, and unindexed fields are merely missing. +dtype of the view has the same itemsize as the original array, and has fields +at the same offsets as in the original array, and unindexed fields are merely +missing. .. warning:: In Numpy 1.15, indexing an array with a multi-field index returned a copy of @@ -453,7 +454,7 @@ same offsets as in the original array, and unindexed fields are merely missing. Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7. In 1.16 a number of functions have been introduced in the - :module:`numpy.lib.recfunctions` module to help users account for this + :mod:`numpy.lib.recfunctions` module to help users account for this change. These are :func:`numpy.lib.recfunctions.repack_fields`. :func:`numpy.lib.recfunctions.structured_to_unstructured`, @@ -610,7 +611,7 @@ creating record arrays, see :ref:`record array creation routines <routines.array-creation.rec>`. 
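The multi-field view behaviour described above, together with the
``repack_fields`` helper, in one illustrative doctest::

    >>> from numpy.lib import recfunctions as rfn
    >>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'f8')])
    >>> v = a[['a', 'c']]                     # multi-field index: a view
    >>> v.dtype.itemsize == a.dtype.itemsize  # itemsize is unchanged
    True
    >>> rfn.repack_fields(v).dtype.itemsize   # packed copy, gap removed
    12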
A record array representation of a structured array can be obtained using the -appropriate :ref:`view`:: +appropriate `view <numpy-ndarray-view>`_:: >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")], ... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')]) diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py index c4a650585..0e9cba1eb 100755 --- a/numpy/f2py/crackfortran.py +++ b/numpy/f2py/crackfortran.py @@ -2399,7 +2399,7 @@ def _selected_real_kind_func(p, r=0, radix=0): if p < 16: return 8 machine = platform.machine().lower() - if machine.startswith(('aarch64', 'power', 'ppc64', 's390x', 'sparc')): + if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')): if p <= 20: return 16 else: diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c index 78b06f066..4a981bf55 100644 --- a/numpy/f2py/src/fortranobject.c +++ b/numpy/f2py/src/fortranobject.c @@ -135,7 +135,7 @@ format_def(char *buf, Py_ssize_t size, FortranDataDef def) if (def.data == NULL) { static const char notalloc[] = ", not allocated"; - if (size < sizeof(notalloc)) { + if ((size_t) size < sizeof(notalloc)) { return -1; } memcpy(p, notalloc, sizeof(notalloc)); diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py index b236cc449..07146f404 100644 --- a/numpy/lib/arraypad.py +++ b/numpy/lib/arraypad.py @@ -16,50 +16,67 @@ __all__ = ['pad'] # Private utility functions. -def _arange_ndarray(arr, shape, axis, reverse=False): +def _linear_ramp(ndim, axis, start, stop, size, reverse=False): """ - Create an ndarray of `shape` with increments along specified `axis` + Create a linear ramp of `size` in `axis` with `ndim`. + + This algorithm behaves like a vectorized version of `numpy.linspace`. + The resulting linear ramp is broadcastable to any array that matches the + ramp in `shape[axis]` and `ndim`. Parameters ---------- - arr : ndarray - Input array of arbitrary shape. - shape : tuple of ints - Shape of desired array. Should be equivalent to `arr.shape` except - `shape[axis]` which may have any positive value. + ndim : int + Number of dimensions of the resulting array. All dimensions except + the one specified by `axis` will have the size 1. axis : int - Axis to increment along. + The dimension that contains the linear ramp of `size`. + start : int or ndarray + The starting value(s) of the linear ramp. If given as an array, its + size must match `size`. + stop : int or ndarray + The stop value(s) (not included!) of the linear ramp. If given as an + array, its size must match `size`. + size : int + The number of elements in the linear ramp. If this argument is 0 the + dimensions of `ramp` will all be of length 1 except for the one given + by `axis` which will be 0. reverse : bool - If False, increment in a positive fashion from 1 to `shape[axis]`, - inclusive. If True, the bounds are the same but the order reversed. + If False, increment in a positive fashion, otherwise decrement. Returns ------- - padarr : ndarray - Output array sized to pad `arr` along `axis`, with linear range from - 1 to `shape[axis]` along specified `axis`. - - Notes - ----- - The range is deliberately 1-indexed for this specific use case. Think of - this algorithm as broadcasting `np.arange` to a single `axis` of an - arbitrarily shaped ndarray. + ramp : ndarray + Output array of dtype np.float64 that in- or decrements along the given + `axis`. + Examples + -------- + >>> _linear_ramp(ndim=2, axis=0, start=np.arange(3), stop=10, size=2) + array([[0. , 1. , 2. ], + [5. , 5.5, 6. 
]]) + >>> _linear_ramp(ndim=3, axis=0, start=2, stop=0, size=0) + array([], shape=(0, 1, 1), dtype=float64) """ - initshape = tuple(1 if i != axis else shape[axis] - for (i, x) in enumerate(arr.shape)) - if not reverse: - padarr = np.arange(1, shape[axis] + 1) - else: - padarr = np.arange(shape[axis], 0, -1) - padarr = padarr.reshape(initshape) - for i, dim in enumerate(shape): - if padarr.shape[i] != dim: - padarr = padarr.repeat(dim, axis=i) - return padarr + # Create initial ramp + ramp = np.arange(size, dtype=np.float64) + if reverse: + ramp = ramp[::-1] + + # Make sure, that ramp is broadcastable + init_shape = (1,) * axis + (size,) + (1,) * (ndim - axis - 1) + ramp = ramp.reshape(init_shape) + + if size != 0: + # And scale to given start and stop values + gain = (stop - start) / float(size) + ramp = ramp * gain + ramp += start + return ramp -def _round_ifneeded(arr, dtype): + +def _round_if_needed(arr, dtype): """ Rounds arr inplace if destination dtype is integer. @@ -69,821 +86,418 @@ def _round_ifneeded(arr, dtype): Input array. dtype : dtype The dtype of the destination array. - """ if np.issubdtype(dtype, np.integer): arr.round(out=arr) -def _slice_at_axis(shape, sl, axis): - """ - Construct a slice tuple the length of shape, with sl at the specified axis - """ - slice_tup = (slice(None),) - return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1) - - -def _slice_first(shape, n, axis): - """ Construct a slice tuple to take the first n elements along axis """ - return _slice_at_axis(shape, slice(0, n), axis=axis) - - -def _slice_last(shape, n, axis): - """ Construct a slice tuple to take the last n elements along axis """ - dim = shape[axis] # doing this explicitly makes n=0 work - return _slice_at_axis(shape, slice(dim - n, dim), axis=axis) - - -def _do_prepend(arr, pad_chunk, axis): - return np.concatenate( - (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis) - - -def _do_append(arr, pad_chunk, axis): - return np.concatenate( - (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis) - - -def _prepend_const(arr, pad_amt, val, axis=-1): - """ - Prepend constant `val` along `axis` of `arr`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - val : scalar - Constant value to use. For best results should be of type `arr.dtype`; - if not `arr.dtype` will be cast to `arr.dtype`. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` constant `val` prepended along `axis`. - - """ - if pad_amt == 0: - return arr - padshape = tuple(x if i != axis else pad_amt - for (i, x) in enumerate(arr.shape)) - return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis) - - -def _append_const(arr, pad_amt, val, axis=-1): +def _slice_at_axis(sl, axis): """ - Append constant `val` along `axis` of `arr`. + Construct tuple of slices to slice an array in the given dimension. Parameters ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - val : scalar - Constant value to use. For best results should be of type `arr.dtype`; - if not `arr.dtype` will be cast to `arr.dtype`. + sl : slice + The slice for the given dimension. axis : int - Axis along which to pad `arr`. + The axis to which `sl` is applied. All other dimensions are left + "unsliced". Returns ------- - padarr : ndarray - Output array, with `pad_amt` constant `val` appended along `axis`. 
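    # The new _slice_at_axis helper above builds index tuples that work for
    # any number of dimensions; the same trick standalone (hypothetical name):
    #
    #     >>> def slice_at_axis(sl, axis):
    #     ...     # Ellipsis covers all trailing axes, so ndim is not needed
    #     ...     return (slice(None),) * axis + (sl,) + (...,)
    #     >>> a = np.arange(24).reshape(2, 3, 4)
    #     >>> a[slice_at_axis(slice(0, 1), axis=1)].shape
    #     (2, 1, 4)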
- - """ - if pad_amt == 0: - return arr - padshape = tuple(x if i != axis else pad_amt - for (i, x) in enumerate(arr.shape)) - return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis) + sl : tuple of slices + A tuple with slices matching `shape` in length. - - -def _prepend_edge(arr, pad_amt, axis=-1): - """ - Prepend `pad_amt` to `arr` along `axis` by extending edge values. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, extended by `pad_amt` edge values appended along `axis`. - - """ - if pad_amt == 0: - return arr - - edge_slice = _slice_first(arr.shape, 1, axis=axis) - edge_arr = arr[edge_slice] - return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis) - - -def _append_edge(arr, pad_amt, axis=-1): + Examples + -------- + >>> _slice_at_axis(slice(None, 3, -1), 1) + (slice(None, None, None), slice(None, 3, -1), (...,)) """ - Append `pad_amt` to `arr` along `axis` by extending edge values. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - axis : int - Axis along which to pad `arr`. + return (slice(None),) * axis + (sl,) + (...,) - Returns - ------- - padarr : ndarray - Output array, extended by `pad_amt` edge values prepended along - `axis`. +def _view_roi(array, original_area_slice, axis): """ - if pad_amt == 0: - return arr - - edge_slice = _slice_last(arr.shape, 1, axis=axis) - edge_arr = arr[edge_slice] - return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis) - + Get a view of the current region of interest during iterative padding. -def _prepend_ramp(arr, pad_amt, end, axis=-1): - """ - Prepend linear ramp along `axis`. + When padding multiple dimensions iteratively corner values are + unnecessarily overwritten multiple times. This function reduces the + working area for the first dimensions so that corners are excluded. Parameters ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - end : scalar - Constal value to use. For best results should be of type `arr.dtype`; - if not `arr.dtype` will be cast to `arr.dtype`. + array : ndarray + The array with the region of interest. + original_area_slice : tuple of slices + Denotes the area with original values of the unpadded array. axis : int - Axis along which to pad `arr`. + The currently padded dimension assuming that `axis` is padded before + `axis` + 1. Returns ------- - padarr : ndarray - Output array, with `pad_amt` values prepended along `axis`. The - prepended region ramps linearly from the edge value to `end`. - + roi : ndarray + The region of interest of the original `array`. 
""" - if pad_amt == 0: - return arr + axis += 1 + sl = (slice(None),) * axis + original_area_slice[axis:] + return array[sl] - # Generate shape for final concatenated array - padshape = tuple(x if i != axis else pad_amt - for (i, x) in enumerate(arr.shape)) - # Generate an n-dimensional array incrementing along `axis` - ramp_arr = _arange_ndarray(arr, padshape, axis, - reverse=True).astype(np.float64) - - # Appropriate slicing to extract n-dimensional edge along `axis` - edge_slice = _slice_first(arr.shape, 1, axis=axis) - - # Extract edge, and extend along `axis` - edge_pad = arr[edge_slice].repeat(pad_amt, axis) - - # Linear ramp - slope = (end - edge_pad) / float(pad_amt) - ramp_arr = ramp_arr * slope - ramp_arr += edge_pad - _round_ifneeded(ramp_arr, arr.dtype) - - # Ramp values will most likely be float, cast them to the same type as arr - return _do_prepend(arr, ramp_arr, axis) - - -def _append_ramp(arr, pad_amt, end, axis=-1): +def _pad_simple(array, pad_width, fill_value=None): """ - Append linear ramp along `axis`. + Pad array on all sides with either a single value or undefined values. Parameters ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - end : scalar - Constal value to use. For best results should be of type `arr.dtype`; - if not `arr.dtype` will be cast to `arr.dtype`. - axis : int - Axis along which to pad `arr`. + array : ndarray + Array to grow. + pad_width : sequence of tuple[int, int] + Pad width on both sides for each dimension in `arr`. + fill_value : scalar, optional + If provided the padded area is filled with this value, otherwise + the pad area left undefined. Returns ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region ramps linearly from the edge value to `end`. - + padded : ndarray + The padded array with the same dtype as`array`. Its order will default + to C-style if `array` is not F-contiguous. + original_area_slice : tuple + A tuple of slices pointing to the area of the original array. 
""" - if pad_amt == 0: - return arr - - # Generate shape for final concatenated array - padshape = tuple(x if i != axis else pad_amt - for (i, x) in enumerate(arr.shape)) - - # Generate an n-dimensional array incrementing along `axis` - ramp_arr = _arange_ndarray(arr, padshape, axis, - reverse=False).astype(np.float64) - - # Slice a chunk from the edge to calculate stats on - edge_slice = _slice_last(arr.shape, 1, axis=axis) + # Allocate grown array + new_shape = tuple( + left + size + right + for size, (left, right) in zip(array.shape, pad_width) + ) + order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order + padded = np.empty(new_shape, dtype=array.dtype, order=order) - # Extract edge, and extend along `axis` - edge_pad = arr[edge_slice].repeat(pad_amt, axis) + if fill_value is not None: + padded.fill(fill_value) - # Linear ramp - slope = (end - edge_pad) / float(pad_amt) - ramp_arr = ramp_arr * slope - ramp_arr += edge_pad - _round_ifneeded(ramp_arr, arr.dtype) + # Copy old array into correct space + original_area_slice = tuple( + slice(left, left + size) + for size, (left, right) in zip(array.shape, pad_width) + ) + padded[original_area_slice] = array - # Ramp values will most likely be float, cast them to the same type as arr - return _do_append(arr, ramp_arr, axis) + return padded, original_area_slice -def _prepend_max(arr, pad_amt, num, axis=-1): +def _set_pad_area(padded, axis, width_pair, value_pair): """ - Prepend `pad_amt` maximum values along `axis`. + Set empty-padded area in given dimension. Parameters ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - num : int - Depth into `arr` along `axis` to calculate maximum. - Range: [1, `arr.shape[axis]`] or None (entire axis) + padded : ndarray + Array with the pad area which is modified inplace. axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - prepended region is the maximum of the first `num` values along - `axis`. - + Dimension with the pad area to set. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + value_pair : tuple of scalars or ndarrays + Values inserted into the pad area on each side. It must match or be + broadcastable to the shape of `arr`. """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _prepend_edge(arr, pad_amt, axis) + left_slice = _slice_at_axis(slice(None, width_pair[0]), axis) + padded[left_slice] = value_pair[0] - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None + right_slice = _slice_at_axis( + slice(padded.shape[axis] - width_pair[1], None), axis) + padded[right_slice] = value_pair[1] - # Slice a chunk from the edge to calculate stats on - max_slice = _slice_first(arr.shape, num, axis=axis) - # Extract slice, calculate max - max_chunk = arr[max_slice].max(axis=axis, keepdims=True) - - # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` - return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis) - - -def _append_max(arr, pad_amt, num, axis=-1): +def _get_edges(padded, axis, width_pair): """ - Pad one `axis` of `arr` with the maximum of the last `num` elements. + Retrieve edge values from empty-padded array in given dimension. Parameters ---------- - arr : ndarray - Input array of arbitrary shape. 
- pad_amt : int - Amount of padding to append. - num : int - Depth into `arr` along `axis` to calculate maximum. - Range: [1, `arr.shape[axis]`] or None (entire axis) + padded : ndarray + Empty-padded array. axis : int - Axis along which to pad `arr`. + Dimension in which the edges are considered. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. Returns ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region is the maximum of the final `num` values along `axis`. - + left_edge, right_edge : ndarray + Edge values of the valid area in `padded` in the given dimension. Their + shapes will always match `padded` except for the dimension given by + `axis`, which will have a length of 1. """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _append_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None + left_index = width_pair[0] + left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis) + left_edge = padded[left_slice] - # Slice a chunk from the edge to calculate stats on - if num is not None: - max_slice = _slice_last(arr.shape, num, axis=axis) - else: - max_slice = tuple(slice(None) for x in arr.shape) - - # Extract slice, calculate max - max_chunk = arr[max_slice].max(axis=axis, keepdims=True) + right_index = padded.shape[axis] - width_pair[1] + right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis) + right_edge = padded[right_slice] - # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt` - return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis) + return left_edge, right_edge -def _prepend_mean(arr, pad_amt, num, axis=-1): +def _get_linear_ramps(padded, axis, width_pair, end_value_pair): """ - Prepend `pad_amt` mean values along `axis`. + Construct linear ramps for empty-padded array in given dimension. Parameters ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - num : int - Depth into `arr` along `axis` to calculate mean. - Range: [1, `arr.shape[axis]`] or None (entire axis) + padded : ndarray + Empty-padded array. axis : int - Axis along which to pad `arr`. + Dimension in which the ramps are constructed. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + end_value_pair : (scalar, scalar) + End values for the linear ramps which form the edge of the fully padded + array. These values are included in the linear ramps. Returns ------- - padarr : ndarray - Output array, with `pad_amt` values prepended along `axis`. The - prepended region is the mean of the first `num` values along `axis`. - + left_ramp, right_ramp : ndarray + Linear ramps to set on both sides of `padded`.
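Because `_get_edges` uses length-1 slices rather than scalar indexing, the sliced dimension is kept and the edges broadcast cleanly against the pad area. In 1-D, with `width_pair == (2, 1)`:

    >>> padded = np.array([0, 0, 3, 4, 5, 0])
    >>> left_edge = padded[2:3]    # first valid value, axis preserved
    >>> right_edge = padded[4:5]   # last valid value
    >>> left_edge, right_edge
    (array([3]), array([5]))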
""" - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _prepend_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None + edge_pair = _get_edges(padded, axis, width_pair) - # Slice a chunk from the edge to calculate stats on - mean_slice = _slice_first(arr.shape, num, axis=axis) + left_ramp = _linear_ramp( + padded.ndim, axis, start=end_value_pair[0], stop=edge_pair[0], + size=width_pair[0], reverse=False + ) + _round_if_needed(left_ramp, padded.dtype) - # Extract slice, calculate mean - mean_chunk = arr[mean_slice].mean(axis, keepdims=True) - _round_ifneeded(mean_chunk, arr.dtype) + right_ramp = _linear_ramp( + padded.ndim, axis, start=end_value_pair[1], stop=edge_pair[1], + size=width_pair[1], reverse=True + ) + _round_if_needed(right_ramp, padded.dtype) - # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` - return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis) + return left_ramp, right_ramp -def _append_mean(arr, pad_amt, num, axis=-1): +def _get_stats(padded, axis, width_pair, length_pair, stat_func): """ - Append `pad_amt` mean values along `axis`. + Calculate statistic for the empty-padded array in given dimnsion. Parameters ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - num : int - Depth into `arr` along `axis` to calculate mean. - Range: [1, `arr.shape[axis]`] or None (entire axis) + padded : ndarray + Empty-padded array. axis : int - Axis along which to pad `arr`. + Dimension in which the statistic is calculated. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. + length_pair : 2-element sequence of None or int + Gives the number of values in valid area from each side that is + taken into account when calculating the statistic. If None the entire + valid area in `padded` is considered. + stat_func : function + Function to compute statistic. The expected signature is + ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``. Returns ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region is the maximum of the final `num` values along `axis`. - - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _append_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - if num is not None: - mean_slice = _slice_last(arr.shape, num, axis=axis) - else: - mean_slice = tuple(slice(None) for x in arr.shape) - - # Extract slice, calculate mean - mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True) - _round_ifneeded(mean_chunk, arr.dtype) - - # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt` - return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis) - - -def _prepend_med(arr, pad_amt, num, axis=-1): - """ - Prepend `pad_amt` median values along `axis`. + left_stat, right_stat : ndarray + Calculated statistic for both sides of `padded`. 
+ """ + # Calculate indices of the edges of the area with original values + left_index = width_pair[0] + right_index = padded.shape[axis] - width_pair[1] + # as well as its length + max_length = right_index - left_index + + # Limit stat_lengths to max_length + left_length, right_length = length_pair + if left_length is None or max_length < left_length: + left_length = max_length + if right_length is None or max_length < right_length: + right_length = max_length + + # Calculate statistic for the left side + left_slice = _slice_at_axis( + slice(left_index, left_index + left_length), axis) + left_chunk = padded[left_slice] + left_stat = stat_func(left_chunk, axis=axis, keepdims=True) + _round_if_needed(left_stat, padded.dtype) + + if left_length == right_length == max_length: + # return early as right_stat must be identical to left_stat + return left_stat, left_stat + + # Calculate statistic for the right side + right_slice = _slice_at_axis( + slice(right_index - right_length, right_index), axis) + right_chunk = padded[right_slice] + right_stat = stat_func(right_chunk, axis=axis, keepdims=True) + _round_if_needed(right_stat, padded.dtype) + return left_stat, right_stat + + +def _set_reflect_both(padded, axis, width_pair, method, include_edge=False): + """ + Pad `axis` of `arr` with reflection. Parameters ---------- - arr : ndarray + padded : ndarray Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - num : int - Depth into `arr` along `axis` to calculate median. - Range: [1, `arr.shape[axis]`] or None (entire axis) axis : int Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values prepended along `axis`. The - prepended region is the median of the first `num` values along `axis`. - - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _prepend_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - med_slice = _slice_first(arr.shape, num, axis=axis) - - # Extract slice, calculate median - med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True) - _round_ifneeded(med_chunk, arr.dtype) - - # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` - return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis) - - -def _append_med(arr, pad_amt, num, axis=-1): - """ - Append `pad_amt` median values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - num : int - Depth into `arr` along `axis` to calculate median. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region is the median of the final `num` values along `axis`. 
- - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _append_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - if num is not None: - med_slice = _slice_last(arr.shape, num, axis=axis) - else: - med_slice = tuple(slice(None) for x in arr.shape) - - # Extract slice, calculate median - med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True) - _round_ifneeded(med_chunk, arr.dtype) - - # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt` - return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis) - - -def _prepend_min(arr, pad_amt, num, axis=-1): - """ - Prepend `pad_amt` minimum values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to prepend. - num : int - Depth into `arr` along `axis` to calculate minimum. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values prepended along `axis`. The - prepended region is the minimum of the first `num` values along - `axis`. - - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _prepend_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - min_slice = _slice_first(arr.shape, num, axis=axis) - - # Extract slice, calculate min - min_chunk = arr[min_slice].min(axis=axis, keepdims=True) - - # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` - return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis) - - -def _append_min(arr, pad_amt, num, axis=-1): - """ - Append `pad_amt` median values along `axis`. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : int - Amount of padding to append. - num : int - Depth into `arr` along `axis` to calculate minimum. - Range: [1, `arr.shape[axis]`] or None (entire axis) - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt` values appended along `axis`. The - appended region is the minimum of the final `num` values along `axis`. - - """ - if pad_amt == 0: - return arr - - # Equivalent to edge padding for single value, so do that instead - if num == 1: - return _append_edge(arr, pad_amt, axis) - - # Use entire array if `num` is too large - if num is not None: - if num >= arr.shape[axis]: - num = None - - # Slice a chunk from the edge to calculate stats on - if num is not None: - min_slice = _slice_last(arr.shape, num, axis=axis) - else: - min_slice = tuple(slice(None) for x in arr.shape) - - # Extract slice, calculate min - min_chunk = arr[min_slice].min(axis=axis, keepdims=True) - - # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt` - return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis) - - -def _pad_ref(arr, pad_amt, method, axis=-1): - """ - Pad `axis` of `arr` by reflection. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. - pad_amt : tuple of ints, length 2 - Padding to (prepend, append) along `axis`. 
+ width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. method : str Controls method of reflection; options are 'even' or 'odd'. - axis : int - Axis along which to pad `arr`. + include_edge : bool + If true, edge value is included in reflection, otherwise the edge + value forms the symmetric axis to the reflection. Returns ------- - padarr : ndarray - Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` - values appended along `axis`. Both regions are padded with reflected - values from the original array. - - Notes - ----- - This algorithm does not pad with repetition, i.e. the edges are not - repeated in the reflection. For that behavior, use `mode='symmetric'`. - - The modes 'reflect', 'symmetric', and 'wrap' must be padded with a - single function, lest the indexing tricks in non-integer multiples of the - original shape would violate repetition in the final iteration. - - """ - # Implicit booleanness to test for zero (or None) in any scalar type - if pad_amt[0] == 0 and pad_amt[1] == 0: - return arr - - ########################################################################## - # Prepended region - - # Slice off a reverse indexed chunk from near edge to pad `arr` before - ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis) - - ref_chunk1 = arr[ref_slice] - - # Memory/computationally more expensive, only do this if `method='odd'` - if 'odd' in method and pad_amt[0] > 0: - edge_slice1 = _slice_first(arr.shape, 1, axis=axis) - edge_chunk = arr[edge_slice1] - ref_chunk1 = 2 * edge_chunk - ref_chunk1 - del edge_chunk - - ########################################################################## - # Appended region - - # Slice off a reverse indexed chunk from far edge to pad `arr` after - start = arr.shape[axis] - pad_amt[1] - 1 - end = arr.shape[axis] - 1 - ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis) - rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis) - ref_chunk2 = arr[ref_slice][rev_idx] - - if 'odd' in method: - edge_slice2 = _slice_last(arr.shape, 1, axis=axis) - edge_chunk = arr[edge_slice2] - ref_chunk2 = 2 * edge_chunk - ref_chunk2 - del edge_chunk - - # Concatenate `arr` with both chunks, extending along `axis` - return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis) - - -def _pad_sym(arr, pad_amt, method, axis=-1): - """ - Pad `axis` of `arr` by symmetry. - - Parameters - ---------- - arr : ndarray - Input array of arbitrary shape. pad_amt : tuple of ints, length 2 - Padding to (prepend, append) along `axis`. - method : str - Controls method of symmetry; options are 'even' or 'odd'. - axis : int - Axis along which to pad `arr`. - - Returns - ------- - padarr : ndarray - Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` - values appended along `axis`. Both regions are padded with symmetric - values from the original array. - - Notes - ----- - This algorithm DOES pad with repetition, i.e. the edges are repeated. - For padding without repeated edges, use `mode='reflect'`. - - The modes 'reflect', 'symmetric', and 'wrap' must be padded with a - single function, lest the indexing tricks in non-integer multiples of the - original shape would violate repetition in the final iteration. - + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. 
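With `reflect_type='odd'`, values are mirrored through the edge point (both the removed `_pad_ref` and the new `_set_reflect_both` negate the chunk around the edge value, `2 * edge - chunk`):

    >>> np.pad(np.array([1, 2, 3]), 2, mode="reflect", reflect_type="odd")
    array([-1,  0,  1,  2,  3,  4,  5])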
""" - # Implicit booleanness to test for zero (or None) in any scalar type - if pad_amt[0] == 0 and pad_amt[1] == 0: - return arr - - ########################################################################## - # Prepended region - - # Slice off a reverse indexed chunk from near edge to pad `arr` before - sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis) - rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis) - sym_chunk1 = arr[sym_slice][rev_idx] - - # Memory/computationally more expensive, only do this if `method='odd'` - if 'odd' in method and pad_amt[0] > 0: - edge_slice1 = _slice_first(arr.shape, 1, axis=axis) - edge_chunk = arr[edge_slice1] - sym_chunk1 = 2 * edge_chunk - sym_chunk1 - del edge_chunk + left_pad, right_pad = width_pair + old_length = padded.shape[axis] - right_pad - left_pad - ########################################################################## - # Appended region - - # Slice off a reverse indexed chunk from far edge to pad `arr` after - sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis) - sym_chunk2 = arr[sym_slice][rev_idx] - - if 'odd' in method: - edge_slice2 = _slice_last(arr.shape, 1, axis=axis) - edge_chunk = arr[edge_slice2] - sym_chunk2 = 2 * edge_chunk - sym_chunk2 - del edge_chunk - - # Concatenate `arr` with both chunks, extending along `axis` - return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis) - - -def _pad_wrap(arr, pad_amt, axis=-1): - """ - Pad `axis` of `arr` via wrapping. + if include_edge: + # Edge is included, we need to offset the pad amount by 1 + edge_offset = 1 + else: + edge_offset = 0 # Edge is not included, no need to offset pad amount + old_length -= 1 # but must be omitted from the chunk + + if left_pad > 0: + # Pad with reflected values on left side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, left_pad) + # Slice right to left, stop on or next to edge, start relative to stop + stop = left_pad - edge_offset + start = stop + chunk_length + left_slice = _slice_at_axis(slice(start, stop, -1), axis) + left_chunk = padded[left_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis) + left_chunk = 2 * padded[edge_slice] - left_chunk + + # Insert chunk into padded area + start = left_pad - chunk_length + stop = left_pad + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = left_chunk + # Adjust pointer to left edge for next iteration + left_pad -= chunk_length + + if right_pad > 0: + # Pad with reflected values on right side: + # First limit chunk size which can't be larger than pad area + chunk_length = min(old_length, right_pad) + # Slice right to left, start on or next to edge, stop relative to start + start = -right_pad + edge_offset - 2 + stop = start - chunk_length + right_slice = _slice_at_axis(slice(start, stop, -1), axis) + right_chunk = padded[right_slice] + + if method == "odd": + # Negate chunk and align with edge + edge_slice = _slice_at_axis( + slice(-right_pad - 1, -right_pad), axis) + right_chunk = 2 * padded[edge_slice] - right_chunk + + # Insert chunk into padded area + start = padded.shape[axis] - right_pad + stop = start + chunk_length + pad_area = _slice_at_axis(slice(start, stop), axis) + padded[pad_area] = right_chunk + # Adjust pointer to right edge for next iteration + right_pad -= chunk_length + + return left_pad, right_pad + + +def _set_wrap_both(padded, axis, width_pair): + """ + Pad `axis` of `arr` with wrapped values. 
Parameters ---------- - arr : ndarray + padded : ndarray Input array of arbitrary shape. - pad_amt : tuple of ints, length 2 - Padding to (prepend, append) along `axis`. axis : int Axis along which to pad `arr`. + width_pair : (int, int) + Pair of widths that mark the pad area on both sides in the given + dimension. Returns ------- - padarr : ndarray - Output array, with `pad_amt[0]` values prepended and `pad_amt[1]` - values appended along `axis`. Both regions are padded wrapped values - from the opposite end of `axis`. - - Notes - ----- - This method of padding is also known as 'tile' or 'tiling'. - - The modes 'reflect', 'symmetric', and 'wrap' must be padded with a - single function, lest the indexing tricks in non-integer multiples of the - original shape would violate repetition in the final iteration. - - """ - # Implicit booleanness to test for zero (or None) in any scalar type - if pad_amt[0] == 0 and pad_amt[1] == 0: - return arr - - ########################################################################## - # Prepended region - - # Slice off a reverse indexed chunk from near edge to pad `arr` before - wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis) - wrap_chunk1 = arr[wrap_slice] - - ########################################################################## - # Appended region - - # Slice off a reverse indexed chunk from far edge to pad `arr` after - wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis) - wrap_chunk2 = arr[wrap_slice] - - # Concatenate `arr` with both chunks, extending along `axis` - return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis) + pad_amt : tuple of ints, length 2 + New index positions of padding to do along the `axis`. If these are + both 0, padding is done in this dimension. + """ + left_pad, right_pad = width_pair + period = padded.shape[axis] - right_pad - left_pad + + # If the current dimension of `arr` doesn't contain enough valid values + # (not part of the undefined pad area) we need to pad multiple times. + # Each time the pad area shrinks on both sides which is communicated with + # these variables. + new_left_pad = 0 + new_right_pad = 0 + + if left_pad > 0: + # Pad with wrapped values on left side + # First slice chunk from right side of the non-pad area. + # Use min(period, left_pad) to ensure that chunk is not larger than + # pad area + right_slice = _slice_at_axis( + slice(-right_pad - min(period, left_pad), + -right_pad if right_pad != 0 else None), + axis + ) + right_chunk = padded[right_slice] + + if left_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis) + new_left_pad = left_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(None, left_pad), axis) + padded[pad_area] = right_chunk + + if right_pad > 0: + # Pad with wrapped values on right side + # First slice chunk from left side of the non-pad area. 
+ # Use min(period, right_pad) to ensure that chunk is not larger than + # pad area + left_slice = _slice_at_axis( + slice(left_pad, left_pad + min(period, right_pad),), axis) + left_chunk = padded[left_slice] + + if right_pad > period: + # Chunk is smaller than pad area + pad_area = _slice_at_axis( + slice(-right_pad, -right_pad + period), axis) + new_right_pad = right_pad - period + else: + # Chunk matches pad area + pad_area = _slice_at_axis(slice(-right_pad, None), axis) + padded[pad_area] = left_chunk + + return new_left_pad, new_right_pad def _as_pairs(x, ndim, as_index=False): @@ -953,23 +567,23 @@ def _as_pairs(x, ndim, as_index=False): return np.broadcast_to(x, (ndim, 2)).tolist() -############################################################################### -# Public functions +def _pad_dispatcher(array, pad_width, mode=None, **kwargs): + return (array,) -def _pad_dispatcher(array, pad_width, mode, **kwargs): - return (array,) +############################################################################### +# Public functions @array_function_dispatch(_pad_dispatcher, module='numpy') -def pad(array, pad_width, mode, **kwargs): +def pad(array, pad_width, mode='constant', **kwargs): """ - Pads an array. + Pad an array. Parameters ---------- array : array_like of rank N - Input array + The array to pad. pad_width : {sequence, array_like, int} Number of values padded to the edges of each axis. ((before_1, after_1), ... (before_N, after_N)) unique pad widths @@ -977,10 +591,10 @@ def pad(array, pad_width, mode, **kwargs): ((before, after),) yields same before and after pad for each axis. (pad,) or int is a shortcut for before = after = pad width for all axes. - mode : str or function + mode : str or function, optional One of the following string values or a user supplied function. - 'constant' + 'constant' (default) Pads with a constant value. 'edge' Pads with the edge values of array. @@ -1010,6 +624,11 @@ def pad(array, pad_width, mode, **kwargs): Pads with the wrap of the vector along the axis. The first values are used to pad the end and the end values are used to pad the beginning. + 'empty' + Pads with undefined values. + + .. versionadded:: 1.17 + <function> Padding function, see Notes. stat_length : sequence or int, optional @@ -1026,31 +645,31 @@ def pad(array, pad_width, mode, **kwargs): length for all axes. Default is ``None``, to use the entire axis. - constant_values : sequence or int, optional + constant_values : sequence or scalar, optional Used in 'constant'. The values to set the padded values for each axis. - ((before_1, after_1), ... (before_N, after_N)) unique pad constants + ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants for each axis. - ((before, after),) yields same before and after constants for each + ``((before, after),)`` yields same before and after constants for each axis. - (constant,) or int is a shortcut for before = after = constant for + ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is 0. - end_values : sequence or int, optional + end_values : sequence or scalar, optional Used in 'linear_ramp'. The values used for the ending value of the linear_ramp and that will form the edge of the padded array. - ((before_1, after_1), ... (before_N, after_N)) unique end values + ``((before_1, after_1), ... (before_N, after_N))`` unique end values for each axis. 
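When the requested pad is wider than the original axis, `_set_wrap_both` runs repeatedly, consuming one period per pass; through the public API:

    >>> np.pad(np.arange(3), (0, 7), mode="wrap")
    array([0, 1, 2, 0, 1, 2, 0, 1, 2, 0])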
- ((before, after),) yields same before and after end values for each + ``((before, after),)`` yields same before and after end values for each axis. - (constant,) or int is a shortcut for before = after = end value for + ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for all axes. Default is 0. @@ -1075,9 +694,8 @@ def pad(array, pad_width, mode, **kwargs): think about with a rank 2 array where the corners of the padded array are calculated by using padded values from the first axis. - The padding function, if used, should return a rank 1 array equal in - length to the vector argument with padded values replaced. It has the - following signature:: + The padding function, if used, should modify a rank 1 array in-place. It + has the following signature:: padding_func(vector, iaxis_pad_width, iaxis, kwargs) @@ -1085,7 +703,7 @@ def pad(array, pad_width, mode, **kwargs): vector : ndarray A rank 1 array already padded with zeros. Padded values are - vector[:pad_tuple[0]] and vector[-pad_tuple[1]:]. + vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:]. iaxis_pad_width : tuple A 2-tuple of ints, iaxis_pad_width[0] represents the number of values padded at the beginning of vector where @@ -1099,7 +717,7 @@ def pad(array, pad_width, mode, **kwargs): Examples -------- >>> a = [1, 2, 3, 4, 5] - >>> np.pad(a, (2,3), 'constant', constant_values=(4, 6)) + >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6)) array([4, 4, 1, ..., 6, 6, 6]) >>> np.pad(a, (2, 3), 'edge') @@ -1147,7 +765,6 @@ def pad(array, pad_width, mode, **kwargs): ... pad_value = kwargs.get('padder', 10) ... vector[:pad_width[0]] = pad_value ... vector[-pad_width[1]:] = pad_value - ... return vector >>> a = np.arange(6) >>> a = a.reshape((2, 3)) >>> np.pad(a, 2, pad_with) @@ -1165,15 +782,30 @@ def pad(array, pad_width, mode, **kwargs): [100, 100, 100, 100, 100, 100, 100], [100, 100, 100, 100, 100, 100, 100]]) """ - if not np.asarray(pad_width).dtype.kind == 'i': + array = np.asarray(array) + pad_width = np.asarray(pad_width) + + if not pad_width.dtype.kind == 'i': raise TypeError('`pad_width` must be of integral type.') - narray = np.array(array) - pad_width = _as_pairs(pad_width, narray.ndim, as_index=True) + # Broadcast to shape (array.ndim, 2) + pad_width = _as_pairs(pad_width, array.ndim, as_index=True) - allowedkwargs = { + if callable(mode): + # Old behavior: Use user-supplied function with np.apply_along_axis + function = mode + # Create a new zero padded array + padded, _ = _pad_simple(array, pad_width, fill_value=0) + # And apply along each axis + for axis in range(padded.ndim): + np.apply_along_axis( + function, axis, padded, pad_width[axis], axis, kwargs) + return padded + + # Make sure that no unsupported keywords were passed for the current mode + allowed_kwargs = { + 'empty': [], 'edge': [], 'wrap': [], 'constant': ['constant_values'], - 'edge': [], 'linear_ramp': ['end_values'], 'maximum': ['stat_length'], 'mean': ['stat_length'], @@ -1181,175 +813,101 @@ def pad(array, pad_width, mode, **kwargs): 'minimum': ['stat_length'], 'reflect': ['reflect_type'], 'symmetric': ['reflect_type'], - 'wrap': [], - } - - kwdefaults = { - 'stat_length': None, - 'constant_values': 0, - 'end_values': 0, - 'reflect_type': 'even', - } - - if isinstance(mode, np.compat.basestring): - # Make sure have allowed kwargs appropriate for mode - for key in kwargs: - if key not in allowedkwargs[mode]: - raise ValueError('%s keyword not in allowed keywords %s' % - (key, allowedkwargs[mode])) - - # Set 
kwarg defaults - for kw in allowedkwargs[mode]: - kwargs.setdefault(kw, kwdefaults[kw]) - - # Need to only normalize particular keywords. - for i in kwargs: - if i == 'stat_length': - kwargs[i] = _as_pairs(kwargs[i], narray.ndim, as_index=True) - if i in ['end_values', 'constant_values']: - kwargs[i] = _as_pairs(kwargs[i], narray.ndim) - else: - # Drop back to old, slower np.apply_along_axis mode for user-supplied - # vector function - function = mode - - # Create a new padded array - rank = list(range(narray.ndim)) - total_dim_increase = [np.sum(pad_width[i]) for i in rank] - offset_slices = tuple( - slice(pad_width[i][0], pad_width[i][0] + narray.shape[i]) - for i in rank) - new_shape = np.array(narray.shape) + total_dim_increase - newmat = np.zeros(new_shape, narray.dtype) - - # Insert the original array into the padded array - newmat[offset_slices] = narray - - # This is the core of pad ... - for iaxis in rank: - np.apply_along_axis(function, - iaxis, - newmat, - pad_width[iaxis], - iaxis, - kwargs) - return newmat - - # If we get here, use new padding method - newmat = narray.copy() - - # API preserved, but completely new algorithm which pads by building the - # entire block to pad before/after `arr` with in one step, for each axis. - if mode == 'constant': - for axis, ((pad_before, pad_after), (before_val, after_val)) \ - in enumerate(zip(pad_width, kwargs['constant_values'])): - newmat = _prepend_const(newmat, pad_before, before_val, axis) - newmat = _append_const(newmat, pad_after, after_val, axis) - - elif mode == 'edge': - for axis, (pad_before, pad_after) in enumerate(pad_width): - newmat = _prepend_edge(newmat, pad_before, axis) - newmat = _append_edge(newmat, pad_after, axis) - - elif mode == 'linear_ramp': - for axis, ((pad_before, pad_after), (before_val, after_val)) \ - in enumerate(zip(pad_width, kwargs['end_values'])): - newmat = _prepend_ramp(newmat, pad_before, before_val, axis) - newmat = _append_ramp(newmat, pad_after, after_val, axis) - - elif mode == 'maximum': - for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ - in enumerate(zip(pad_width, kwargs['stat_length'])): - newmat = _prepend_max(newmat, pad_before, chunk_before, axis) - newmat = _append_max(newmat, pad_after, chunk_after, axis) - - elif mode == 'mean': - for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ - in enumerate(zip(pad_width, kwargs['stat_length'])): - newmat = _prepend_mean(newmat, pad_before, chunk_before, axis) - newmat = _append_mean(newmat, pad_after, chunk_after, axis) - - elif mode == 'median': - for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ - in enumerate(zip(pad_width, kwargs['stat_length'])): - newmat = _prepend_med(newmat, pad_before, chunk_before, axis) - newmat = _append_med(newmat, pad_after, chunk_after, axis) - - elif mode == 'minimum': - for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \ - in enumerate(zip(pad_width, kwargs['stat_length'])): - newmat = _prepend_min(newmat, pad_before, chunk_before, axis) - newmat = _append_min(newmat, pad_after, chunk_after, axis) - - elif mode == 'reflect': - for axis, (pad_before, pad_after) in enumerate(pad_width): - if narray.shape[axis] == 0: - # Axes with non-zero padding cannot be empty. - if pad_before > 0 or pad_after > 0: - raise ValueError("There aren't any elements to reflect" - " in axis {} of `array`".format(axis)) - # Skip zero padding on empty axes. - continue - - # Recursive padding along any axis where `pad_amt` is too large - # for indexing tricks. 
We can only safely pad the original axis - # length, to keep the period of the reflections consistent. - if ((pad_before > 0) or - (pad_after > 0)) and newmat.shape[axis] == 1: + } + try: + unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode]) + except KeyError: + raise ValueError("mode '{}' is not supported".format(mode)) + if unsupported_kwargs: + raise ValueError("unsupported keyword arguments for mode '{}': {}" + .format(mode, unsupported_kwargs)) + + stat_functions = {"maximum": np.max, "minimum": np.min, + "mean": np.mean, "median": np.median} + + # Create array with final shape and original values + # (padded area is undefined) + padded, original_area_slice = _pad_simple(array, pad_width) + # And prepare iteration over all dimensions + # (zipping may be more readable than using enumerate) + axes = range(padded.ndim) + + if mode == "constant": + values = kwargs.get("constant_values", 0) + values = _as_pairs(values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, values): + roi = _view_roi(padded, original_area_slice, axis) + _set_pad_area(roi, axis, width_pair, value_pair) + + elif mode == "empty": + pass # Do nothing as _pad_simple already returned the correct result + + elif array.size == 0: + # Only modes "constant" and "empty" can extend empty axes, all other + # modes depend on `array` not being empty + # -> ensure every empty axis is only "padded with 0" + for axis, width_pair in zip(axes, pad_width): + if array.shape[axis] == 0 and any(width_pair): + raise ValueError( + "can't extend empty axis {} using modes other than " + "'constant' or 'empty'".format(axis) + ) + # passed, don't need to do anything more as _pad_simple already + # returned the correct result + + elif mode == "edge": + for axis, width_pair in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + edge_pair = _get_edges(roi, axis, width_pair) + _set_pad_area(roi, axis, width_pair, edge_pair) + + elif mode == "linear_ramp": + end_values = kwargs.get("end_values", 0) + end_values = _as_pairs(end_values, padded.ndim) + for axis, width_pair, value_pair in zip(axes, pad_width, end_values): + roi = _view_roi(padded, original_area_slice, axis) + ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair) + _set_pad_area(roi, axis, width_pair, ramp_pair) + + elif mode in stat_functions: + func = stat_functions[mode] + length = kwargs.get("stat_length", None) + length = _as_pairs(length, padded.ndim, as_index=True) + for axis, width_pair, length_pair in zip(axes, pad_width, length): + roi = _view_roi(padded, original_area_slice, axis) + stat_pair = _get_stats(roi, axis, width_pair, length_pair, func) + _set_pad_area(roi, axis, width_pair, stat_pair) + + elif mode in {"reflect", "symmetric"}: + method = kwargs.get("reflect_type", "even") + include_edge = True if mode == "symmetric" else False + for axis, (left_index, right_index) in zip(axes, pad_width): + if array.shape[axis] == 1 and (left_index > 0 or right_index > 0): # Extending singleton dimension for 'reflect' is legacy # behavior; it really should raise an error. 
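With the dict-based keyword validation above, unknown modes and stray keywords now fail uniformly, and omitting `mode` falls back to 'constant':

    >>> np.pad([1, 1], 2)                  # mode defaults to 'constant'
    array([0, 0, 1, 1, 0, 0])
    >>> np.pad([1, 2, 3], 1, mode="const")
    Traceback (most recent call last):
        ...
    ValueError: mode 'const' is not supported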
- newmat = _prepend_edge(newmat, pad_before, axis) - newmat = _append_edge(newmat, pad_after, axis) + edge_pair = _get_edges(padded, axis, (left_index, right_index)) + _set_pad_area( + padded, axis, (left_index, right_index), edge_pair) continue - method = kwargs['reflect_type'] - safe_pad = newmat.shape[axis] - 1 - while ((pad_before > safe_pad) or (pad_after > safe_pad)): - pad_iter_b = min(safe_pad, - safe_pad * (pad_before // safe_pad)) - pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) - newmat = _pad_ref(newmat, (pad_iter_b, - pad_iter_a), method, axis) - pad_before -= pad_iter_b - pad_after -= pad_iter_a - safe_pad += pad_iter_b + pad_iter_a - newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis) - - elif mode == 'symmetric': - for axis, (pad_before, pad_after) in enumerate(pad_width): - # Recursive padding along any axis where `pad_amt` is too large - # for indexing tricks. We can only safely pad the original axis - # length, to keep the period of the reflections consistent. - method = kwargs['reflect_type'] - safe_pad = newmat.shape[axis] - while ((pad_before > safe_pad) or - (pad_after > safe_pad)): - pad_iter_b = min(safe_pad, - safe_pad * (pad_before // safe_pad)) - pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) - newmat = _pad_sym(newmat, (pad_iter_b, - pad_iter_a), method, axis) - pad_before -= pad_iter_b - pad_after -= pad_iter_a - safe_pad += pad_iter_b + pad_iter_a - newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis) - - elif mode == 'wrap': - for axis, (pad_before, pad_after) in enumerate(pad_width): - # Recursive padding along any axis where `pad_amt` is too large - # for indexing tricks. We can only safely pad the original axis - # length, to keep the period of the reflections consistent. - safe_pad = newmat.shape[axis] - while ((pad_before > safe_pad) or - (pad_after > safe_pad)): - pad_iter_b = min(safe_pad, - safe_pad * (pad_before // safe_pad)) - pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad)) - newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis) - - pad_before -= pad_iter_b - pad_after -= pad_iter_a - safe_pad += pad_iter_b + pad_iter_a - newmat = _pad_wrap(newmat, (pad_before, pad_after), axis) - - return newmat + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with reflected + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. + left_index, right_index = _set_reflect_both( + roi, axis, (left_index, right_index), + method, include_edge + ) + + elif mode == "wrap": + for axis, (left_index, right_index) in zip(axes, pad_width): + roi = _view_roi(padded, original_area_slice, axis) + while left_index > 0 or right_index > 0: + # Iteratively pad until dimension is filled with wrapped + # values. This is necessary if the pad area is larger than + # the length of the original values in the current dimension. 
+ left_index, right_index = _set_wrap_both( + roi, axis, (left_index, right_index)) + + return padded diff --git a/numpy/lib/format.py b/numpy/lib/format.py index 7648be615..4da1022ca 100644 --- a/numpy/lib/format.py +++ b/numpy/lib/format.py @@ -524,7 +524,7 @@ def _read_array_header(fp, version): elif version == (2, 0): hlength_type = '<I' else: - raise ValueError("Invalid version %r" % version) + raise ValueError("Invalid version {!r}".format(version)) hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length") header_length = struct.unpack(hlength_type, hlength_str)[0] @@ -540,29 +540,29 @@ def _read_array_header(fp, version): try: d = safe_eval(header) except SyntaxError as e: - msg = "Cannot parse header: %r\nException: %r" - raise ValueError(msg % (header, e)) + msg = "Cannot parse header: {!r}\nException: {!r}" + raise ValueError(msg.format(header, e)) if not isinstance(d, dict): - msg = "Header is not a dictionary: %r" - raise ValueError(msg % d) + msg = "Header is not a dictionary: {!r}" + raise ValueError(msg.format(d)) keys = sorted(d.keys()) if keys != ['descr', 'fortran_order', 'shape']: - msg = "Header does not contain the correct keys: %r" - raise ValueError(msg % (keys,)) + msg = "Header does not contain the correct keys: {!r}" + raise ValueError(msg.format(keys)) # Sanity-check the values. if (not isinstance(d['shape'], tuple) or not numpy.all([isinstance(x, (int, long)) for x in d['shape']])): - msg = "shape is not valid: %r" - raise ValueError(msg % (d['shape'],)) + msg = "shape is not valid: {!r}" + raise ValueError(msg.format(d['shape'])) if not isinstance(d['fortran_order'], bool): - msg = "fortran_order is not a valid bool: %r" - raise ValueError(msg % (d['fortran_order'],)) + msg = "fortran_order is not a valid bool: {!r}" + raise ValueError(msg.format(d['fortran_order'])) try: dtype = descr_to_dtype(d['descr']) except TypeError as e: - msg = "descr is not a valid dtype descriptor: %r" - raise ValueError(msg % (d['descr'],)) + msg = "descr is not a valid dtype descriptor: {!r}" + raise ValueError(msg.format(d['descr'])) return d['shape'], d['fortran_order'], dtype diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index b61a64b8e..fb40ef927 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -31,7 +31,6 @@ from numpy.core.overrides import set_module from numpy.core import overrides from numpy.core.function_base import add_newdoc from numpy.lib.twodim_base import diag -from .utils import deprecate from numpy.core.multiarray import ( _insert, add_docstring, bincount, normalize_axis_index, _monotonicity, interp as compiled_interp, interp_complex as compiled_interp_complex diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index 1e43fdb34..d702859fa 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -23,8 +23,7 @@ from ._iotools import ( ) from numpy.compat import ( - asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode, - os_fspath, os_PathLike, pickle + asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike, pickle ) if sys.version_info[0] >= 3: @@ -1123,7 +1122,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, if type(x) is bytes: return conv(x) return conv(x.encode("latin1")) - import functools converters[i] = functools.partial(tobytes_first, conv=conv) else: converters[i] = conv @@ -1974,7 +1972,6 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, if type(x) is bytes: return conv(x) return conv(x.encode("latin1")) - import 
functools user_conv = functools.partial(tobytes_first, conv=conv) else: user_conv = conv diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py index a9030e737..b7630cdcd 100644 --- a/numpy/lib/tests/test_arraypad.py +++ b/numpy/lib/tests/test_arraypad.py @@ -2,6 +2,7 @@ """ from __future__ import division, absolute_import, print_function +from itertools import chain import pytest @@ -21,6 +22,7 @@ _all_modes = { 'reflect': {'reflect_type': 'even'}, 'symmetric': {'reflect_type': 'even'}, 'wrap': {}, + 'empty': {} } @@ -108,46 +110,25 @@ class TestAsPairs(object): class TestConditionalShortcuts(object): - def test_zero_padding_shortcuts(self): + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_zero_padding_shortcuts(self, mode): test = np.arange(120).reshape(4, 5, 6) - pad_amt = [(0, 0) for axis in test.shape] - modes = ['constant', - 'edge', - 'linear_ramp', - 'maximum', - 'mean', - 'median', - 'minimum', - 'reflect', - 'symmetric', - 'wrap', - ] - for mode in modes: - assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) - - def test_shallow_statistic_range(self): + pad_amt = [(0, 0) for _ in test.shape] + assert_array_equal(test, np.pad(test, pad_amt, mode=mode)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_shallow_statistic_range(self, mode): test = np.arange(120).reshape(4, 5, 6) - pad_amt = [(1, 1) for axis in test.shape] - modes = ['maximum', - 'mean', - 'median', - 'minimum', - ] - for mode in modes: - assert_array_equal(np.pad(test, pad_amt, mode='edge'), - np.pad(test, pad_amt, mode=mode, stat_length=1)) - - def test_clip_statistic_range(self): + pad_amt = [(1, 1) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode='edge'), + np.pad(test, pad_amt, mode=mode, stat_length=1)) + + @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',]) + def test_clip_statistic_range(self, mode): test = np.arange(30).reshape(5, 6) - pad_amt = [(3, 3) for axis in test.shape] - modes = ['maximum', - 'mean', - 'median', - 'minimum', - ] - for mode in modes: - assert_array_equal(np.pad(test, pad_amt, mode=mode), - np.pad(test, pad_amt, mode=mode, stat_length=30)) + pad_amt = [(3, 3) for _ in test.shape] + assert_array_equal(np.pad(test, pad_amt, mode=mode), + np.pad(test, pad_amt, mode=mode, stat_length=30)) class TestStatistic(object): @@ -444,7 +425,7 @@ class TestStatistic(object): assert_array_equal(a, b) @pytest.mark.parametrize("mode", [ - pytest.param("mean", marks=pytest.mark.xfail(reason="gh-11216")), + "mean", "median", "minimum", "maximum" @@ -662,6 +643,11 @@ class TestConstant(object): assert_array_equal(arr, expected) + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="constant") + assert result.shape == (3, 4, 4) + class TestLinearRamp(object): def test_check_simple(self): @@ -721,6 +707,14 @@ class TestLinearRamp(object): ]) assert_equal(actual, expected) + def test_end_values(self): + """Ensure that end values are exact.""" + a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp") + assert_equal(a[:, 0], 0.) + assert_equal(a[:, -1], 0.) + assert_equal(a[0, :], 0.) + assert_equal(a[-1, :], 0.) 
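The loop-to-`pytest.mark.parametrize` conversions in this test file all follow the same pattern; a self-contained sketch of it (the test name is illustrative, not from the patch):

    import numpy as np
    import pytest

    @pytest.mark.parametrize("mode", ["maximum", "mean", "median", "minimum"])
    def test_stat_length_one_matches_edge(mode):
        # With stat_length=1 every statistic degenerates to the edge value.
        a = np.arange(30).reshape(5, 6)
        expected = np.pad(a, 1, mode="edge")
        assert np.array_equal(np.pad(a, 1, mode=mode, stat_length=1), expected)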
+ class TestReflect(object): def test_check_simple(self): @@ -831,19 +825,29 @@ class TestReflect(object): b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3]) assert_array_equal(a, b) - def test_check_padding_an_empty_array(self): - a = np.pad(np.zeros((0, 3)), ((0,), (1,)), mode='reflect') - b = np.zeros((0, 5)) - assert_array_equal(a, b) - def test_padding_empty_dimension(self): - match = "There aren't any elements to reflect in axis 0" +class TestEmptyArray(object): + """Check how padding behaves on arrays with an empty dimension.""" + + @pytest.mark.parametrize( + # Keep parametrization ordered, otherwise pytest-xdist might believe + # that different tests were collected during parallelization + "mode", sorted(_all_modes.keys() - {"constant", "empty"}) + ) + def test_pad_empty_dimension(self, mode): + match = ("can't extend empty axis 0 using modes other than 'constant' " + "or 'empty'") with pytest.raises(ValueError, match=match): - np.pad([], 4, mode='reflect') + np.pad([], 4, mode=mode) with pytest.raises(ValueError, match=match): - np.pad(np.ndarray(0), 4, mode='reflect') + np.pad(np.ndarray(0), 4, mode=mode) with pytest.raises(ValueError, match=match): - np.pad(np.zeros((0, 3)), ((1,), (0,)), mode='reflect') + np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode) + + @pytest.mark.parametrize("mode", _all_modes.keys()) + def test_pad_non_empty_dimension(self, mode): + result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode) + assert result.shape == (8, 0, 4) class TestSymmetric(object): @@ -1080,6 +1084,19 @@ class TestWrap(object): b = np.pad(a, (0, 5), mode="wrap") assert_array_equal(a, b[:-5, :-5]) + def test_repeated_wrapping(self): + """ + Check wrapping on each side individually if the wrapped area is longer + than the original array. 
+ """ + a = np.arange(5) + b = np.pad(a, (12, 0), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][3:], b) + + a = np.arange(5) + b = np.pad(a, (0, 12), mode="wrap") + assert_array_equal(np.r_[a, a, a, a][:-3], b) + class TestEdge(object): def test_check_simple(self): @@ -1120,11 +1137,23 @@ class TestEdge(object): assert_array_equal(padded, expected) +class TestEmpty(object): + def test_simple(self): + arr = np.arange(24).reshape(4, 6) + result = np.pad(arr, [(2, 3), (3, 1)], mode="empty") + assert result.shape == (9, 10) + assert_equal(arr, result[2:-3, 3:-1]) + + def test_pad_empty_dimension(self): + arr = np.zeros((3, 0, 2)) + result = np.pad(arr, [(0,), (2,), (1,)], mode="empty") + assert result.shape == (3, 4, 4) + + def test_legacy_vector_functionality(): def _padwithtens(vector, pad_width, iaxis, kwargs): vector[:pad_width[0]] = 10 vector[-pad_width[1]:] = 10 - return vector a = np.arange(6).reshape(2, 3) a = np.pad(a, 2, _padwithtens) @@ -1244,15 +1273,21 @@ def test_kwargs(mode): np.pad([1, 2, 3], 1, mode, **allowed) # Test if prohibited keyword arguments of other modes raise an error for key, value in not_allowed.items(): - match = 'keyword not in allowed keywords' + match = "unsupported keyword arguments for mode '{}'".format(mode) with pytest.raises(ValueError, match=match): np.pad([1, 2, 3], 1, mode, **{key: value}) -def test_missing_mode(): - match = "missing 1 required positional argument: 'mode'" - with pytest.raises(TypeError, match=match): - np.pad(np.ones((5, 6)), 4) +def test_constant_zero_default(): + arr = np.array([1, 1]) + assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0]) + + +@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False]) +def test_unsupported_mode(mode): + match= "mode '{}' is not supported".format(mode) + with pytest.raises(ValueError, match=match): + np.pad([1, 2, 3], 4, mode=mode) @pytest.mark.parametrize("mode", _all_modes.keys()) @@ -1261,3 +1296,26 @@ def test_non_contiguous_array(mode): result = np.pad(arr, (2, 3), mode) assert result.shape == (7, 8) assert_equal(result[2:-3, 2:-3], arr) + + +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_memory_layout_persistence(mode): + """Test if C and F order is preserved for all pad modes.""" + x = np.ones((5, 10), order='C') + assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"] + x = np.ones((5, 10), order='F') + assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"] + + +@pytest.mark.parametrize("dtype", chain( + # Skip "other" dtypes as they are not supported by all modes + np.sctypes["int"], + np.sctypes["uint"], + np.sctypes["float"], + np.sctypes["complex"] +)) +@pytest.mark.parametrize("mode", _all_modes.keys()) +def test_dtype_persistence(dtype, mode): + arr = np.zeros((3, 2, 1), dtype=dtype) + result = np.pad(arr, 1, mode=mode) + assert result.dtype == dtype diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py index 2982ca31a..b3f114b92 100644 --- a/numpy/lib/tests/test_type_check.py +++ b/numpy/lib/tests/test_type_check.py @@ -360,6 +360,14 @@ class TestNanToNum(object): assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) assert_equal(type(vals), np.ndarray) + + # perform the same tests but with nan, posinf and neginf keywords + with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1))/0., + nan=10, posinf=20, neginf=30) + assert_equal(vals, [30, 10, 20]) + assert_all(np.isfinite(vals[[0, 2]])) + assert_equal(type(vals), np.ndarray) # perform the same test but 
in-place with np.errstate(divide='ignore', invalid='ignore'): @@ -371,26 +379,48 @@ class TestNanToNum(object): assert_(vals[1] == 0) assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2])) assert_equal(type(vals), np.ndarray) + + # perform the same test but in-place + with np.errstate(divide='ignore', invalid='ignore'): + vals = np.array((-1., 0, 1))/0. + result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30) + + assert_(result is vals) + assert_equal(vals, [30, 10, 20]) + assert_all(np.isfinite(vals[[0, 2]])) + assert_equal(type(vals), np.ndarray) def test_array(self): vals = nan_to_num([1]) assert_array_equal(vals, np.array([1], int)) assert_equal(type(vals), np.ndarray) + vals = nan_to_num([1], nan=10, posinf=20, neginf=30) + assert_array_equal(vals, np.array([1], int)) + assert_equal(type(vals), np.ndarray) def test_integer(self): vals = nan_to_num(1) assert_all(vals == 1) assert_equal(type(vals), np.int_) + vals = nan_to_num(1, nan=10, posinf=20, neginf=30) + assert_all(vals == 1) + assert_equal(type(vals), np.int_) def test_float(self): vals = nan_to_num(1.0) assert_all(vals == 1.0) assert_equal(type(vals), np.float_) + vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30) + assert_all(vals == 1.1) + assert_equal(type(vals), np.float_) def test_complex_good(self): vals = nan_to_num(1+1j) assert_all(vals == 1+1j) assert_equal(type(vals), np.complex_) + vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30) + assert_all(vals == 1+1j) + assert_equal(type(vals), np.complex_) def test_complex_bad(self): with np.errstate(divide='ignore', invalid='ignore'): @@ -414,6 +444,16 @@ class TestNanToNum(object): # !! inf. Comment out for now, and see if it # !! changes #assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals)) + + def test_do_not_rewrite_previous_keyword(self): + # This is done to test that when, for instance, nan=np.inf then these + # values are not rewritten by posinf keyword to the posinf value. + with np.errstate(divide='ignore', invalid='ignore'): + vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999) + assert_all(np.isfinite(vals[[0, 2]])) + assert_all(vals[0] < -1e10) + assert_equal(vals[[1, 2]], [np.inf, 999]) + assert_equal(type(vals), np.ndarray) class TestRealIfClose(object): diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py index 2723f3440..9673a05fa 100644 --- a/numpy/lib/tests/test_utils.py +++ b/numpy/lib/tests/test_utils.py @@ -1,5 +1,6 @@ from __future__ import division, absolute_import, print_function +import inspect import sys import pytest @@ -38,6 +39,32 @@ def old_func3(self, x): new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3") +def old_func4(self, x): + """Summary. + + Further info. + """ + return x +new_func4 = deprecate(old_func4) + + +def old_func5(self, x): + """Summary. + + Bizarre indentation. + """ + return x +new_func5 = deprecate(old_func5) + + +def old_func6(self, x): + """ + Also in PEP-257. 
+ """ + return x +new_func6 = deprecate(old_func6) + + def test_deprecate_decorator(): assert_('deprecated' in old_func.__doc__) @@ -51,6 +78,25 @@ def test_deprecate_fn(): assert_('new_func3' in new_func3.__doc__) +@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") +def test_deprecate_help_indentation(): + _compare_docs(old_func4, new_func4) + _compare_docs(old_func5, new_func5) + _compare_docs(old_func6, new_func6) + + +def _compare_docs(old_func, new_func): + old_doc = inspect.getdoc(old_func) + new_doc = inspect.getdoc(new_func) + index = new_doc.index('\n\n') + 2 + assert_equal(new_doc[index:], old_doc) + + +@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings") +def test_deprecate_preserve_whitespace(): + assert_('\n Bizarre' in new_func5.__doc__) + + def test_safe_eval_nameconstant(): # Test if safe_eval supports Python 3.4 _ast.NameConstant utils.safe_eval('None') diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index f55517732..2b254b6c0 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -363,18 +363,23 @@ def _getmaxmin(t): return f.max, f.min -def _nan_to_num_dispatcher(x, copy=None): +def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None): return (x,) @array_function_dispatch(_nan_to_num_dispatcher) -def nan_to_num(x, copy=True): +def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None): """ - Replace NaN with zero and infinity with large finite numbers. + Replace NaN with zero and infinity with large finite numbers (default + behaviour) or with the numbers defined by the user using the `nan`, + `posinf` and/or `neginf` keywords. - If `x` is inexact, NaN is replaced by zero, and infinity and -infinity - replaced by the respectively largest and most negative finite floating - point values representable by ``x.dtype``. + If `x` is inexact, NaN is replaced by zero or by the user defined value in + `nan` keyword, infinity is replaced by the largest finite floating point + values representable by ``x.dtype`` or by the user defined value in + `posinf` keyword and -infinity is replaced by the most negative finite + floating point values representable by ``x.dtype`` or by the user defined + value in `neginf` keyword. For complex dtypes, the above is applied to each of the real and imaginary components of `x` separately. @@ -390,6 +395,17 @@ def nan_to_num(x, copy=True): in-place (False). The in-place operation only occurs if casting to an array does not require a copy. Default is True. + nan : int, float, optional + Value to be used to fill NaN values. If no value is passed + then NaN values will be replaced with 0.0. + posinf : int, float, optional + Value to be used to fill positive infinity values. If no value is + passed then positive infinity values will be replaced with a very + large number. + neginf : int, float, optional + Value to be used to fill negative infinity values. If no value is + passed then negative infinity values will be replaced with a very + small (or negative) number. .. 
versionadded:: 1.13 @@ -424,6 +440,9 @@ def nan_to_num(x, copy=True): >>> np.nan_to_num(x) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) + >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333) + array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03, + -1.2800000e+02, 1.2800000e+02]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary -1.28000000e+002, 1.28000000e+002]) @@ -431,6 +450,8 @@ def nan_to_num(x, copy=True): array([ 1.79769313e+308 +0.00000000e+000j, # may vary 0.00000000e+000 +0.00000000e+000j, 0.00000000e+000 +1.79769313e+308j]) + >>> np.nan_to_num(y, nan=111111, posinf=222222) + array([222222.+111111.j, 111111. +0.j, 111111.+222222.j]) """ x = _nx.array(x, subok=True, copy=copy) xtype = x.dtype.type @@ -444,10 +465,17 @@ def nan_to_num(x, copy=True): dest = (x.real, x.imag) if iscomplex else (x,) maxf, minf = _getmaxmin(x.real.dtype) + if posinf is not None: + maxf = posinf + if neginf is not None: + minf = neginf for d in dest: - _nx.copyto(d, 0.0, where=isnan(d)) - _nx.copyto(d, maxf, where=isposinf(d)) - _nx.copyto(d, minf, where=isneginf(d)) + idx_nan = isnan(d) + idx_posinf = isposinf(d) + idx_neginf = isneginf(d) + _nx.copyto(d, nan, where=idx_nan) + _nx.copyto(d, maxf, where=idx_posinf) + _nx.copyto(d, minf, where=idx_neginf) return x[()] if isscalar else x #----------------------------------------------------------------------------- diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 6b112f37a..718b55c4b 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -105,6 +105,20 @@ class _Deprecate(object): if doc is None: doc = depdoc else: + lines = doc.expandtabs().split('\n') + indent = _get_indent(lines[1:]) + if lines[0].lstrip(): + # Indent the original first line to let inspect.cleandoc() + # dedent the docstring despite the deprecation notice. + doc = indent * ' ' + doc + else: + # Remove the same leading blank lines as cleandoc() would. + skip = len(lines[0]) + 1 + for line in lines[1:]: + if len(line) > indent: + break + skip += len(line) + 1 + doc = doc[skip:] doc = '\n\n'.join([depdoc, doc]) newfunc.__doc__ = doc try: @@ -115,6 +129,21 @@ class _Deprecate(object): newfunc.__dict__.update(d) return newfunc + +def _get_indent(lines): + """ + Determines the leading whitespace that could be removed from all the lines. + """ + indent = sys.maxsize + for line in lines: + content = len(line.lstrip()) + if content: + indent = min(indent, len(line) - content) + if indent == sys.maxsize: + indent = 0 + return indent + + def deprecate(*args, **kwargs): """ Issues a DeprecationWarning, adds warning to `old_name`'s diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 304fce69f..29da77655 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -357,7 +357,7 @@ def solve(a, b): Broadcasting rules apply, see the `numpy.linalg` documentation for details. - The solutions are computed using LAPACK routine _gesv + The solutions are computed using LAPACK routine ``_gesv``. `a` must be square and of full-rank, i.e., all rows (or, equivalently, columns) must be linearly independent; if either is not true, use @@ -820,8 +820,8 @@ def qr(a, mode='reduced'): Notes ----- - This is an interface to the LAPACK routines dgeqrf, zgeqrf, - dorgqr, and zungqr. + This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``, + ``dorgqr``, and ``zungqr``. 
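The `_Deprecate` indentation change above is easiest to see with a tiny standalone sketch. Everything here is illustrative: `notice`, `original`, and the hard-coded indent of 4 merely stand in for `depdoc` and the value that `_get_indent` would measure; this is not the numpy implementation itself. It shows why the original first line must be re-indented before the deprecation notice is prepended, so that `inspect.cleandoc` can still dedent the combined docstring:

    import inspect

    original = "Summary.\n\n    Further info.\n    "
    notice = "`old_func` is deprecated, use `new_func` instead!"

    # naive concatenation: "Summary." sits flush left, so cleandoc's margin
    # is 0 and "Further info." keeps its spurious 4-space indent
    naive = "\n\n".join([notice, original])
    # patched approach: indent the original first line by the indent of the
    # continuation lines (4 here), so cleandoc dedents the whole body
    patched = "\n\n".join([notice, 4 * " " + original])

    print(inspect.cleandoc(naive))    # "Further info." stays over-indented
    print(inspect.cleandoc(patched))  # dedents cleanly, matching the old help output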
For more information on the qr factorization, see for example: https://en.wikipedia.org/wiki/QR_factorization @@ -1023,7 +1023,7 @@ def eigvals(a): Broadcasting rules apply, see the `numpy.linalg` documentation for details. - This is implemented using the _geev LAPACK routines which compute + This is implemented using the ``_geev`` LAPACK routines which compute the eigenvalues and eigenvectors of general square arrays. Examples @@ -1041,7 +1041,7 @@ def eigvals(a): >>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :]) (1.0, 1.0, 0.0) - Now multiply a diagonal matrix by Q on one side and by Q.T on the other: + Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other: >>> D = np.diag((-1,1)) >>> LA.eigvals(D) @@ -1124,7 +1124,7 @@ def eigvalsh(a, UPLO='L'): Broadcasting rules apply, see the `numpy.linalg` documentation for details. - The eigenvalues are computed using LAPACK routines _syevd, _heevd + The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``. Examples -------- @@ -1228,7 +1228,7 @@ def eig(a): Broadcasting rules apply, see the `numpy.linalg` documentation for details. - This is implemented using the _geev LAPACK routines which compute + This is implemented using the ``_geev`` LAPACK routines which compute the eigenvalues and eigenvectors of general square arrays. The number `w` is an eigenvalue of `a` if there exists a vector @@ -1279,7 +1279,7 @@ def eig(a): [0. -0.70710678j, 0. +0.70710678j]]) Complex-valued matrix with real e-values (but complex-valued e-vectors); - note that a.conj().T = a, i.e., a is Hermitian. + note that ``a.conj().T == a``, i.e., `a` is Hermitian. >>> a = np.array([[1, 1j], [-1j, 1]]) >>> w, v = LA.eig(a) @@ -1374,8 +1374,8 @@ def eigh(a, UPLO='L'): Broadcasting rules apply, see the `numpy.linalg` documentation for details. - The eigenvalues/eigenvectors are computed using LAPACK routines _syevd, - _heevd + The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``, + ``_heevd``. The eigenvalues of real symmetric or complex Hermitian matrices are always real. [1]_ The array `v` of (column) eigenvectors is unitary @@ -2019,7 +2019,7 @@ def slogdet(a): .. versionadded:: 1.6.0 The determinant is computed via LU factorization using the LAPACK - routine z/dgetrf. + routine ``z/dgetrf``. Examples @@ -2093,7 +2093,7 @@ def det(a): details. The determinant is computed via LU factorization using the LAPACK - routine z/dgetrf. + routine ``z/dgetrf``. Examples -------- @@ -2288,7 +2288,7 @@ def lstsq(a, b, rcond="warn"): def _multi_svd_norm(x, row_axis, col_axis, op): """Compute a function of the singular values of the 2-D matrices in `x`. - This is a private utility function used by numpy.linalg.norm(). + This is a private utility function used by `numpy.linalg.norm()`. Parameters ---------- @@ -2296,7 +2296,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op): row_axis, col_axis : int The axes of `x` that hold the 2-D matrices. op : callable - This should be either numpy.amin or numpy.amax or numpy.sum. + This should be either numpy.amin or `numpy.amax` or `numpy.sum`. Returns ------- @@ -2621,10 +2621,8 @@ def multi_dot(arrays): instead of:: >>> _ = np.dot(np.dot(np.dot(A, B), C), D) - ... >>> # or >>> _ = A.dot(B).dot(C).dot(D) - ... 
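As a quick illustration of the `multi_dot` example being tidied above (the shapes below are arbitrary, chosen only so the chain is cheap to verify), one call replaces the chained dot products and internally picks the cheapest parenthesization of the chain:

    import numpy as np

    A = np.ones((10, 100))
    B = np.ones((100, 5))
    C = np.ones((5, 50))
    D = np.ones((50, 2))
    out = np.linalg.multi_dot([A, B, C, D])
    assert out.shape == (10, 2)
    # agrees with the explicit chaining shown in the docstring
    assert np.allclose(out, A.dot(B).dot(C).dot(D))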
Notes ----- diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 2b17fa343..90aff6ec8 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -44,7 +44,6 @@ from numpy.compat import ( getargspec, formatargspec, long, basestring, unicode, bytes ) from numpy import expand_dims -from numpy.core.multiarray import normalize_axis_index from numpy.core.numeric import normalize_axis_tuple from numpy.core._internal import recursive from numpy.compat import pickle @@ -1198,7 +1197,6 @@ exp = _MaskedUnaryOperation(umath.exp) conjugate = _MaskedUnaryOperation(umath.conjugate) sin = _MaskedUnaryOperation(umath.sin) cos = _MaskedUnaryOperation(umath.cos) -tan = _MaskedUnaryOperation(umath.tan) arctan = _MaskedUnaryOperation(umath.arctan) arcsinh = _MaskedUnaryOperation(umath.arcsinh) sinh = _MaskedUnaryOperation(umath.sinh) @@ -3445,39 +3443,41 @@ class MaskedArray(ndarray): _set_mask = __setmask__ - def _get_mask(self): - """Return the current mask. + @property + def mask(self): + """ Current mask. """ - """ # We could try to force a reshape, but that wouldn't work in some # cases. return self._mask - mask = property(fget=_get_mask, fset=__setmask__, doc="Mask") + @mask.setter + def mask(self, value): + self.__setmask__(value) - def _get_recordmask(self): + @property + def recordmask(self): """ - Return the mask of the records. - - A record is masked when all the fields are masked. + Get or set the mask of the array if it has no named fields. For + structured arrays, returns a ndarray of booleans where entries are + ``True`` if **all** the fields are masked, ``False`` otherwise: + >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)], + ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)], + ... dtype=[('a', int), ('b', int)]) + >>> x.recordmask + array([False, False, True, False, False]) """ + _mask = self._mask.view(ndarray) if _mask.dtype.names is None: return _mask return np.all(flatten_structured_array(_mask), axis=-1) - def _set_recordmask(self): - """ - Return the mask of the records. - - A record is masked when all the fields are masked. - - """ + @recordmask.setter + def recordmask(self, mask): raise NotImplementedError("Coming soon: setting the mask per records!") - recordmask = property(fget=_get_recordmask) - def harden_mask(self): """ Force the mask to hard. @@ -3508,8 +3508,10 @@ class MaskedArray(ndarray): self._hardmask = False return self - hardmask = property(fget=lambda self: self._hardmask, - doc="Hardness of the mask") + @property + def hardmask(self): + """ Hardness of the mask """ + return self._hardmask def unshare_mask(self): """ @@ -3529,8 +3531,10 @@ class MaskedArray(ndarray): self._sharedmask = False return self - sharedmask = property(fget=lambda self: self._sharedmask, - doc="Share status of the mask (read-only).") + @property + def sharedmask(self): + """ Share status of the mask (read-only). """ + return self._sharedmask def shrink_mask(self): """ @@ -3563,39 +3567,46 @@ class MaskedArray(ndarray): self._mask = _shrink_mask(self._mask) return self - baseclass = property(fget=lambda self: self._baseclass, - doc="Class of the underlying data (read-only).") + @property + def baseclass(self): + """ Class of the underlying data (read-only). """ + return self._baseclass def _get_data(self): - """Return the current data, as a view of the original - underlying data. + """ + Returns the underlying data, as a view of the masked array. + + If the underlying data is a subclass of :class:`numpy.ndarray`, it is + returned as such. 
+ >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) + >>> x.data + matrix([[1, 2], + [3, 4]]) + + The type of the data can be accessed through the :attr:`baseclass` + attribute. """ return ndarray.view(self, self._baseclass) _data = property(fget=_get_data) data = property(fget=_get_data) - def _get_flat(self): - "Return a flat iterator." + @property + def flat(self): + """ Return a flat iterator, or set a flattened version of self to value. """ return MaskedIterator(self) - def _set_flat(self, value): - "Set a flattened version of self to value." + @flat.setter + def flat(self, value): y = self.ravel() y[:] = value - flat = property(fget=_get_flat, fset=_set_flat, - doc="Flat version of the array.") - - def get_fill_value(self): + @property + def fill_value(self): """ - Return the filling value of the masked array. - - Returns - ------- - fill_value : scalar - The filling value. + The filling value of the masked array is a scalar. When setting, None + will set to a default based on the data type. Examples -------- @@ -3608,8 +3619,17 @@ class MaskedArray(ndarray): (1e+20+0j) >>> x = np.ma.array([0, 1.], fill_value=-np.inf) - >>> x.get_fill_value() + >>> x.fill_value -inf + >>> x.fill_value = np.pi + >>> x.fill_value + 3.1415926535897931 # may vary + + Reset to default: + + >>> x.fill_value = None + >>> x.fill_value + 1e+20 """ if self._fill_value is None: @@ -3623,36 +3643,8 @@ class MaskedArray(ndarray): return self._fill_value[()] return self._fill_value - def set_fill_value(self, value=None): - """ - Set the filling value of the masked array. - - Parameters - ---------- - value : scalar, optional - The new filling value. Default is None, in which case a default - based on the data type is used. - - See Also - -------- - ma.set_fill_value : Equivalent function. - - Examples - -------- - >>> x = np.ma.array([0, 1.], fill_value=-np.inf) - >>> x.fill_value - -inf - >>> x.set_fill_value(np.pi) - >>> x.fill_value - 3.1415926535897931 # may vary - - Reset to default: - - >>> x.set_fill_value() - >>> x.fill_value - 1e+20 - - """ + @fill_value.setter + def fill_value(self, value=None): target = _check_fill_value(value, self.dtype) _fill_value = self._fill_value if _fill_value is None: @@ -3662,8 +3654,9 @@ class MaskedArray(ndarray): # Don't overwrite the attribute, just fill it (for propagation) _fill_value[()] = target - fill_value = property(fget=get_fill_value, fset=set_fill_value, - doc="Filling value.") + # kept for compatibility + get_fill_value = fill_value.fget + set_fill_value = fill_value.fset def filled(self, fill_value=None): """ @@ -4332,31 +4325,21 @@ class MaskedArray(ndarray): raise MaskError('Cannot convert masked element to a Python long.') return long(self.item()) - - def get_imag(self): + @property + def imag(self): """ - Return the imaginary part of the masked array. - - The returned array is a view on the imaginary part of the `MaskedArray` - whose `get_imag` method is called. - - Parameters - ---------- - None + The imaginary part of the masked array. - Returns - ------- - result : MaskedArray - The imaginary part of the masked array. + This property is a view on the imaginary part of this `MaskedArray`. 
See Also -------- - get_real, real, imag + real Examples -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) - >>> x.get_imag() + >>> x.imag masked_array(data=[1.0, --, 1.6], mask=[False, True, False], fill_value=1e+20) @@ -4366,32 +4349,24 @@ class MaskedArray(ndarray): result.__setmask__(self._mask) return result - imag = property(fget=get_imag, doc="Imaginary part.") + # kept for compatibility + get_imag = imag.fget - def get_real(self): + @property + def real(self): """ - Return the real part of the masked array. - - The returned array is a view on the real part of the `MaskedArray` - whose `get_real` method is called. + The real part of the masked array. - Parameters - ---------- - None - - Returns - ------- - result : MaskedArray - The real part of the masked array. + This property is a view on the real part of this `MaskedArray`. See Also -------- - get_imag, real, imag + imag Examples -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) - >>> x.get_real() + >>> x.real masked_array(data=[1.0, --, 3.45], mask=[False, True, False], fill_value=1e+20) @@ -4400,7 +4375,9 @@ class MaskedArray(ndarray): result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result - real = property(fget=get_real, doc="Real part") + + # kept for compatibility + get_real = real.fget def count(self, axis=None, keepdims=np._NoValue): """ @@ -6156,12 +6133,11 @@ class mvoid(MaskedArray): _data.fill_value = fill_value return _data - def _get_data(self): + @property + def _data(self): # Make sure that the _data part is a np.void return super(mvoid, self)._data[()] - _data = property(fget=_get_data) - def __getitem__(self, indx): """ Get the index. diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 2e3b84e1c..a51d83578 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -390,7 +390,6 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): i[axis] = slice(None, None) outshape = np.asarray(arr.shape).take(indlist) i.put(indlist, ind) - j = i.copy() res = func1d(arr[tuple(i.tolist())], *args, **kwargs) # if res is a number, then we have a smaller output array asscalar = np.isscalar(res) diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py index daf2f8770..9bcb3947d 100644 --- a/numpy/ma/mrecords.py +++ b/numpy/ma/mrecords.py @@ -19,7 +19,6 @@ import sys import warnings import numpy as np -import numpy.core.numerictypes as ntypes from numpy.compat import basestring from numpy import ( bool_, dtype, ndarray, recarray, array as narray @@ -167,24 +166,22 @@ class MaskedRecords(MaskedArray, object): _dict['_baseclass'] = recarray return - def _getdata(self): + @property + def _data(self): """ Returns the data as a recarray. """ return ndarray.view(self, recarray) - _data = property(fget=_getdata) - - def _getfieldmask(self): + @property + def _fieldmask(self): """ Alias to mask. 
""" return self._mask - _fieldmask = property(fget=_getfieldmask) - def __len__(self): """ Returns the length diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py index f8ab52bb9..440b36722 100644 --- a/numpy/ma/tests/test_subclassing.py +++ b/numpy/ma/tests/test_subclassing.py @@ -66,11 +66,11 @@ class MSubArray(SubArray, MaskedArray): _data.info = subarr.info return _data - def _get_series(self): + @property + def _series(self): _view = self.view(MaskedArray) _view._sharedmask = False return _view - _series = property(fget=_get_series) msubarray = MSubArray diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py index 68104ed0a..4ad635e38 100644 --- a/numpy/ma/timer_comparison.py +++ b/numpy/ma/timer_comparison.py @@ -430,11 +430,10 @@ if __name__ == '__main__': setup_cur = "import numpy.ma.core as module\n" + setup_base (nrepeat, nloop) = (10, 10) - if 1: - for i in range(1, 8): - func = 'tester.test_%i()' % i - cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) - cur = np.sort(cur) - print("#%i" % i + 50*'.') - print(eval("ModuleTester.test_%i.__doc__" % i)) - print("core_current : %.3f - %.3f" % (cur[0], cur[1])) + for i in range(1, 8): + func = 'tester.test_%i()' % i + cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10) + cur = np.sort(cur) + print("#%i" % i + 50*'.') + print(eval("ModuleTester.test_%i.__doc__" % i)) + print("core_current : %.3f - %.3f" % (cur[0], cur[1])) diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 6f8eadf86..3c7e8ffc2 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -791,7 +791,8 @@ class matrix(N.ndarray): """ return N.ndarray.ptp(self, axis, out)._align(axis) - def getI(self): + @property + def I(self): """ Returns the (multiplicative) inverse of invertible `self`. @@ -835,7 +836,8 @@ class matrix(N.ndarray): from numpy.dual import pinv as func return asmatrix(func(self)) - def getA(self): + @property + def A(self): """ Return `self` as an `ndarray` object. @@ -864,7 +866,8 @@ class matrix(N.ndarray): """ return self.__array__() - def getA1(self): + @property + def A1(self): """ Return `self` as a flattened `ndarray`. @@ -931,8 +934,8 @@ class matrix(N.ndarray): """ return N.ndarray.ravel(self, order=order) - - def getT(self): + @property + def T(self): """ Returns the transpose of the matrix. @@ -964,7 +967,8 @@ class matrix(N.ndarray): """ return self.transpose() - def getH(self): + @property + def H(self): """ Returns the (complex) conjugate transpose of `self`. 
@@ -998,11 +1002,12 @@ class matrix(N.ndarray): else: return self.transpose() - T = property(getT, None) - A = property(getA, None) - A1 = property(getA1, None) - H = property(getH, None) - I = property(getI, None) + # kept for compatibility + getT = T.fget + getA = A.fget + getA1 = A1.fget + getH = H.fget + getI = I.fget def _from_string(str, gdict, ldict): rows = str.split(';') diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py index 1ecc15d4a..d3911d2e1 100644 --- a/numpy/matrixlib/tests/test_masked_matrix.py +++ b/numpy/matrixlib/tests/test_masked_matrix.py @@ -22,11 +22,11 @@ class MMatrix(MaskedArray, np.matrix,): MaskedArray.__array_finalize__(self, obj) return - def _get_series(self): + @property + def _series(self): _view = self.view(MaskedArray) _view._sharedmask = False return _view - _series = property(fget=_get_series) class TestMaskedMatrix(object): diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py index c28e77e69..bfa030714 100644 --- a/numpy/polynomial/_polybase.py +++ b/numpy/polynomial/_polybase.py @@ -8,7 +8,7 @@ abc module from the stdlib, hence it is only available for Python >= 2.6. """ from __future__ import division, absolute_import, print_function -from abc import ABCMeta, abstractmethod, abstractproperty +import abc import numbers import numpy as np @@ -16,7 +16,7 @@ from . import polyutils as pu __all__ = ['ABCPolyBase'] -class ABCPolyBase(object): +class ABCPolyBase(abc.ABC): """An abstract base class for immutable series classes. ABCPolyBase provides the standard Python numerical methods @@ -59,7 +59,6 @@ class ABCPolyBase(object): Default window of the class. """ - __metaclass__ = ABCMeta # Not hashable __hash__ = None @@ -70,68 +69,84 @@ class ABCPolyBase(object): # Limit runaway size. 
T_n^m has degree n*m maxpower = 100 - @abstractproperty + @property + @abc.abstractmethod def domain(self): pass - @abstractproperty + @property + @abc.abstractmethod def window(self): pass - @abstractproperty + @property + @abc.abstractmethod def nickname(self): pass - @abstractproperty + @property + @abc.abstractmethod def basis_name(self): pass - @abstractmethod - def _add(self): + @staticmethod + @abc.abstractmethod + def _add(c1, c2): pass - @abstractmethod - def _sub(self): + @staticmethod + @abc.abstractmethod + def _sub(c1, c2): pass - @abstractmethod - def _mul(self): + @staticmethod + @abc.abstractmethod + def _mul(c1, c2): pass - @abstractmethod - def _div(self): + @staticmethod + @abc.abstractmethod + def _div(c1, c2): pass - @abstractmethod - def _pow(self): + @staticmethod + @abc.abstractmethod + def _pow(c, pow, maxpower=None): pass - @abstractmethod - def _val(self): + @staticmethod + @abc.abstractmethod + def _val(x, c): pass - @abstractmethod - def _int(self): + @staticmethod + @abc.abstractmethod + def _int(c, m, k, lbnd, scl): pass - @abstractmethod - def _der(self): + @staticmethod + @abc.abstractmethod + def _der(c, m, scl): pass - @abstractmethod - def _fit(self): + @staticmethod + @abc.abstractmethod + def _fit(x, y, deg, rcond, full): pass - @abstractmethod - def _line(self): + @staticmethod + @abc.abstractmethod + def _line(off, scl): pass - @abstractmethod - def _roots(self): + @staticmethod + @abc.abstractmethod + def _roots(c): pass - @abstractmethod - def _fromroots(self): + @staticmethod + @abc.abstractmethod + def _fromroots(r): pass def has_samecoef(self, other): diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index e0734e1b8..c37b2c6d6 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -225,15 +225,15 @@ def _zseries_div(z1, z2): """ z1 = z1.copy() z2 = z2.copy() - len1 = len(z1) - len2 = len(z2) - if len2 == 1: + lc1 = len(z1) + lc2 = len(z2) + if lc2 == 1: z1 /= z2 return z1, z1[:1]*0 - elif len1 < len2: + elif lc1 < lc2: return z1[:1]*0, z1 else: - dlen = len1 - len2 + dlen = lc1 - lc2 scl = z2[0] z2 /= scl quo = np.empty(dlen + 1, dtype=z1.dtype) @@ -244,16 +244,16 @@ def _zseries_div(z1, z2): quo[i] = z1[i] quo[dlen - i] = r tmp = r*z2 - z1[i:i+len2] -= tmp - z1[j:j+len2] -= tmp + z1[i:i+lc2] -= tmp + z1[j:j+lc2] -= tmp i += 1 j -= 1 r = z1[i] quo[i] = r tmp = r*z2 - z1[i:i+len2] -= tmp + z1[i:i+lc2] -= tmp quo /= scl - rem = z1[i+1:i-1+len2].copy() + rem = z1[i+1:i-1+lc2].copy() return quo, rem @@ -541,21 +541,7 @@ def chebfromroots(roots): array([1.5+0.j, 0. 
+0.j, 0.5+0.j]) """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [chebline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [chebmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = chebmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] + return pu._fromroots(chebline, chebmul, roots) def chebadd(c1, c2): @@ -597,15 +583,7 @@ def chebadd(c1, c2): array([4., 4., 4.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._add(c1, c2) def chebsub(c1, c2): @@ -649,16 +627,7 @@ def chebsub(c1, c2): array([ 2., 0., -2.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._sub(c1, c2) def chebmulx(c): @@ -807,6 +776,7 @@ def chebdiv(c1, c2): if c2[-1] == 0: raise ZeroDivisionError() + # note: this is more efficient than `pu._div(chebmul, c1, c2)` lc1 = len(c1) lc2 = len(c2) if lc1 < lc2: @@ -856,6 +826,9 @@ def chebpow(c, pow, maxpower=16): array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ]) """ + # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it + # avoids converting between z and c series repeatedly + # c is a trimmed copy [c] = pu.as_series([c]) power = int(pow) @@ -940,14 +913,10 @@ def chebder(c, m=1, scl=1, axis=0): c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -1063,10 +1032,8 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = c.astype(np.double) if not np.iterable(k): k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: @@ -1075,8 +1042,6 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -1238,14 +1203,7 @@ def chebval2d(x, y, c): .. versionadded:: 1.7.0 """ - try: - x, y = np.array((x, y), copy=0) - except Exception: - raise ValueError('x, y are incompatible') - - c = chebval(x, c) - c = chebval(y, c, tensor=False) - return c + return pu._valnd(chebval, c, x, y) def chebgrid2d(x, y, c): @@ -1298,9 +1256,7 @@ def chebgrid2d(x, y, c): .. versionadded:: 1.7.0 """ - c = chebval(x, c) - c = chebval(y, c) - return c + return pu._gridnd(chebval, c, x, y) def chebval3d(x, y, z, c): @@ -1351,15 +1307,7 @@ def chebval3d(x, y, z, c): .. 
versionadded:: 1.7.0 """ - try: - x, y, z = np.array((x, y, z), copy=0) - except Exception: - raise ValueError('x, y, z are incompatible') - - c = chebval(x, c) - c = chebval(y, c, tensor=False) - c = chebval(z, c, tensor=False) - return c + return pu._valnd(chebval, c, x, y, z) def chebgrid3d(x, y, z, c): @@ -1415,10 +1363,7 @@ def chebgrid3d(x, y, z, c): .. versionadded:: 1.7.0 """ - c = chebval(x, c) - c = chebval(y, c) - c = chebval(z, c) - return c + return pu._gridnd(chebval, c, x, y, z) def chebvander(x, deg): @@ -1456,9 +1401,7 @@ def chebvander(x, deg): the converted `x`. """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") + ideg = pu._deprecate_as_int(deg, "deg") if ideg < 0: raise ValueError("deg must be non-negative") @@ -1526,17 +1469,7 @@ def chebvander2d(x, y, deg): .. versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = chebvander(x, degx) - vy = chebvander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) + return pu._vander2d(chebvander, x, y, deg) def chebvander3d(x, y, z, deg): @@ -1590,18 +1523,7 @@ def chebvander3d(x, y, z, deg): .. versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = chebvander(x, degx) - vy = chebvander(y, degy) - vz = chebvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) + return pu._vander3d(chebvander, x, y, z, deg) def chebfit(x, y, deg, rcond=None, full=False, w=None): @@ -1723,81 +1645,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): -------- """ - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - deg = np.asarray(deg) - - # check arguments. - if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: - raise TypeError("deg must be an int or non-empty 1-D array of int") - if deg.min() < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - if deg.ndim == 0: - lmax = deg - order = lmax + 1 - van = chebvander(x, lmax) - else: - deg = np.sort(deg) - lmax = deg[-1] - order = len(deg) - van = chebvander(x, lmax)[:, deg] - - # set up the least squares matrices in transposed form - lhs = van.T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. 
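Since `chebfit` (like the analogous `hermfit`, `hermefit`, and `lagfit` below) now delegates the whole least-squares body being deleted here to `pu._fit`, a behavioural spot-check is easy to run. The data below are arbitrary, picked so the expected coefficients are exact:

    import numpy as np
    from numpy.polynomial import chebyshev as C

    x = np.linspace(-1, 1, 50)
    y = 1 + 2 * x                    # equals 1*T_0(x) + 2*T_1(x)
    coef = C.chebfit(x, y, 1)
    # the consolidated pu._fit path should reproduce the exact coefficients
    assert np.allclose(coef, [1.0, 2.0])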
- if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # Expand c to include non-fitted coefficients which are set to zero - if deg.ndim > 0: - if c.ndim == 2: - cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) - else: - cc = np.zeros(lmax + 1, dtype=c.dtype) - cc[deg] = c - c = cc - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning, stacklevel=2) - - if full: - return c, [resids, rank, s, rcond] - else: - return c + return pu._fit(chebvander, x, y, deg, rcond, full, w) def chebcompanion(c): @@ -1895,7 +1743,8 @@ def chebroots(c): if len(c) == 2: return np.array([-c[0]/c[1]]) - m = chebcompanion(c) + # rotated companion matrix reduces error + m = chebcompanion(c)[::-1,::-1] r = la.eigvals(m) r.sort() return r @@ -2003,9 +1852,9 @@ def chebgauss(deg): .. math:: w_i = \\pi / n """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg)) w = np.ones(ideg)*(np.pi/ideg) diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 93c9fc564..3870d1054 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -286,21 +286,7 @@ def hermfromroots(roots): array([0.+0.j, 0.+0.j]) """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [hermline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [hermmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = hermmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] + return pu._fromroots(hermline, hermmul, roots) def hermadd(c1, c2): @@ -340,15 +326,7 @@ def hermadd(c1, c2): array([2., 4., 6., 4.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._add(c1, c2) def hermsub(c1, c2): @@ -388,16 +366,7 @@ def hermsub(c1, c2): array([0., 0., 0., 4.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._sub(c1, c2) def hermmulx(c): @@ -564,26 +533,7 @@ def hermdiv(c1, c2): (array([1., 2., 3.]), array([1., 1.])) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = hermmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) + return pu._div(hermmul, c1, c2) def hermpow(c, pow, maxpower=16): @@ -620,24 +570,7 @@ def hermpow(c, pow, maxpower=16): array([81., 52., 82., 12., 9.]) """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative 
integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - prd = c - for i in range(2, power + 1): - prd = hermmul(prd, c) - return prd + return pu._pow(hermmul, c, pow, maxpower) def hermder(c, m=1, scl=1, axis=0): @@ -698,14 +631,10 @@ def hermder(c, m=1, scl=1, axis=0): c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -815,10 +744,8 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = c.astype(np.double) if not np.iterable(k): k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: @@ -827,8 +754,6 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -995,14 +920,7 @@ def hermval2d(x, y, c): .. versionadded:: 1.7.0 """ - try: - x, y = np.array((x, y), copy=0) - except Exception: - raise ValueError('x, y are incompatible') - - c = hermval(x, c) - c = hermval(y, c, tensor=False) - return c + return pu._valnd(hermval, c, x, y) def hermgrid2d(x, y, c): @@ -1055,9 +973,7 @@ def hermgrid2d(x, y, c): .. versionadded:: 1.7.0 """ - c = hermval(x, c) - c = hermval(y, c) - return c + return pu._gridnd(hermval, c, x, y) def hermval3d(x, y, z, c): @@ -1108,15 +1024,7 @@ def hermval3d(x, y, z, c): .. versionadded:: 1.7.0 """ - try: - x, y, z = np.array((x, y, z), copy=0) - except Exception: - raise ValueError('x, y, z are incompatible') - - c = hermval(x, c) - c = hermval(y, c, tensor=False) - c = hermval(z, c, tensor=False) - return c + return pu._valnd(hermval, c, x, y, z) def hermgrid3d(x, y, z, c): @@ -1172,10 +1080,7 @@ def hermgrid3d(x, y, z, c): .. versionadded:: 1.7.0 """ - c = hermval(x, c) - c = hermval(y, c) - c = hermval(z, c) - return c + return pu._gridnd(hermval, c, x, y, z) def hermvander(x, deg): @@ -1222,9 +1127,7 @@ def hermvander(x, deg): [ 1., 2., 2., -4.]]) """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") + ideg = pu._deprecate_as_int(deg, "deg") if ideg < 0: raise ValueError("deg must be non-negative") @@ -1291,17 +1194,7 @@ def hermvander2d(x, y, deg): .. 
versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = hermvander(x, degx) - vy = hermvander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) + return pu._vander2d(hermvander, x, y, deg) def hermvander3d(x, y, z, deg): @@ -1355,18 +1248,7 @@ def hermvander3d(x, y, z, deg): .. versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = hermvander(x, degx) - vy = hermvander(y, degy) - vz = hermvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) + return pu._vander3d(hermvander, x, y, z, deg) def hermfit(x, y, deg, rcond=None, full=False, w=None): @@ -1493,81 +1375,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): array([1.0218, 1.9986, 2.9999]) # may vary """ - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - deg = np.asarray(deg) - - # check arguments. - if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: - raise TypeError("deg must be an int or non-empty 1-D array of int") - if deg.min() < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - if deg.ndim == 0: - lmax = deg - order = lmax + 1 - van = hermvander(x, lmax) - else: - deg = np.sort(deg) - lmax = deg[-1] - order = len(deg) - van = hermvander(x, lmax)[:, deg] - - # set up the least squares matrices in transposed form - lhs = van.T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. 
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # Expand c to include non-fitted coefficients which are set to zero - if deg.ndim > 0: - if c.ndim == 2: - cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) - else: - cc = np.zeros(lmax+1, dtype=c.dtype) - cc[deg] = c - c = cc - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning, stacklevel=2) - - if full: - return c, [resids, rank, s, rcond] - else: - return c + return pu._fit(hermvander, x, y, deg, rcond, full, w) def hermcompanion(c): @@ -1668,7 +1476,8 @@ def hermroots(c): if len(c) == 2: return np.array([-.5*c[0]/c[1]]) - m = hermcompanion(c) + # rotated companion matrix reduces error + m = hermcompanion(c)[::-1,::-1] r = la.eigvals(m) r.sort() return r @@ -1753,9 +1562,9 @@ def hermgauss(deg): the right value when integrating 1. """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index bafb4b997..b12c0d792 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -287,21 +287,7 @@ def hermefromroots(roots): array([0.+0.j, 0.+0.j]) """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [hermeline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [hermemul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = hermemul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] + return pu._fromroots(hermeline, hermemul, roots) def hermeadd(c1, c2): @@ -341,15 +327,7 @@ def hermeadd(c1, c2): array([2., 4., 6., 4.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._add(c1, c2) def hermesub(c1, c2): @@ -389,16 +367,7 @@ def hermesub(c1, c2): array([0., 0., 0., 4.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._sub(c1, c2) def hermemulx(c): @@ -559,26 +528,7 @@ def hermediv(c1, c2): (array([1., 2., 3.]), array([1., 2.])) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = hermemul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) + return pu._div(hermemul, c1, c2) def hermepow(c, pow, maxpower=16): @@ -615,24 +565,7 @@ def hermepow(c, pow, maxpower=16): array([23., 28., 46., 12., 9.]) """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], 
dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. - prd = c - for i in range(2, power + 1): - prd = hermemul(prd, c) - return prd + return pu._pow(hermemul, c, pow, maxpower) def hermeder(c, m=1, scl=1, axis=0): @@ -693,14 +626,10 @@ def hermeder(c, m=1, scl=1, axis=0): c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -810,10 +739,8 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = c.astype(np.double) if not np.iterable(k): k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: @@ -822,8 +749,6 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -989,14 +914,7 @@ def hermeval2d(x, y, c): .. versionadded:: 1.7.0 """ - try: - x, y = np.array((x, y), copy=0) - except Exception: - raise ValueError('x, y are incompatible') - - c = hermeval(x, c) - c = hermeval(y, c, tensor=False) - return c + return pu._valnd(hermeval, c, x, y) def hermegrid2d(x, y, c): @@ -1049,9 +967,7 @@ def hermegrid2d(x, y, c): .. versionadded:: 1.7.0 """ - c = hermeval(x, c) - c = hermeval(y, c) - return c + return pu._gridnd(hermeval, c, x, y) def hermeval3d(x, y, z, c): @@ -1102,15 +1018,7 @@ def hermeval3d(x, y, z, c): .. versionadded:: 1.7.0 """ - try: - x, y, z = np.array((x, y, z), copy=0) - except Exception: - raise ValueError('x, y, z are incompatible') - - c = hermeval(x, c) - c = hermeval(y, c, tensor=False) - c = hermeval(z, c, tensor=False) - return c + return pu._valnd(hermeval, c, x, y, z) def hermegrid3d(x, y, z, c): @@ -1166,10 +1074,7 @@ def hermegrid3d(x, y, z, c): .. versionadded:: 1.7.0 """ - c = hermeval(x, c) - c = hermeval(y, c) - c = hermeval(z, c) - return c + return pu._gridnd(hermeval, c, x, y, z) def hermevander(x, deg): @@ -1216,9 +1121,7 @@ def hermevander(x, deg): [ 1., 1., 0., -2.]]) """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") + ideg = pu._deprecate_as_int(deg, "deg") if ideg < 0: raise ValueError("deg must be non-negative") @@ -1284,17 +1187,7 @@ def hermevander2d(x, y, deg): .. 
versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = hermevander(x, degx) - vy = hermevander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) + return pu._vander2d(hermevander, x, y, deg) def hermevander3d(x, y, z, deg): @@ -1348,18 +1241,7 @@ def hermevander3d(x, y, z, deg): .. versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = hermevander(x, degx) - vy = hermevander(y, degy) - vz = hermevander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) + return pu._vander3d(hermevander, x, y, z, deg) def hermefit(x, y, deg, rcond=None, full=False, w=None): @@ -1487,81 +1369,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): array([ 1.01690445, 1.99951418, 2.99948696]) # may vary """ - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - deg = np.asarray(deg) - - # check arguments. - if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: - raise TypeError("deg must be an int or non-empty 1-D array of int") - if deg.min() < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - if deg.ndim == 0: - lmax = deg - order = lmax + 1 - van = hermevander(x, lmax) - else: - deg = np.sort(deg) - lmax = deg[-1] - order = len(deg) - van = hermevander(x, lmax)[:, deg] - - # set up the least squares matrices in transposed form - lhs = van.T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. 
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # Expand c to include non-fitted coefficients which are set to zero - if deg.ndim > 0: - if c.ndim == 2: - cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) - else: - cc = np.zeros(lmax+1, dtype=c.dtype) - cc[deg] = c - c = cc - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning, stacklevel=2) - - if full: - return c, [resids, rank, s, rcond] - else: - return c + return pu._fit(hermevander, x, y, deg, rcond, full, w) def hermecompanion(c): @@ -1663,7 +1471,8 @@ def hermeroots(c): if len(c) == 2: return np.array([-c[0]/c[1]]) - m = hermecompanion(c) + # rotated companion matrix reduces error + m = hermecompanion(c)[::-1,::-1] r = la.eigvals(m) r.sort() return r @@ -1748,9 +1557,9 @@ def hermegauss(deg): the right value when integrating 1. """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index 9207c9afe..e103dde92 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -283,21 +283,7 @@ def lagfromroots(roots): array([0.+0.j, 0.+0.j]) """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [lagline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [lagmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = lagmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] + return pu._fromroots(lagline, lagmul, roots) def lagadd(c1, c2): @@ -338,15 +324,7 @@ def lagadd(c1, c2): """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._add(c1, c2) def lagsub(c1, c2): @@ -386,16 +364,7 @@ def lagsub(c1, c2): array([0., 0., 0., 4.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._sub(c1, c2) def lagmulx(c): @@ -561,26 +530,7 @@ def lagdiv(c1, c2): (array([1., 2., 3.]), array([1., 1.])) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = lagmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) + return pu._div(lagmul, c1, c2) def lagpow(c, pow, maxpower=16): @@ -617,24 +567,7 @@ def lagpow(c, pow, maxpower=16): array([ 14., -16., 56., -72., 54.]) """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This 
can be made more efficient by using powers of two - # in the usual way. - prd = c - for i in range(2, power + 1): - prd = lagmul(prd, c) - return prd + return pu._pow(lagmul, c, pow, maxpower) def lagder(c, m=1, scl=1, axis=0): @@ -695,14 +628,11 @@ def lagder(c, m=1, scl=1, axis=0): c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - if cnt != m: - raise ValueError("The order of derivation must be integer") + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -815,10 +745,8 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = c.astype(np.double) if not np.iterable(k): k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: @@ -827,8 +755,6 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -995,14 +921,7 @@ def lagval2d(x, y, c): .. versionadded:: 1.7.0 """ - try: - x, y = np.array((x, y), copy=0) - except Exception: - raise ValueError('x, y are incompatible') - - c = lagval(x, c) - c = lagval(y, c, tensor=False) - return c + return pu._valnd(lagval, c, x, y) def laggrid2d(x, y, c): @@ -1055,9 +974,7 @@ def laggrid2d(x, y, c): .. versionadded:: 1.7.0 """ - c = lagval(x, c) - c = lagval(y, c) - return c + return pu._gridnd(lagval, c, x, y) def lagval3d(x, y, z, c): @@ -1108,15 +1025,7 @@ def lagval3d(x, y, z, c): .. versionadded:: 1.7.0 """ - try: - x, y, z = np.array((x, y, z), copy=0) - except Exception: - raise ValueError('x, y, z are incompatible') - - c = lagval(x, c) - c = lagval(y, c, tensor=False) - c = lagval(z, c, tensor=False) - return c + return pu._valnd(lagval, c, x, y, z) def laggrid3d(x, y, z, c): @@ -1172,10 +1081,7 @@ def laggrid3d(x, y, z, c): .. versionadded:: 1.7.0 """ - c = lagval(x, c) - c = lagval(y, c) - c = lagval(z, c) - return c + return pu._gridnd(lagval, c, x, y, z) def lagvander(x, deg): @@ -1222,9 +1128,7 @@ def lagvander(x, deg): [ 1. , -1. , -1. , -0.33333333]]) """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") + ideg = pu._deprecate_as_int(deg, "deg") if ideg < 0: raise ValueError("deg must be non-negative") @@ -1290,17 +1194,7 @@ def lagvander2d(x, y, deg): .. versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = lagvander(x, degx) - vy = lagvander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) + return pu._vander2d(lagvander, x, y, deg) def lagvander3d(x, y, z, deg): @@ -1354,18 +1248,7 @@ def lagvander3d(x, y, z, deg): .. 
versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = lagvander(x, degx) - vy = lagvander(y, degy) - vz = lagvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) + return pu._vander3d(lagvander, x, y, z, deg) def lagfit(x, y, deg, rcond=None, full=False, w=None): @@ -1492,81 +1375,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): array([ 0.96971004, 2.00193749, 3.00288744]) # may vary """ - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - deg = np.asarray(deg) - - # check arguments. - if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: - raise TypeError("deg must be an int or non-empty 1-D array of int") - if deg.min() < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - if deg.ndim == 0: - lmax = deg - order = lmax + 1 - van = lagvander(x, lmax) - else: - deg = np.sort(deg) - lmax = deg[-1] - order = len(deg) - van = lagvander(x, lmax)[:, deg] - - # set up the least squares matrices in transposed form - lhs = van.T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # Expand c to include non-fitted coefficients which are set to zero - if deg.ndim > 0: - if c.ndim == 2: - cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) - else: - cc = np.zeros(lmax+1, dtype=c.dtype) - cc[deg] = c - c = cc - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning, stacklevel=2) - - if full: - return c, [resids, rank, s, rcond] - else: - return c + return pu._fit(lagvander, x, y, deg, rcond, full, w) def lagcompanion(c): @@ -1666,7 +1475,8 @@ def lagroots(c): if len(c) == 2: return np.array([1 + c[0]/c[1]]) - m = lagcompanion(c) + # rotated companion matrix reduces error + m = lagcompanion(c)[::-1,::-1] r = la.eigvals(m) r.sort() return r @@ -1708,9 +1518,9 @@ def laggauss(deg): the right value when integrating 1. """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. 
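The `pu._deprecate_as_int` conversions introduced above (and mirrored in the other polynomial modules below) change the exception for a non-integral `deg`, `m`, or `axis` from ValueError to TypeError, and emit a DeprecationWarning when an integral float is passed; the test updates near the end of this diff codify exactly that. A minimal sketch of the new behavior, assuming a NumPy build with this patch applied:

    import warnings
    import numpy.polynomial.laguerre as lag

    lag.lagvander([0., 1.], 3)             # plain int degree: unchanged behavior
    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)
        try:
            lag.lagvander([0., 1.], 3.0)   # integral float: DeprecationWarning
        except DeprecationWarning as w:
            print(w)
    try:
        lag.lagvander([0., 1.], 3.5)       # non-integral float: TypeError
    except TypeError as e:
        print(e)                           # -> deg must be an integer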
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index f81bc002c..9eec9740d 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -314,21 +314,7 @@ def legfromroots(roots): array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [legline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [legmul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = legmul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] + return pu._fromroots(legline, legmul, roots) def legadd(c1, c2): @@ -370,15 +356,7 @@ def legadd(c1, c2): array([4., 4., 4.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._add(c1, c2) def legsub(c1, c2): @@ -422,16 +400,7 @@ def legsub(c1, c2): array([ 2., 0., -2.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._sub(c1, c2) def legmulx(c): @@ -604,26 +573,7 @@ def legdiv(c1, c2): (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if c2[-1] == 0: - raise ZeroDivisionError() - - lc1 = len(c1) - lc2 = len(c2) - if lc1 < lc2: - return c1[:1]*0, c1 - elif lc2 == 1: - return c1/c2[-1], c1[:1]*0 - else: - quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) - rem = c1 - for i in range(lc1 - lc2, - 1, -1): - p = legmul([0]*i + [1], c2) - q = rem[-1]/p[-1] - rem = rem[:-1] - q*p[:-1] - quo[i] = q - return quo, pu.trimseq(rem) + return pu._div(legmul, c1, c2) def legpow(c, pow, maxpower=16): @@ -657,24 +607,7 @@ def legpow(c, pow, maxpower=16): -------- """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. 
- prd = c - for i in range(2, power + 1): - prd = legmul(prd, c) - return prd + return pu._pow(legmul, c, pow, maxpower) def legder(c, m=1, scl=1, axis=0): @@ -740,14 +673,10 @@ def legder(c, m=1, scl=1, axis=0): c = np.array(c, ndmin=1, copy=1) if c.dtype.char in '?bBhHiIlLqQpP': c = c.astype(np.double) - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -863,10 +792,8 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): c = c.astype(np.double) if not np.iterable(k): k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: @@ -875,8 +802,6 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -1039,14 +964,7 @@ def legval2d(x, y, c): .. versionadded:: 1.7.0 """ - try: - x, y = np.array((x, y), copy=0) - except Exception: - raise ValueError('x, y are incompatible') - - c = legval(x, c) - c = legval(y, c, tensor=False) - return c + return pu._valnd(legval, c, x, y) def leggrid2d(x, y, c): @@ -1099,9 +1017,7 @@ def leggrid2d(x, y, c): .. versionadded:: 1.7.0 """ - c = legval(x, c) - c = legval(y, c) - return c + return pu._gridnd(legval, c, x, y) def legval3d(x, y, z, c): @@ -1152,15 +1068,7 @@ def legval3d(x, y, z, c): .. versionadded:: 1.7.0 """ - try: - x, y, z = np.array((x, y, z), copy=0) - except Exception: - raise ValueError('x, y, z are incompatible') - - c = legval(x, c) - c = legval(y, c, tensor=False) - c = legval(z, c, tensor=False) - return c + return pu._valnd(legval, c, x, y, z) def leggrid3d(x, y, z, c): @@ -1216,10 +1124,7 @@ def leggrid3d(x, y, z, c): .. versionadded:: 1.7.0 """ - c = legval(x, c) - c = legval(y, c) - c = legval(z, c) - return c + return pu._gridnd(legval, c, x, y, z) def legvander(x, deg): @@ -1257,9 +1162,7 @@ def legvander(x, deg): the converted `x`. """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") + ideg = pu._deprecate_as_int(deg, "deg") if ideg < 0: raise ValueError("deg must be non-negative") @@ -1327,17 +1230,7 @@ def legvander2d(x, y, deg): .. versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = legvander(x, degx) - vy = legvander(y, degy) - v = vx[..., None]*vy[..., None,:] - return v.reshape(v.shape[:-2] + (-1,)) + return pu._vander2d(legvander, x, y, deg) def legvander3d(x, y, z, deg): @@ -1391,18 +1284,7 @@ def legvander3d(x, y, z, deg): .. 
versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = legvander(x, degx) - vy = legvander(y, degy) - vz = legvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - return v.reshape(v.shape[:-3] + (-1,)) + return pu._vander3d(legvander, x, y, z, deg) def legfit(x, y, deg, rcond=None, full=False, w=None): @@ -1526,81 +1408,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): -------- """ - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - deg = np.asarray(deg) - - # check arguments. - if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: - raise TypeError("deg must be an int or non-empty 1-D array of int") - if deg.min() < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - if deg.ndim == 0: - lmax = deg - order = lmax + 1 - van = legvander(x, lmax) - else: - deg = np.sort(deg) - lmax = deg[-1] - order = len(deg) - van = legvander(x, lmax)[:, deg] - - # set up the least squares matrices in transposed form - lhs = van.T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. - c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # Expand c to include non-fitted coefficients which are set to zero - if deg.ndim > 0: - if c.ndim == 2: - cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) - else: - cc = np.zeros(lmax+1, dtype=c.dtype) - cc[deg] = c - c = cc - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning, stacklevel=2) - - if full: - return c, [resids, rank, s, rcond] - else: - return c + return pu._fit(legvander, x, y, deg, rcond, full, w) def legcompanion(c): @@ -1697,7 +1505,8 @@ def legroots(c): if len(c) == 2: return np.array([-c[0]/c[1]]) - m = legcompanion(c) + # rotated companion matrix reduces error + m = legcompanion(c)[::-1,::-1] r = la.eigvals(m) r.sort() return r @@ -1739,9 +1548,9 @@ def leggauss(deg): the right value when integrating 1. """ - ideg = int(deg) - if ideg != deg or ideg < 1: - raise ValueError("deg must be a non-negative integer") + ideg = pu._deprecate_as_int(deg, "deg") + if ideg <= 0: + raise ValueError("deg must be a positive integer") # first approximation of roots. We use the fact that the companion # matrix is symmetric in this case in order to obtain better zeros. 
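Every `*roots` function in this diff now rotates its companion matrix by 180 degrees (`[::-1,::-1]`) before the eigenvalue solve, which per the new comments reduces round-off error in the computed roots. An illustrative comparison of the two forms (outputs are machine-dependent; the rotated variant is what `legroots` now computes):

    import numpy as np
    from numpy.polynomial import legendre as leg

    true_roots = np.linspace(-1, 1, 9)
    c = leg.legfromroots(true_roots)
    m = leg.legcompanion(c)
    plain = np.sort(np.linalg.eigvals(m))                # old behavior
    rotated = np.sort(np.linalg.eigvals(m[::-1, ::-1]))  # new behavior
    print(abs(plain - true_roots).max())
    print(abs(rotated - true_roots).max())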
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 69599e3fd..afa330502 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -188,21 +188,7 @@ def polyfromroots(roots): array([1.+0.j, 0.+0.j, 1.+0.j]) """ - if len(roots) == 0: - return np.ones(1) - else: - [roots] = pu.as_series([roots], trim=False) - roots.sort() - p = [polyline(-r, 1) for r in roots] - n = len(p) - while n > 1: - m, r = divmod(n, 2) - tmp = [polymul(p[i], p[i+m]) for i in range(m)] - if r: - tmp[0] = polymul(tmp[0], p[-1]) - p = tmp - n = m - return p[0] + return pu._fromroots(polyline, polymul, roots) def polyadd(c1, c2): @@ -238,15 +224,7 @@ def polyadd(c1, c2): 28.0 """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] += c2 - ret = c1 - else: - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._add(c1, c2) def polysub(c1, c2): @@ -283,16 +261,7 @@ def polysub(c1, c2): array([ 2., 0., -2.]) """ - # c1, c2 are trimmed copies - [c1, c2] = pu.as_series([c1, c2]) - if len(c1) > len(c2): - c1[:c2.size] -= c2 - ret = c1 - else: - c2 = -c2 - c2[:c1.size] += c1 - ret = c2 - return pu.trimseq(ret) + return pu._sub(c1, c2) def polymulx(c): @@ -411,18 +380,19 @@ def polydiv(c1, c2): if c2[-1] == 0: raise ZeroDivisionError() - len1 = len(c1) - len2 = len(c2) - if len2 == 1: - return c1/c2[-1], c1[:1]*0 - elif len1 < len2: + # note: this is more efficient than `pu._div(polymul, c1, c2)` + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 else: - dlen = len1 - len2 + dlen = lc1 - lc2 scl = c2[-1] c2 = c2[:-1]/scl i = dlen - j = len1 - 1 + j = lc1 - 1 while i >= 0: c1[i:j] -= c2*c1[j] i -= 1 @@ -464,24 +434,9 @@ def polypow(c, pow, maxpower=None): array([ 1., 4., 10., 12., 9.]) """ - # c is a trimmed copy - [c] = pu.as_series([c]) - power = int(pow) - if power != pow or power < 0: - raise ValueError("Power must be a non-negative integer.") - elif maxpower is not None and power > maxpower: - raise ValueError("Power is too large") - elif power == 0: - return np.array([1], dtype=c.dtype) - elif power == 1: - return c - else: - # This can be made more efficient by using powers of two - # in the usual way. 
- prd = c - for i in range(2, power + 1): - prd = np.convolve(prd, c) - return prd + # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it + # avoids calling `as_series` repeatedly + return pu._pow(np.convolve, c, pow, maxpower) def polyder(c, m=1, scl=1, axis=0): @@ -541,14 +496,10 @@ def polyder(c, m=1, scl=1, axis=0): # astype fails with NA c = c + 0.0 cdt = c.dtype - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of derivation must be integer") + cnt = pu._deprecate_as_int(m, "the order of derivation") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of derivation must be non-negative") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -655,10 +606,8 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): cdt = c.dtype if not np.iterable(k): k = [k] - cnt, iaxis = [int(t) for t in [m, axis]] - - if cnt != m: - raise ValueError("The order of integration must be integer") + cnt = pu._deprecate_as_int(m, "the order of integration") + iaxis = pu._deprecate_as_int(axis, "the axis") if cnt < 0: raise ValueError("The order of integration must be non-negative") if len(k) > cnt: @@ -667,8 +616,6 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): raise ValueError("lbnd must be a scalar.") if np.ndim(scl) != 0: raise ValueError("scl must be a scalar.") - if iaxis != axis: - raise ValueError("The axis must be integer") iaxis = normalize_axis_index(iaxis, c.ndim) if cnt == 0: @@ -924,14 +871,7 @@ def polyval2d(x, y, c): .. versionadded:: 1.7.0 """ - try: - x, y = np.array((x, y), copy=0) - except Exception: - raise ValueError('x, y are incompatible') - - c = polyval(x, c) - c = polyval(y, c, tensor=False) - return c + return pu._valnd(polyval, c, x, y) def polygrid2d(x, y, c): @@ -984,9 +924,7 @@ def polygrid2d(x, y, c): .. versionadded:: 1.7.0 """ - c = polyval(x, c) - c = polyval(y, c) - return c + return pu._gridnd(polyval, c, x, y) def polyval3d(x, y, z, c): @@ -1037,15 +975,7 @@ def polyval3d(x, y, z, c): .. versionadded:: 1.7.0 """ - try: - x, y, z = np.array((x, y, z), copy=0) - except Exception: - raise ValueError('x, y, z are incompatible') - - c = polyval(x, c) - c = polyval(y, c, tensor=False) - c = polyval(z, c, tensor=False) - return c + return pu._valnd(polyval, c, x, y, z) def polygrid3d(x, y, z, c): @@ -1101,10 +1031,7 @@ def polygrid3d(x, y, z, c): .. versionadded:: 1.7.0 """ - c = polyval(x, c) - c = polyval(y, c) - c = polyval(z, c) - return c + return pu._gridnd(polyval, c, x, y, z) def polyvander(x, deg): @@ -1145,9 +1072,7 @@ def polyvander(x, deg): polyvander2d, polyvander3d """ - ideg = int(deg) - if ideg != deg: - raise ValueError("deg must be integer") + ideg = pu._deprecate_as_int(deg, "deg") if ideg < 0: raise ValueError("deg must be non-negative") @@ -1208,19 +1133,7 @@ def polyvander2d(x, y, deg): polyvander, polyvander3d. 
polyval2d, polyval3d """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy = ideg - x, y = np.array((x, y), copy=0) + 0.0 - - vx = polyvander(x, degx) - vy = polyvander(y, degy) - v = vx[..., None]*vy[..., None,:] - # einsum bug - #v = np.einsum("...i,...j->...ij", vx, vy) - return v.reshape(v.shape[:-2] + (-1,)) + return pu._vander2d(polyvander, x, y, deg) def polyvander3d(x, y, z, deg): @@ -1274,20 +1187,7 @@ def polyvander3d(x, y, z, deg): .. versionadded:: 1.7.0 """ - ideg = [int(d) for d in deg] - is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)] - if is_valid != [1, 1, 1]: - raise ValueError("degrees must be non-negative integers") - degx, degy, degz = ideg - x, y, z = np.array((x, y, z), copy=0) + 0.0 - - vx = polyvander(x, degx) - vy = polyvander(y, degy) - vz = polyvander(z, degz) - v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] - # einsum bug - #v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz) - return v.reshape(v.shape[:-3] + (-1,)) + return pu._vander3d(polyvander, x, y, z, deg) def polyfit(x, y, deg, rcond=None, full=False, w=None): @@ -1433,81 +1333,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): 0.50443316, 0.28853036]), 1.1324274851176597e-014] """ - x = np.asarray(x) + 0.0 - y = np.asarray(y) + 0.0 - deg = np.asarray(deg) - - # check arguments. - if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: - raise TypeError("deg must be an int or non-empty 1-D array of int") - if deg.min() < 0: - raise ValueError("expected deg >= 0") - if x.ndim != 1: - raise TypeError("expected 1D vector for x") - if x.size == 0: - raise TypeError("expected non-empty vector for x") - if y.ndim < 1 or y.ndim > 2: - raise TypeError("expected 1D or 2D array for y") - if len(x) != len(y): - raise TypeError("expected x and y to have same length") - - if deg.ndim == 0: - lmax = deg - order = lmax + 1 - van = polyvander(x, lmax) - else: - deg = np.sort(deg) - lmax = deg[-1] - order = len(deg) - van = polyvander(x, lmax)[:, deg] - - # set up the least squares matrices in transposed form - lhs = van.T - rhs = y.T - if w is not None: - w = np.asarray(w) + 0.0 - if w.ndim != 1: - raise TypeError("expected 1D vector for w") - if len(x) != len(w): - raise TypeError("expected x and w to have same length") - # apply weights. Don't use inplace operations as they - # can cause problems with NA. - lhs = lhs * w - rhs = rhs * w - - # set rcond - if rcond is None: - rcond = len(x)*np.finfo(x.dtype).eps - - # Determine the norms of the design matrix columns. - if issubclass(lhs.dtype.type, np.complexfloating): - scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) - else: - scl = np.sqrt(np.square(lhs).sum(1)) - scl[scl == 0] = 1 - - # Solve the least squares problem. 
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond) - c = (c.T/scl).T - - # Expand c to include non-fitted coefficients which are set to zero - if deg.ndim == 1: - if c.ndim == 2: - cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype) - else: - cc = np.zeros(lmax + 1, dtype=c.dtype) - cc[deg] = c - c = cc - - # warn on rank reduction - if rank != order and not full: - msg = "The fit may be poorly conditioned" - warnings.warn(msg, pu.RankWarning, stacklevel=2) - - if full: - return c, [resids, rank, s, rcond] - else: - return c + return pu._fit(polyvander, x, y, deg, rcond, full, w) def polycompanion(c): @@ -1602,7 +1428,8 @@ def polyroots(c): if len(c) == 2: return np.array([-c[0]/c[1]]) - m = polycompanion(c) + # rotated companion matrix reduces error + m = polycompanion(c)[::-1,::-1] r = la.eigvals(m) r.sort() return r diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index eff4a8ee1..5128e35e0 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -45,6 +45,9 @@ Functions """ from __future__ import division, absolute_import, print_function +import operator +import warnings + import numpy as np __all__ = [ @@ -410,3 +413,350 @@ def mapdomain(x, old, new): x = np.asanyarray(x) off, scl = mapparms(old, new) return off + scl*x + + +def _vander2d(vander_f, x, y, deg): + """ + Helper function used to implement the ``<type>vander2d`` functions. + + Parameters + ---------- + vander_f : function(array_like, int) -> ndarray + The 1d vander function, such as ``polyvander`` + x, y, deg : + See the ``<type>vander2d`` functions for more detail + """ + degx, degy = [ + _deprecate_as_int(d, "degrees") + for d in deg + ] + x, y = np.array((x, y), copy=0) + 0.0 + + vx = vander_f(x, degx) + vy = vander_f(y, degy) + v = vx[..., None]*vy[..., None,:] + return v.reshape(v.shape[:-2] + (-1,)) + + +def _vander3d(vander_f, x, y, z, deg): + """ + Helper function used to implement the ``<type>vander3d`` functions. + + Parameters + ---------- + vander_f : function(array_like, int) -> ndarray + The 1d vander function, such as ``polyvander`` + x, y, z, deg : + See the ``<type>vander3d`` functions for more detail + """ + degx, degy, degz = [ + _deprecate_as_int(d, "degrees") + for d in deg + ] + x, y, z = np.array((x, y, z), copy=0) + 0.0 + + vx = vander_f(x, degx) + vy = vander_f(y, degy) + vz = vander_f(z, degz) + v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:] + return v.reshape(v.shape[:-3] + (-1,)) + + +def _fromroots(line_f, mul_f, roots): + """ + Helper function used to implement the ``<type>fromroots`` functions. + + Parameters + ---------- + line_f : function(float, float) -> ndarray + The ``<type>line`` function, such as ``polyline`` + mul_f : function(array_like, array_like) -> ndarray + The ``<type>mul`` function, such as ``polymul`` + roots : + See the ``<type>fromroots`` functions for more detail + """ + if len(roots) == 0: + return np.ones(1) + else: + [roots] = as_series([roots], trim=False) + roots.sort() + p = [line_f(-r, 1) for r in roots] + n = len(p) + while n > 1: + m, r = divmod(n, 2) + tmp = [mul_f(p[i], p[i+m]) for i in range(m)] + if r: + tmp[0] = mul_f(tmp[0], p[-1]) + p = tmp + n = m + return p[0] + + +def _valnd(val_f, c, *args): + """ + Helper function used to implement the ``<type>val<n>d`` functions.
+ + Parameters + ---------- + val_f : function(array_like, array_like, tensor: bool) -> array_like + The ``<type>val`` function, such as ``polyval`` + c, args : + See the ``<type>val<n>d`` functions for more detail + """ + try: + args = tuple(np.array(args, copy=False)) + except Exception: + # preserve the old error message + if len(args) == 2: + raise ValueError('x, y are incompatible') + elif len(args) == 3: + raise ValueError('x, y, z are incompatible') + else: + raise ValueError('ordinates are incompatible') + + it = iter(args) + x0 = next(it) + + # use tensor on only the first + c = val_f(x0, c) + for xi in it: + c = val_f(xi, c, tensor=False) + return c + + +def _gridnd(val_f, c, *args): + """ + Helper function used to implement the ``<type>grid<n>d`` functions. + + Parameters + ---------- + val_f : function(array_like, array_like, tensor: bool) -> array_like + The ``<type>val`` function, such as ``polyval`` + c, args : + See the ``<type>grid<n>d`` functions for more detail + """ + for xi in args: + c = val_f(xi, c) + return c + + +def _div(mul_f, c1, c2): + """ + Helper function used to implement the ``<type>div`` functions. + + Implementation uses repeated subtraction of c2 multiplied by the nth basis. + For some polynomial types, a more efficient approach may be possible. + + Parameters + ---------- + mul_f : function(array_like, array_like) -> array_like + The ``<type>mul`` function, such as ``polymul`` + c1, c2 : + See the ``<type>div`` functions for more detail + """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if c2[-1] == 0: + raise ZeroDivisionError() + + lc1 = len(c1) + lc2 = len(c2) + if lc1 < lc2: + return c1[:1]*0, c1 + elif lc2 == 1: + return c1/c2[-1], c1[:1]*0 + else: + quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype) + rem = c1 + for i in range(lc1 - lc2, - 1, -1): + p = mul_f([0]*i + [1], c2) + q = rem[-1]/p[-1] + rem = rem[:-1] - q*p[:-1] + quo[i] = q + return quo, trimseq(rem) + + +def _add(c1, c2): + """ Helper function used to implement the ``<type>add`` functions. """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] += c2 + ret = c1 + else: + c2[:c1.size] += c1 + ret = c2 + return trimseq(ret) + + +def _sub(c1, c2): + """ Helper function used to implement the ``<type>sub`` functions. """ + # c1, c2 are trimmed copies + [c1, c2] = as_series([c1, c2]) + if len(c1) > len(c2): + c1[:c2.size] -= c2 + ret = c1 + else: + c2 = -c2 + c2[:c1.size] += c1 + ret = c2 + return trimseq(ret) + + +def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None): + """ + Helper function used to implement the ``<type>fit`` functions. + + Parameters + ---------- + vander_f : function(array_like, int) -> ndarray + The 1d vander function, such as ``polyvander`` + x, y, deg, rcond, full, w : + See the ``<type>fit`` functions for more detail + """ + x = np.asarray(x) + 0.0 + y = np.asarray(y) + 0.0 + deg = np.asarray(deg) + + # check arguments.
+ if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0: + raise TypeError("deg must be an int or non-empty 1-D array of int") + if deg.min() < 0: + raise ValueError("expected deg >= 0") + if x.ndim != 1: + raise TypeError("expected 1D vector for x") + if x.size == 0: + raise TypeError("expected non-empty vector for x") + if y.ndim < 1 or y.ndim > 2: + raise TypeError("expected 1D or 2D array for y") + if len(x) != len(y): + raise TypeError("expected x and y to have same length") + + if deg.ndim == 0: + lmax = deg + order = lmax + 1 + van = vander_f(x, lmax) + else: + deg = np.sort(deg) + lmax = deg[-1] + order = len(deg) + van = vander_f(x, lmax)[:, deg] + + # set up the least squares matrices in transposed form + lhs = van.T + rhs = y.T + if w is not None: + w = np.asarray(w) + 0.0 + if w.ndim != 1: + raise TypeError("expected 1D vector for w") + if len(x) != len(w): + raise TypeError("expected x and w to have same length") + # apply weights. Don't use inplace operations as they + # can cause problems with NA. + lhs = lhs * w + rhs = rhs * w + + # set rcond + if rcond is None: + rcond = len(x)*np.finfo(x.dtype).eps + + # Determine the norms of the design matrix columns. + if issubclass(lhs.dtype.type, np.complexfloating): + scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1)) + else: + scl = np.sqrt(np.square(lhs).sum(1)) + scl[scl == 0] = 1 + + # Solve the least squares problem. + c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond) + c = (c.T/scl).T + + # Expand c to include non-fitted coefficients which are set to zero + if deg.ndim > 0: + if c.ndim == 2: + cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype) + else: + cc = np.zeros(lmax+1, dtype=c.dtype) + cc[deg] = c + c = cc + + # warn on rank reduction + if rank != order and not full: + msg = "The fit may be poorly conditioned" + warnings.warn(msg, RankWarning, stacklevel=2) + + if full: + return c, [resids, rank, s, rcond] + else: + return c + + +def _pow(mul_f, c, pow, maxpower): + """ + Helper function used to implement the ``<type>pow`` functions. + + Parameters + ---------- + mul_f : function(array_like, array_like) -> ndarray + The ``<type>mul`` function, such as ``polymul`` + c, pow, maxpower : + See the ``<type>pow`` functions for more detail + """ + # c is a trimmed copy + [c] = as_series([c]) + power = int(pow) + if power != pow or power < 0: + raise ValueError("Power must be a non-negative integer.") + elif maxpower is not None and power > maxpower: + raise ValueError("Power is too large") + elif power == 0: + return np.array([1], dtype=c.dtype) + elif power == 1: + return c + else: + # This can be made more efficient by using powers of two + # in the usual way.
+ prd = c + for i in range(2, power + 1): + prd = mul_f(prd, c) + return prd + + +def _deprecate_as_int(x, desc): + """ + Like `operator.index`, but emits a deprecation warning when passed a float + + Parameters + ---------- + x : int-like, or float with integral value + Value to interpret as an integer + desc : str + description to include in any error message + + Raises + ------ + TypeError : if x is a non-integral float or non-numeric + DeprecationWarning : if x is an integral float + """ + try: + return operator.index(x) + except TypeError: + # Numpy 1.17.0, 2019-03-11 + try: + ix = int(x) + except TypeError: + pass + else: + if ix == x: + warnings.warn( + "In future, this will raise TypeError, as {} will need to " + "be an integer not just an integral float." + .format(desc), + DeprecationWarning, + stacklevel=3 + ) + return ix + + raise TypeError("{} must be an integer".format(desc)) diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 7fb7492c6..c8d2d6dba 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -221,12 +221,12 @@ class TestIntegral(object): def test_chebint(self): # check exceptions - assert_raises(ValueError, cheb.chebint, [0], .5) + assert_raises(TypeError, cheb.chebint, [0], .5) assert_raises(ValueError, cheb.chebint, [0], -1) assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0]) assert_raises(ValueError, cheb.chebint, [0], lbnd=[0]) assert_raises(ValueError, cheb.chebint, [0], scl=[0]) - assert_raises(ValueError, cheb.chebint, [0], axis=.5) + assert_raises(TypeError, cheb.chebint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -323,7 +323,7 @@ class TestDerivative(object): def test_chebder(self): # check exceptions - assert_raises(ValueError, cheb.chebder, [0], .5) + assert_raises(TypeError, cheb.chebder, [0], .5) assert_raises(ValueError, cheb.chebder, [0], -1) # check that zeroth derivative does nothing diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 1287ef3fe..271c1964b 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -209,12 +209,12 @@ class TestIntegral(object): def test_hermint(self): # check exceptions - assert_raises(ValueError, herm.hermint, [0], .5) + assert_raises(TypeError, herm.hermint, [0], .5) assert_raises(ValueError, herm.hermint, [0], -1) assert_raises(ValueError, herm.hermint, [0], 1, [0, 0]) assert_raises(ValueError, herm.hermint, [0], lbnd=[0]) assert_raises(ValueError, herm.hermint, [0], scl=[0]) - assert_raises(ValueError, herm.hermint, [0], axis=.5) + assert_raises(TypeError, herm.hermint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -311,7 +311,7 @@ class TestDerivative(object): def test_hermder(self): # check exceptions - assert_raises(ValueError, herm.hermder, [0], .5) + assert_raises(TypeError, herm.hermder, [0], .5) assert_raises(ValueError, herm.hermder, [0], -1) # check that zeroth derivative does nothing diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index ccb44ad73..434b30e7b 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -209,12 +209,12 @@ class TestIntegral(object): def test_hermeint(self): # check exceptions - assert_raises(ValueError, herme.hermeint, [0], .5) + assert_raises(TypeError, herme.hermeint, [0], .5) assert_raises(ValueError, herme.hermeint, [0], -1) 
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0]) assert_raises(ValueError, herme.hermeint, [0], lbnd=[0]) assert_raises(ValueError, herme.hermeint, [0], scl=[0]) - assert_raises(ValueError, herme.hermeint, [0], axis=.5) + assert_raises(TypeError, herme.hermeint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -311,7 +311,7 @@ class TestDerivative(object): def test_hermeder(self): # check exceptions - assert_raises(ValueError, herme.hermeder, [0], .5) + assert_raises(TypeError, herme.hermeder, [0], .5) assert_raises(ValueError, herme.hermeder, [0], -1) # check that zeroth derivative does nothing diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index 3ababec5e..4b9b28637 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -206,12 +206,12 @@ class TestIntegral(object): def test_lagint(self): # check exceptions - assert_raises(ValueError, lag.lagint, [0], .5) + assert_raises(TypeError, lag.lagint, [0], .5) assert_raises(ValueError, lag.lagint, [0], -1) assert_raises(ValueError, lag.lagint, [0], 1, [0, 0]) assert_raises(ValueError, lag.lagint, [0], lbnd=[0]) assert_raises(ValueError, lag.lagint, [0], scl=[0]) - assert_raises(ValueError, lag.lagint, [0], axis=.5) + assert_raises(TypeError, lag.lagint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -308,7 +308,7 @@ class TestDerivative(object): def test_lagder(self): # check exceptions - assert_raises(ValueError, lag.lagder, [0], .5) + assert_raises(TypeError, lag.lagder, [0], .5) assert_raises(ValueError, lag.lagder, [0], -1) # check that zeroth derivative does nothing diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index a23086d59..917a7e03a 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -210,12 +210,12 @@ class TestIntegral(object): def test_legint(self): # check exceptions - assert_raises(ValueError, leg.legint, [0], .5) + assert_raises(TypeError, leg.legint, [0], .5) assert_raises(ValueError, leg.legint, [0], -1) assert_raises(ValueError, leg.legint, [0], 1, [0, 0]) assert_raises(ValueError, leg.legint, [0], lbnd=[0]) assert_raises(ValueError, leg.legint, [0], scl=[0]) - assert_raises(ValueError, leg.legint, [0], axis=.5) + assert_raises(TypeError, leg.legint, [0], axis=.5) # test integration of zero polynomial for i in range(2, 5): @@ -312,7 +312,7 @@ class TestDerivative(object): def test_legder(self): # check exceptions - assert_raises(ValueError, leg.legder, [0], .5) + assert_raises(TypeError, leg.legder, [0], .5) assert_raises(ValueError, leg.legder, [0], -1) # check that zeroth derivative does nothing diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 562aa904d..08b73da15 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -291,12 +291,12 @@ class TestIntegral(object): def test_polyint(self): # check exceptions - assert_raises(ValueError, poly.polyint, [0], .5) + assert_raises(TypeError, poly.polyint, [0], .5) assert_raises(ValueError, poly.polyint, [0], -1) assert_raises(ValueError, poly.polyint, [0], 1, [0, 0]) assert_raises(ValueError, poly.polyint, [0], lbnd=[0]) assert_raises(ValueError, poly.polyint, [0], scl=[0]) - assert_raises(ValueError, poly.polyint, [0], axis=.5) + assert_raises(TypeError, poly.polyint, [0], axis=.5) # test integration of zero polynomial 
for i in range(2, 5): @@ -388,7 +388,7 @@ class TestDerivative(object): def test_polyder(self): # check exceptions - assert_raises(ValueError, poly.polyder, [0], .5) + assert_raises(TypeError, poly.polyder, [0], .5) assert_raises(ValueError, poly.polyder, [0], -1) # check that zeroth derivative does nothing diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c index 2548a646e..de397ccfa 100644 --- a/numpy/random/mtrand/distributions.c +++ b/numpy/random/mtrand/distributions.c @@ -894,7 +894,7 @@ double rk_triangular(rk_state *state, double left, double mode, double right) return left + sqrt(U*leftprod); } else { - return right - sqrt((1.0 - U) * rightprod); + return right - sqrt((1.0 - U) * rightprod); } } diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index ca9840055..c97c166aa 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -866,10 +866,9 @@ cdef class RandomState: """ tomaxint(size=None) - Random integers between 0 and ``sys.maxint``, inclusive. - Return a sample of uniformly distributed random integers in the interval - [0, ``sys.maxint``]. + [0, ``np.iinfo(np.int).max``]. The np.int type translates to the C long + integer type and its precision is platform dependent. Parameters ---------- @@ -891,16 +890,15 @@ cdef class RandomState: Examples -------- - >>> RS = np.random.mtrand.RandomState() # need a RandomState object - >>> RS.tomaxint((2,2,2)) - array([[[1170048599, 1600360186], + >>> rs = np.random.RandomState() # need a RandomState object + >>> rs.tomaxint((2,2,2)) + array([[[1170048599, 1600360186], # random [ 739731006, 1947757578]], [[1871712945, 752307660], [1601631370, 1479324245]]]) - >>> import sys - >>> sys.maxint + >>> np.iinfo(np.int).max 2147483647 - >>> RS.tomaxint((2,2,2)) < sys.maxint + >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int).max array([[[ True, True], [ True, True]], [[ True, True], @@ -992,7 +990,7 @@ cdef class RandomState: raise ValueError("high is out of bounds for %s" % dtype) if ilow >= ihigh and np.prod(size) != 0: raise ValueError("Range cannot be empty (low >= high) unless no samples are taken") - + with self.lock: ret = randfunc(ilow, ihigh - 1, size, self.state_address) @@ -1040,7 +1038,7 @@ cdef class RandomState: .. versionadded:: 1.7.0 Parameters - ----------- + ---------- a : 1-D array-like or int If an ndarray, a random sample is generated from its elements. If an int, the random sample is generated as if a were np.arange(a) @@ -1056,12 +1054,12 @@ cdef class RandomState: entries in a. Returns - -------- + ------- samples : single item or ndarray The generated random samples Raises - ------- + ------ ValueError If a is an int and less than zero, if a or p are not 1-dimensional, if a is an array-like of size 0, if p is not a vector of @@ -1070,11 +1068,11 @@ cdef class RandomState: size See Also - --------- + -------- randint, shuffle, permutation Examples - --------- + -------- Generate a uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3) @@ -1170,6 +1168,9 @@ cdef class RandomState: raise ValueError("Cannot take a larger sample than " "population when 'replace=False'") + if size < 0: + raise ValueError("negative dimensions are not allowed") + if p is not None: if np.count_nonzero(p > 0) < size: raise ValueError("Fewer non-zero entries in p than size") @@ -1328,6 +1329,12 @@ cdef class RandomState: Random values in a given shape. + .. 
note:: + This is a convenience function for users porting code from Matlab, + and wraps `numpy.random.random_sample`. That function takes a + tuple to specify the size of the output, which is consistent with + other NumPy functions like `numpy.zeros` and `numpy.ones`. + Create an array of the given shape and populate it with random samples from a uniform distribution over ``[0, 1)``. @@ -1347,12 +1354,6 @@ cdef class RandomState: -------- random - Notes - ----- - This is a convenience function. If you want an interface that - takes a shape-tuple as the first argument, refer to - np.random.random_sample . - Examples -------- >>> np.random.rand(3,2) @@ -1372,16 +1373,17 @@ cdef class RandomState: Return a sample (or samples) from the "standard normal" distribution. - If positive, int_like or int-convertible arguments are provided, - `randn` generates an array of shape ``(d0, d1, ..., dn)``, filled - with random floats sampled from a univariate "normal" (Gaussian) - distribution of mean 0 and variance 1 (if any of the :math:`d_i` are - floats, they are first converted to integers by truncation). A single - float randomly sampled from the distribution is returned if no - argument is provided. + .. note:: + This is a convenience function for users porting code from Matlab, + and wraps `numpy.random.standard_normal`. That function takes a + tuple to specify the size of the output, which is consistent with + other NumPy functions like `numpy.zeros` and `numpy.ones`. - This is a convenience function. If you want an interface that takes a - tuple as the first argument, use `numpy.random.standard_normal` instead. + If positive int_like arguments are provided, `randn` generates an array + of shape ``(d0, d1, ..., dn)``, filled + with random floats sampled from a univariate "normal" (Gaussian) + distribution of mean 0 and variance 1. A single float randomly sampled + from the distribution is returned if no argument is provided. Parameters ---------- @@ -1399,6 +1401,7 @@ cdef class RandomState: See Also -------- standard_normal : Similar, but takes a tuple as its argument. + normal : Also accepts mu and sigma arguments. Notes ----- @@ -1409,13 +1412,13 @@ cdef class RandomState: Examples -------- >>> np.random.randn() - 2.1923875335537315 #random + 2.1923875335537315 # random Two-by-four array of samples from N(3, 6.25): - >>> 2.5 * np.random.randn(2, 4) + 3 - array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random - [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random + >>> 3 + 2.5 * np.random.randn(2, 4) + array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random + [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random """ if len(args) == 0: @@ -1432,8 +1435,8 @@ cdef class RandomState: Return random integers of type np.int from the "discrete uniform" distribution in the closed interval [`low`, `high`]. If `high` is None (the default), then results are from [1, `low`]. The np.int - type translates to the C long type used by Python 2 for "short" - integers and its precision is platform dependent. + type translates to the C long integer type and its precision + is platform dependent. This function has been deprecated. Use randint instead. @@ -1536,20 +1539,43 @@ cdef class RandomState: Returns ------- out : float or ndarray - Drawn samples. + A floating-point array of shape ``size`` of drawn samples, or a + single sample if ``size`` was not specified. 
+ + Notes + ----- + For random samples from :math:`N(\\mu, \\sigma^2)`, use one of:: + + mu + sigma * np.random.standard_normal(size=...) + np.random.normal(mu, sigma, size=...) + + See Also + -------- + normal : + Equivalent function with additional ``loc`` and ``scale`` arguments + for setting the mean and standard deviation. Examples -------- + >>> np.random.standard_normal() + 2.1923875335537315 #random + >>> s = np.random.standard_normal(8000) >>> s - array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, #random - -0.38672696, -0.4685006 ]) #random + array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, # random + -0.38672696, -0.4685006 ]) # random >>> s.shape (8000,) >>> s = np.random.standard_normal(size=(3, 4, 2)) >>> s.shape (3, 4, 2) + Two-by-four array of samples from :math:`N(3, 6.25)`: + + >>> 3 + 2.5 * np.random.standard_normal(size=(2, 4)) + array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random + [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random + """ return cont0_array(self.internal_state, rk_gauss, size, self.lock) @@ -1626,11 +1652,11 @@ cdef class RandomState: Verify the mean and the variance: - >>> abs(mu - np.mean(s)) < 0.01 - True + >>> abs(mu - np.mean(s)) + 0.0 # may vary - >>> abs(sigma - np.std(s, ddof=1)) < 0.01 - True + >>> abs(sigma - np.std(s, ddof=1)) + 0.1 # may vary Display the histogram of the samples, along with the probability density function: @@ -1642,6 +1668,12 @@ cdef class RandomState: ... linewidth=2, color='r') >>> plt.show() + Two-by-four array of samples from N(3, 6.25): + + >>> np.random.normal(3, 2.5, size=(2, 4)) + array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random + [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random + """ cdef ndarray oloc, oscale cdef double floc, fscale @@ -1675,7 +1707,7 @@ cdef class RandomState: .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1} (1 - x)^{\\beta - 1}, - where the normalisation, B, is the beta function, + where the normalization, B, is the beta function, .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1} (1 - t)^{\\beta - 1} dt. @@ -2296,7 +2328,7 @@ cdef class RandomState: Draw samples from a noncentral chi-square distribution. - The noncentral :math:`\\chi^2` distribution is a generalisation of + The noncentral :math:`\\chi^2` distribution is a generalization of the :math:`\\chi^2` distribution. Parameters @@ -2326,7 +2358,7 @@ cdef class RandomState: .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0} \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!} - \\P_{Y_{df+2i}}(x), + P_{Y_{df+2i}}(x), where :math:`Y_{q}` is the Chi-square with q degrees of freedom. @@ -2362,6 +2394,7 @@ cdef class RandomState: >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000), ... bins=200, density=True) >>> plt.show() + """ cdef ndarray odf, ononc cdef double fdf, fnonc @@ -2888,7 +2921,7 @@ cdef class RandomState: Parameters ---------- a : float or array_like of floats - Parameter of the distribution. Should be greater than zero. + Parameter of the distribution. Must be non-negative. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -3401,7 +3434,7 @@ cdef class RandomState: >>> # values, drawn from a normal distribution. >>> b = [] >>> for i in range(1000): - ... a = 10. + np.random.random(100) + ... a = 10. + np.random.standard_normal(100) ... 
b.append(np.product(a)) >>> b = np.array(b) / np.min(b) # scale values to be positive @@ -3746,8 +3779,8 @@ cdef class RandomState: product p*n <=5, where p = population proportion estimate, and n = number of samples, in which case the binomial distribution is used instead. For example, a sample of 15 people shows 4 who are left - handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4, - so the binomial distribution should be used in this case. + handed, and 11 who are right handed. Then p = 4/15 = 27%. Since + 0.27*15 = 4, the binomial distribution should be used in this case. References ---------- @@ -4019,7 +4052,7 @@ cdef class RandomState: Parameters ---------- a : float or array_like of floats - Distribution parameter. Should be greater than 1. + Distribution parameter. Must be greater than 1. size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), @@ -4170,9 +4203,9 @@ cdef class RandomState: Draw samples from a Hypergeometric distribution. Samples are drawn from a hypergeometric distribution with specified - parameters, ngood (ways to make a good selection), nbad (ways to make - a bad selection), and nsample = number of items sampled, which is less - than or equal to the sum ngood + nbad. + parameters, `ngood` (ways to make a good selection), `nbad` (ways to make + a bad selection), and `nsample` (number of items sampled, which is less + than or equal to the sum ``ngood + nbad``). Parameters ---------- @@ -4186,14 +4219,16 @@ cdef class RandomState: size : int or tuple of ints, optional Output shape. If the given shape is, e.g., ``(m, n, k)``, then ``m * n * k`` samples are drawn. If size is ``None`` (default), - a single value is returned if ``ngood``, ``nbad``, and ``nsample`` + a single value is returned if `ngood`, `nbad`, and `nsample` are all scalars. Otherwise, ``np.broadcast(ngood, nbad, nsample).size`` samples are drawn. Returns ------- out : ndarray or scalar - Drawn samples from the parameterized hypergeometric distribution. + Drawn samples from the parameterized hypergeometric distribution. Each + sample is the number of good items within a randomly selected subset of + size `nsample` taken from a set of `ngood` good items and `nbad` bad items. See Also -------- @@ -4208,11 +4243,11 @@ cdef class RandomState: where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g` - for P(x) the probability of x successes, g = ngood, b = nbad, and - n = number of samples. + for P(x) the probability of ``x`` good results in the drawn sample, + g = `ngood`, b = `nbad`, and n = `nsample`. - Consider an urn with black and white marbles in it, ngood of them - black and nbad are white. If you draw nsample balls without + Consider an urn with black and white marbles in it, `ngood` of them + are black and `nbad` are white. If you draw `nsample` balls without replacement, then the hypergeometric distribution describes the distribution of black balls in the drawn sample. @@ -4387,7 +4422,7 @@ cdef class RandomState: def multivariate_normal(self, mean, cov, size=None, check_valid='warn', tol=1e-8): """ - multivariate_normal(mean, cov[, size, check_valid, tol]) + multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8) Draw random samples from a multivariate normal distribution. @@ -4414,6 +4449,7 @@ cdef class RandomState: Behavior when the covariance matrix is not positive semidefinite. 
tol : float, optional Tolerance when checking the singular values in covariance matrix. + cov is cast to double before the check. Returns ------- @@ -4525,6 +4561,8 @@ cdef class RandomState: # not zero. We continue to use the SVD rather than Cholesky in # order to preserve current outputs. + # GH10839, ensure double to make tol meaningful + cov = cov.astype(np.double) (u, s, v) = svd(cov) if check_valid != 'ignore': @@ -4552,7 +4590,7 @@ cdef class RandomState: Draw samples from a multinomial distribution. - The multinomial distribution is a multivariate generalisation of the + The multinomial distribution is a multivariate generalization of the binomial distribution. Take an experiment with one of ``p`` possible outcomes. An example of such an experiment is throwing a dice, where the outcome can be 1 through 6. Each sample drawn from the @@ -4668,7 +4706,7 @@ cdef class RandomState: Draw `size` samples of dimension k from a Dirichlet distribution. A Dirichlet-distributed random variable can be seen as a multivariate generalization of a Beta distribution. The Dirichlet distribution - is a conjugate prior of a multinomial distribution in Bayesian + is a conjugate prior of a multinomial distribution in Bayesian inference. Parameters @@ -4693,23 +4731,22 @@ cdef class RandomState: Notes ----- - - The Dirichlet distribution is a distribution over vectors - :math:`x` that fulfil the conditions :math:`x_i>0` and + The Dirichlet distribution is a distribution over vectors + :math:`x` that fulfil the conditions :math:`x_i>0` and :math:`\\sum_{i=1}^k x_i = 1`. - The probability density function :math:`p` of a - Dirichlet-distributed random vector :math:`X` is + The probability density function :math:`p` of a + Dirichlet-distributed random vector :math:`X` is proportional to .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}, - where :math:`\\alpha` is a vector containing the positive + where :math:`\\alpha` is a vector containing the positive concentration parameters. 
The method uses the following property for computation: let :math:`Y` - be a random vector which has components that follow a standard gamma - distribution, then :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y` + be a random vector which has components that follow a standard gamma + distribution, then :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y` is Dirichlet-distributed References @@ -4924,7 +4961,7 @@ cdef class RandomState: return arr arr = np.asarray(x) - + # shuffle has fast-path for 1-d if arr.ndim == 1: # Return a copy if same memory @@ -4937,7 +4974,7 @@ cdef class RandomState: idx = np.arange(arr.shape[0], dtype=np.intp) self.shuffle(idx) return arr[idx] - + _rand = RandomState() seed = _rand.seed diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py index 656e38dc8..711270072 100644 --- a/numpy/random/tests/test_random.py +++ b/numpy/random/tests/test_random.py @@ -400,6 +400,10 @@ class TestRandomDist(object): assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1]) assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4]) assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False) + # gh-13087 + assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False) + assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False) assert_raises(ValueError, sample, [1, 2, 3], 2, replace=False, p=[1, 0, 0]) @@ -717,6 +721,12 @@ class TestRandomDist(object): assert_raises(ValueError, np.random.multivariate_normal, mean, cov, check_valid='raise') + cov = np.array([[1, 0.1],[0.1, 1]], dtype=np.float32) + with suppress_warnings() as sup: + np.random.multivariate_normal(mean, cov) + w = sup.record(RuntimeWarning) + assert len(w) == 0 + def test_negative_binomial(self): np.random.seed(self.seed) actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2)) diff --git a/runtests.py b/runtests.py index 42866714f..23245aeac 100755 --- a/runtests.py +++ b/runtests.py @@ -74,7 +74,7 @@ def main(argv): parser.add_argument("--doctests", action="store_true", default=False, help="Run doctests in module") parser.add_argument("--refguide-check", action="store_true", default=False, - help="Run refguide check (do not run regular tests.)") + help="Run refguide (doctest) check (do not run regular tests.)") parser.add_argument("--coverage", action="store_true", default=False, help=("report coverage of project code. 
@@ -4924,7 +4961,7 @@ cdef class RandomState:
             return arr
 
         arr = np.asarray(x)
-        
+
         # shuffle has fast-path for 1-d
         if arr.ndim == 1:
             # Return a copy if same memory
@@ -4937,7 +4974,7 @@ cdef class RandomState:
         idx = np.arange(arr.shape[0], dtype=np.intp)
         self.shuffle(idx)
         return arr[idx]
-        
+
 
 _rand = RandomState()
 seed = _rand.seed
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 656e38dc8..711270072 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -400,6 +400,10 @@ class TestRandomDist(object):
         assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
         assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
         assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+        # gh-13087
+        assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+        assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+        assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
         assert_raises(ValueError, sample, [1, 2, 3], 2,
                       replace=False, p=[1, 0, 0])
@@ -717,6 +721,12 @@ class TestRandomDist(object):
             assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
                           check_valid='raise')
 
+        cov = np.array([[1, 0.1],[0.1, 1]], dtype=np.float32)
+        with suppress_warnings() as sup:
+            np.random.multivariate_normal(mean, cov)
+            w = sup.record(RuntimeWarning)
+            assert len(w) == 0
+
     def test_negative_binomial(self):
         np.random.seed(self.seed)
         actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
diff --git a/runtests.py b/runtests.py
index 42866714f..23245aeac 100755
--- a/runtests.py
+++ b/runtests.py
@@ -74,7 +74,7 @@ def main(argv):
     parser.add_argument("--doctests", action="store_true", default=False,
                         help="Run doctests in module")
     parser.add_argument("--refguide-check", action="store_true", default=False,
-                        help="Run refguide check (do not run regular tests.)")
+                        help="Run refguide (doctest) check (do not run regular tests.)")
     parser.add_argument("--coverage", action="store_true", default=False,
                         help=("report coverage of project code. HTML output goes "
                               "under build/coverage"))
@@ -259,8 +259,6 @@ def main(argv):
         ret = subprocess.call(cmd, cwd=os.path.join(ROOT_DIR, 'benchmarks'))
         sys.exit(ret)
 
-    test_dir = os.path.join(ROOT_DIR, 'build', 'test')
-
     if args.build_only:
         sys.exit(0)
     else:
diff --git a/shippable.yml b/shippable.yml
index 82ee9461f..75d2dcafe 100644
--- a/shippable.yml
+++ b/shippable.yml
@@ -22,7 +22,15 @@ runtime:
 build:
   ci:
     # install dependencies
-    - sudo apt-get install gcc gfortran libblas-dev liblapack-dev
+    - sudo apt-get install gcc gfortran
+    # ARMv8 OpenBLAS built using script available here:
+    # https://github.com/tylerjereddy/openblas-static-gcc/tree/master/ARMv8
+    # build done on GCC compile farm machine named gcc115
+    # tarball uploaded manually to an unshared Dropbox location
+    - wget -O openblas-v0.3.5-armv8.tar.gz https://www.dropbox.com/s/pbqkxzlmih4cky1/openblas-v0.3.5-armv8.tar.gz?dl=0
+    - tar zxvf openblas-v0.3.5-armv8.tar.gz
+    - sudo cp -r ./64/lib/* /usr/lib
+    - sudo cp ./64/include/* /usr/include
     # add pathlib for Python 2, otherwise many tests are skipped
     - pip install --upgrade pip
     # we will pay the ~13 minute cost of compiling Cython only when a new
diff --git a/tools/npy_tempita/_looper.py b/tools/npy_tempita/_looper.py
index dcb206642..047bf5292 100644
--- a/tools/npy_tempita/_looper.py
+++ b/tools/npy_tempita/_looper.py
@@ -77,53 +77,53 @@ class loop_pos(object):
         return '<loop pos=%r at %r>' % (
             self.seq[self.pos], self.pos)
 
+    @property
     def index(self):
         return self.pos
-    index = property(index)
 
+    @property
     def number(self):
         return self.pos + 1
-    number = property(number)
 
+    @property
     def item(self):
         return self.seq[self.pos]
-    item = property(item)
 
+    @property
     def __next__(self):
         try:
             return self.seq[self.pos + 1]
         except IndexError:
             return None
-    __next__ = property(__next__)
 
     if sys.version < "3":
         next = __next__
 
+    @property
     def previous(self):
         if self.pos == 0:
             return None
         return self.seq[self.pos - 1]
-    previous = property(previous)
 
+    @property
     def odd(self):
         return not self.pos % 2
-    odd = property(odd)
 
+    @property
     def even(self):
         return self.pos % 2
-    even = property(even)
 
+    @property
     def first(self):
         return self.pos == 0
-    first = property(first)
 
+    @property
     def last(self):
         return self.pos == len(self.seq) - 1
-    last = property(last)
 
+    @property
     def length(self):
         return len(self.seq)
-    length = property(length)
 
     def first_group(self, getter=None):
         """
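The `_looper` rewrite above is purely mechanical: `name = property(name)` placed after a `def` is exactly what the `@property` decorator desugars to, so behavior is unchanged. A toy illustration of the equivalence (not tempita code):

```python
class OldStyle(object):
    def pos(self):
        return 42
    pos = property(pos)   # pre-decorator spelling

class NewStyle(object):
    @property             # decorator sugar for the same property() call
    def pos(self):
        return 42

# Both classes expose `pos` as a read-only attribute.
assert OldStyle().pos == NewStyle().pos == 42
```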
diff --git a/tools/refguide_check.py b/tools/refguide_check.py
index 531eeacb0..74dbad78b 100644
--- a/tools/refguide_check.py
+++ b/tools/refguide_check.py
@@ -311,7 +311,7 @@ def validate_rst_syntax(text, name, dots=True):
         return False, "ERROR: %s: no documentation" % (name,)
 
     ok_unknown_items = set([
-        'mod', 'currentmodule', 'autosummary', 'data',
+        'mod', 'currentmodule', 'autosummary', 'data', 'attr', 'obj',
         'versionadded', 'versionchanged', 'module', 'class', 'ref', 'func',
         'toctree', 'moduleauthor', 'term', 'c:member', 'sectionauthor',
         'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index c334e91ae..db1f0bc5c 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -25,6 +25,17 @@
 if [ -n "$INSTALL_PICKLE5" ]; then
   pip install pickle5
 fi
 
+if [ -n "$PPC64_LE" ]; then
+  # build script for POWER8 OpenBLAS available here:
+  # https://github.com/tylerjereddy/openblas-static-gcc/blob/master/power8
+  # built on GCC compile farm machine named gcc112
+  # manually uploaded tarball to an unshared Dropbox location
+  wget -O openblas-power8.tar.gz https://www.dropbox.com/s/zcwhk7c2zptwy0s/openblas-v0.3.5-ppc64le-power8.tar.gz?dl=0
+  tar zxvf openblas-power8.tar.gz
+  sudo cp -r ./64/lib/* /usr/lib
+  sudo cp ./64/include/* /usr/include
+fi
+
 pip install --upgrade pip setuptools
 pip install nose pytz cython pytest
 if [ -n "$USE_ASV" ]; then pip install asv; fi
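With the prebuilt OpenBLAS tarballs copied into /usr/lib on the POWER8 and ARMv8 CI machines, linkage can be sanity-checked from Python after the build. A possible smoke test (not part of this diff):

```python
import numpy as np

# Report the BLAS/LAPACK NumPy was built against; on these CI images it
# should mention openblas rather than the system ATLAS.
np.__config__.show()

# Exercise the linked BLAS with a small matmul round-trip.
a = np.random.rand(100, 100)
b = np.random.rand(100, 100)
assert np.allclose(a @ b, np.dot(a, b))
```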
