122 files changed, 6841 insertions, 4707 deletions
diff --git a/.appveyor.yml b/.appveyor.yml index 01440c6a0..079496d93 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -30,11 +30,6 @@ environment: PYTHON_ARCH: 32 TEST_MODE: fast - - PYTHON: C:\Python27-x64 - PYTHON_VERSION: 2.7 - PYTHON_ARCH: 64 - TEST_MODE: fast - - PYTHON: C:\Python36-x64 PYTHON_VERSION: 3.6 PYTHON_ARCH: 64 @@ -91,7 +86,7 @@ install: $clnt.DownloadFile($env:OPENBLAS, $file) Get-FileHash $file | Format-List - Expand-Archive $file $tmpdir + Expand-Archive $file $tmpdir rm $tmpdir\$env:PYTHON_ARCH\lib\*.dll.a $lib = ls $tmpdir\$env:PYTHON_ARCH\lib\*.a | ForEach { ls $_ } | Select-Object -first 1 diff --git a/.ctags.d b/.ctags.d new file mode 100644 index 000000000..60f7d6c65 --- /dev/null +++ b/.ctags.d @@ -0,0 +1 @@ +--langmaps=c:+.src diff --git a/.gitignore b/.gitignore index 0a1e1909f..c2eddb84a 100644 --- a/.gitignore +++ b/.gitignore @@ -124,6 +124,10 @@ numpy/core/include/numpy/config.h numpy/core/include/numpy/multiarray_api.txt numpy/core/include/numpy/ufunc_api.txt numpy/core/lib/ +numpy/core/src/common/npy_binsearch.h +numpy/core/src/common/npy_partition.h +numpy/core/src/common/npy_sort.h +numpy/core/src/common/templ_common.h numpy/core/src/multiarray/_multiarray_tests.c numpy/core/src/multiarray/arraytypes.c numpy/core/src/multiarray/einsum.c @@ -150,6 +154,7 @@ numpy/core/src/umath/_umath_tests.c numpy/core/src/umath/scalarmath.c numpy/core/src/umath/funcs.inc numpy/core/src/umath/loops.[ch] +numpy/core/src/umath/matmul.[ch] numpy/core/src/umath/operand_flag_tests.c numpy/core/src/umath/simd.inc numpy/core/src/umath/struct_ufunc_test.c @@ -12,3 +12,7 @@ extraction: python_setup: requirements: - cython>=0.29 + cpp: + index: + build_command: + - python3 setup.py build diff --git a/.travis.yml b/.travis.yml index 491fcefea..23c731f83 100644 --- a/.travis.yml +++ b/.travis.yml @@ -31,7 +31,6 @@ env: ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU=" python: - - 2.7 - 3.5 - 3.6 matrix: @@ -40,15 +39,6 @@ matrix: dist: xenial # Required for Python 3.7 sudo: true # travis-ci/travis-ci#9069 env: INSTALL_PICKLE5=1 - - python: 3.6 - env: USE_CHROOT=1 ARCH=i386 DIST=bionic - sudo: true - addons: - apt: - update: true - packages: - - dpkg - - debootstrap - python: 3.5 dist: xenial # Required for python3.5-dbg sudo: true # travis-ci/travis-ci#9069 @@ -63,8 +53,6 @@ matrix: - python3-setuptools - python: 3.6 env: USE_WHEEL=1 RUN_FULL_TESTS=1 RUN_COVERAGE=1 INSTALL_PICKLE5=1 - - python: 2.7 - env: USE_WHEEL=1 RUN_FULL_TESTS=1 PYTHON_OPTS="-3 -OO" - python: 3.6 env: USE_SDIST=1 - python: 3.6 diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 812315bc2..6e7975039 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -6,7 +6,7 @@ trigger: - master - maintenance/* jobs: -- job: Linux_Python_36_32bit_full +- job: Linux_Python_36_32bit_full_with_asserts pool: vmIMage: 'ubuntu-16.04' steps: @@ -17,14 +17,22 @@ jobs: apt-get -y install python3.6-dev python3-pip locales && \ locale-gen fr_FR && update-locale && \ pip3 install setuptools nose cython==0.29.0 pytest pytz pickle5 && \ - apt-get -y install libopenblas-dev gfortran && \ + apt-get -y install gfortran-5 wget && \ + cd .. 
&& \ + mkdir openblas && cd openblas && \ + wget https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-v0.3.4-manylinux1_i686.tar.gz && \ + tar zxvf openblas-v0.3.4-manylinux1_i686.tar.gz && \ + cp -r ./usr/local/lib/* /usr/lib && \ + cp ./usr/local/include/* /usr/include && \ + cd ../numpy && \ NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 \ - python3 runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml" + F77=gfortran-5 F90=gfortran-5 \ + CFLAGS=-UNDEBUG python3 runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml" displayName: 'Run 32-bit Ubuntu Docker Build / Tests' - task: PublishTestResults@2 inputs: testResultsFiles: '**/test-*.xml' - testRunTitle: 'Publish test results for Python 3.6-32 bit' + testRunTitle: 'Publish test results for Python 3.6-32 bit full Linux' - job: macOS pool: # NOTE: at time of writing, there is a danger @@ -56,7 +64,7 @@ jobs: displayName: 'make gfortran available on mac os vm' - script: python -m pip install --upgrade pip setuptools wheel displayName: 'Install tools' - - script: python -m pip install cython nose pytz pytest pickle5 vulture + - script: python -m pip install cython nose pytz pytest pickle5 vulture docutils sphinx numpydoc matplotlib displayName: 'Install dependencies; some are optional to avoid test skips' - script: /bin/bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'" displayName: 'Check for unreachable code paths in Python modules' @@ -76,12 +84,14 @@ jobs: ATLAS: None ACCELERATE: None CC: /usr/bin/clang + - script: python runtests.py -g --refguide-check + displayName: 'Run Refuide Check' - script: python runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml displayName: 'Run Full NumPy Test Suite' - task: PublishTestResults@2 inputs: testResultsFiles: '**/test-*.xml' - testRunTitle: 'Publish test results for Python $(python.version)' + testRunTitle: 'Publish test results for Python 3.6 64-bit full Mac OS' - job: Windows pool: vmIMage: 'VS2017-Win2016' @@ -105,12 +115,6 @@ jobs: TEST_MODE: fast OPENBLAS: $(OPENBLAS_32) BITS: 32 - Python27-64bit-fast: - PYTHON_VERSION: '2.7' - PYTHON_ARCH: 'x64' - TEST_MODE: fast - OPENBLAS: $(OPENBLAS_64) - BITS: 64 Python35-64bit-full: PYTHON_VERSION: '3.5' PYTHON_ARCH: 'x64' @@ -137,14 +141,6 @@ jobs: versionSpec: $(PYTHON_VERSION) addToPath: true architecture: $(PYTHON_ARCH) - # as noted by numba project, currently need - # specific VC install for Python 2.7 - - powershell: | - $wc = New-Object net.webclient - $wc.Downloadfile("https://download.microsoft.com/download/7/9/6/796EF2E4-801B-4FC4-AB28-B59FBF6D907B/VCForPython27.msi", "VCForPython27.msi") - Start-Process "VCForPython27.msi" /qn -Wait - displayName: 'Install VC 9.0' - condition: eq(variables['PYTHON_VERSION'], '2.7') - script: python -m pip install --upgrade pip setuptools wheel displayName: 'Install tools' - powershell: | @@ -187,4 +183,4 @@ jobs: - task: PublishTestResults@2 inputs: testResultsFiles: '**/test-*.xml' - testRunTitle: 'Publish test results for Python $(python.version)' + testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows' diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py index e6c91a27c..7a6b3f01d 100644 --- a/benchmarks/benchmarks/bench_lib.py +++ b/benchmarks/benchmarks/bench_lib.py @@ -9,20 +9,48 @@ import numpy as np class Pad(Benchmark): - """Benchmarks for `numpy.pad`.""" + """Benchmarks for `numpy.pad`. 
+ + When benchmarking the pad function it is useful to cover scenarios where + the ratio between the size of the input array and the output array differs + significantly (original area vs. padded area). This allows to evaluate for + which scenario a padding algorithm is optimized. Furthermore involving + large range of array sizes ensures that the effects of CPU-bound caching is + visible. + + The table below shows the sizes of the arrays involved in this benchmark: + + +-----------------+----------+-----------+-----------+-----------------+ + | shape | original | padded: 1 | padded: 8 | padded: (0, 32) | + +=================+==========+===========+===========+=================+ + | (2 ** 22,) | 32 MiB | 32.0 MiB | 32.0 MiB | 32.0 MiB | + +-----------------+----------+-----------+-----------+-----------------+ + | (1024, 1024) | 8 MiB | 8.03 MiB | 8.25 MiB | 8.51 MiB | + +-----------------+----------+-----------+-----------+-----------------+ + | (256, 256, 1) | 256 KiB | 786 KiB | 5.08 MiB | 11.6 MiB | + +-----------------+----------+-----------+-----------+-----------------+ + | (4, 4, 4, 4) | 2 KiB | 10.1 KiB | 1.22 MiB | 12.8 MiB | + +-----------------+----------+-----------+-----------+-----------------+ + | (1, 1, 1, 1, 1) | 8 B | 1.90 MiB | 10.8 MiB | 299 MiB | + +-----------------+----------+-----------+-----------+-----------------+ + """ param_names = ["shape", "pad_width", "mode"] params = [ - [(1000,), (10, 100), (10, 10, 10)], - [1, 3, (0, 5)], + # Shape of the input arrays + [(2 ** 22,), (1024, 1024), (256, 128, 1), + (4, 4, 4, 4), (1, 1, 1, 1, 1)], + # Tested pad widths + [1, 8, (0, 32)], + # Tested modes: mean, median, minimum & maximum use the same code path + # reflect & symmetric share a lot of their code path ["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"], ] def setup(self, shape, pad_width, mode): - # avoid np.zeros or np.empty's lazy allocation. - # np.full causes pagefaults to occur during setup - # instead of during the benchmark - self.array = np.full(shape, 0) + # Make sure to fill the array to make the OS page fault + # in the setup phase and not the timed phase + self.array = np.full(shape, fill_value=1, dtype=np.float64) def time_pad(self, shape, pad_width, mode): np.pad(self.array, pad_width, mode) diff --git a/benchmarks/benchmarks/bench_overrides.py b/benchmarks/benchmarks/bench_overrides.py index 2cb94c95c..58572d07d 100644 --- a/benchmarks/benchmarks/bench_overrides.py +++ b/benchmarks/benchmarks/bench_overrides.py @@ -2,7 +2,15 @@ from __future__ import absolute_import, division, print_function from .common import Benchmark -from numpy.core.overrides import array_function_dispatch +try: + from numpy.core.overrides import array_function_dispatch +except ImportError: + # Don't fail at import time with old Numpy versions + def array_function_dispatch(*args, **kwargs): + def wrap(*args, **kwargs): + return None + return wrap + import numpy as np @@ -16,10 +24,10 @@ def mock_broadcast_to(array, shape, subok=False): def _concatenate_dispatcher(arrays, axis=None, out=None): - for array in arrays: - yield array if out is not None: - yield out + arrays = list(arrays) + arrays.append(out) + return arrays @array_function_dispatch(_concatenate_dispatcher) diff --git a/doc/Makefile b/doc/Makefile index 667dbef29..d61d115f0 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,7 +1,7 @@ # Makefile for Sphinx documentation # -PYVER = 3.6 +PYVER = 3 PYTHON = python$(PYVER) # You can set these variables from the command line. 
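A side note on the ``bench_overrides.py`` hunk above: a dispatcher passed to ``array_function_dispatch`` only has to return the arguments whose types NumPy inspects for ``__array_function__`` overrides, which is why ``_concatenate_dispatcher`` now builds and returns a list instead of yielding. A minimal, hedged sketch of that pattern (``stack_pair`` and ``_stack_pair_dispatcher`` are hypothetical names, and the ``ImportError`` shim follows the spirit of the fallback added in the benchmark)::

    import numpy as np

    try:
        from numpy.core.overrides import array_function_dispatch
    except ImportError:
        # NumPy versions without this private module: degrade to a decorator
        # that returns the implementation unchanged so the sketch still runs.
        def array_function_dispatch(dispatcher):
            def decorator(implementation):
                return implementation
            return decorator

    def _stack_pair_dispatcher(a, b, out=None):
        # Return the array-like arguments; NumPy looks at their types to decide
        # whether some argument's __array_function__ should handle the call.
        if out is None:
            return (a, b)
        return (a, b, out)

    @array_function_dispatch(_stack_pair_dispatcher)
    def stack_pair(a, b, out=None):
        # Hypothetical helper, not a NumPy API.
        return np.stack([a, b], out=out)

    print(stack_pair(np.zeros(3), np.ones(3)))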
diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt index 960bb3f3e..79a296ffe 100644 --- a/doc/RELEASE_WALKTHROUGH.rst.txt +++ b/doc/RELEASE_WALKTHROUGH.rst.txt @@ -6,6 +6,11 @@ replace 1.14.5 by the correct version. Release Walkthrough ==================== +Note that in the code snippets below, ``upstream`` refers to the root repository on +github and ``origin`` to a fork in your personal account. You may need to make adjustments +if you have not forked the repository but simply cloned it locally. You can +also edit ``.git/config`` and add ``upstream`` if it isn't already present. + Backport Pull Requests ---------------------- @@ -55,7 +60,7 @@ Edit pavement.py and setup.py as detailed in HOWTO_RELEASE:: Sanity check:: - $ python runtests.py -m "full" + $ python runtests.py -m "full" # NumPy < 1.17 only $ python3 runtests.py -m "full" Push this release directly onto the end of the maintenance branch. This @@ -86,7 +91,7 @@ commit. This can take a while. The numpy-wheels repository is cloned from may have been accessed and changed by someone else and a push will fail:: $ cd ../numpy-wheels - $ git pull origin master + $ git pull upstream master $ git branch <new version> # only when starting new numpy version $ git checkout v1.14.x # v1.14.x already existed for the 1.14.4 release @@ -96,7 +101,7 @@ above for ``BUILD_COMMIT``, see the _example from `v1.14.3`:: $ gvim .travis.yml .appveyor.yml $ git commit -a - $ git push origin HEAD + $ git push upstream HEAD Now wait. If you get nervous at the amount of time taken -- the builds can take several hours-- you can check the build progress by following the links @@ -121,7 +126,7 @@ download all the wheels to the ``../numpy/release/installers`` directory and upload later using ``twine``:: $ cd ../terryfy - $ git pull origin master + $ git pull upstream master $ CDN_URL=https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com $ NPY_WHLS=../numpy/release/installers $ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t win numpy 1.14.5 @@ -135,7 +140,7 @@ Generate the README files ------------------------- This needs to be done after all installers are present, but before the pavement -file is updated for continued development. +file is updated for continued development:: $ cd ../numpy $ paver write_release @@ -158,15 +163,15 @@ push the tag upstream:: $ git push upstream v1.14.5 -We wait until this point to push the tag because it is very difficult to change -the tag after it has been pushed. +We wait until this point to push the tag because it is public and should not +be changed after it has been pushed. Reset the maintenance branch into a development state ----------------------------------------------------- Add another ``REL`` commit to the numpy maintenance branch, which resets the -``ISREALEASED`` flag to ``False`` and increments the version counter:: +``ISREALEASED`` flag to ``False`` and increments the version counter.:: $ gvim pavement.py setup.py $ git commit -a -m"REL: prepare 1.14.x for further development" @@ -177,7 +182,7 @@ Upload to PyPI -------------- Upload to PyPI using ``twine``. A recent version of ``twine`` of is needed -after recent PyPI changes, version ``1.11.0`` was used here. :: +after recent PyPI changes, version ``1.11.0`` was used here.:: $ cd ../numpy $ twine upload release/installers/*.whl @@ -251,8 +256,9 @@ Announce to mailing lists The release should be announced on the numpy-discussion, scipy-devel, scipy-user, and python-announce-list mailing lists. 
Look at previous -announcements for the basic template. The contributor and PR lists -are the same as generated for the release notes above. +announcements for the basic template. The contributor and PR lists are the same +as generated for the release notes above. If you crosspost, make sure that +python-announce-list is BCC so that replies will not be sent to that list. Post-Release Tasks diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst index 988c9086a..ffe780c79 100644 --- a/doc/neps/nep-0018-array-function-protocol.rst +++ b/doc/neps/nep-0018-array-function-protocol.rst @@ -340,7 +340,7 @@ Changes within NumPy functions ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Given a function defining the above behavior, for now call it -``array_function_implementation_or_override``, we now need to call that +``implement_array_function``, we now need to call that function from within every relevant NumPy function. This is a pervasive change, but of fairly simple and innocuous code that should complete quickly and without effect if no arguments implement the ``__array_function__`` @@ -358,7 +358,7 @@ functions: @functools.wraps(implementation) def public_api(*args, **kwargs): relevant_args = dispatcher(*args, **kwargs) - return array_function_implementation_or_override( + return implement_array_function( implementation, public_api, relevant_args, args, kwargs) return public_api return decorator @@ -395,11 +395,11 @@ It's particularly worth calling out the decorator's use of In a few cases, it would not make sense to use the ``array_function_dispatch`` decorator directly, but override implementation in terms of -``array_function_implementation_or_override`` should still be straightforward. +``implement_array_function`` should still be straightforward. - Functions written entirely in C (e.g., ``np.concatenate``) can't use decorators, but they could still use a C equivalent of - ``array_function_implementation_or_override``. If performance is not a + ``implement_array_function``. If performance is not a concern, they could also be easily wrapped with a small Python wrapper. - ``np.einsum`` does complicated argument parsing to handle two different function signatures. It would probably be best to avoid the overhead of @@ -475,7 +475,7 @@ the difference in speed between the ``ndarray.sum()`` method (1.6 us) and ``numpy.sum()`` function (2.6 us). Fortunately, we expect significantly less overhead with a C implementation of -``array_function_implementation_or_override``, which is where the bulk of the +``implement_array_function``, which is where the bulk of the runtime is. This would leave the ``array_function_dispatch`` decorator and dispatcher function on their own adding about 0.5 microseconds of overhead, for perhaps ~1 microsecond of overhead in the typical case. @@ -503,7 +503,7 @@ already wrap a limited subset of SciPy functionality (e.g., If we want to do this, we should expose at least the decorator ``array_function_dispatch()`` and possibly also the lower level -``array_function_implementation_or_override()`` as part of NumPy's public API. +``implement_array_function()`` as part of NumPy's public API. Non-goals --------- @@ -807,7 +807,7 @@ public API. ``types`` is included because we can compute it almost for free as part of collecting ``__array_function__`` implementations to call in -``array_function_implementation_or_override``. We also think it will be used +``implement_array_function``. 
We also think it will be used by many ``__array_function__`` methods, which otherwise would need to extract this information themselves. It would be equivalently easy to provide single instances of each type, but providing only types seemed cleaner. @@ -823,7 +823,7 @@ There are two other arguments that we think *might* be important to pass to - Access to the non-dispatched implementation (i.e., before wrapping with ``array_function_dispatch``) in ``ndarray.__array_function__`` would allow us to drop special case logic for that method from - ``array_function_implementation_or_override``. + ``implement_array_function``. - Access to the ``dispatcher`` function passed into ``array_function_dispatch()`` would allow ``__array_function__`` implementations to determine the list of "array-like" arguments in a generic diff --git a/doc/release/1.16.0-notes.rst b/doc/release/1.16.0-notes.rst index 8d176c3ea..facce6e00 100644 --- a/doc/release/1.16.0-notes.rst +++ b/doc/release/1.16.0-notes.rst @@ -326,6 +326,11 @@ copying the data directly into the appropriate slice of the resulting array. This results in significant speedups for these large arrays, particularly for arrays being blocked along more than 2 dimensions. +``arr.ctypes.data_as(...)`` holds a reference to arr +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Previously the caller was responsible for keeping the array alive for the +lifetime of the pointer. + Speedup ``np.take`` for read-only arrays ---------------------------------------- The implementation of ``np.take`` no longer makes an unnecessary copy of the diff --git a/doc/release/1.17.0-notes.rst b/doc/release/1.17.0-notes.rst new file mode 100644 index 000000000..5c58632c0 --- /dev/null +++ b/doc/release/1.17.0-notes.rst @@ -0,0 +1,53 @@ +========================== +NumPy 1.17.0 Release Notes +========================== + + +Highlights +========== + +* NumPy's FFT implementation has switched to pocketfft + +New functions +============= + + +Deprecations +============ + + +Future Changes +============== + + +Expired deprecations +==================== + + +Compatibility notes +=================== + + +C API changes +============= + + +New Features +============ + + +Improvements +============ + +replacement of the `fftpack`-based FFT module by the `pocketfft` library +------------------------------------------------------------------------ +Both implementations have the same ancestor (Fortran77 `FFTPACK` by Paul N. +Swarztrauber), but `pocketfft` contains additional modifications which +improve both accuracy and performance in some circumstances. For FFT lengths +containing large prime factors, `pocketfft` uses Bluestein's algorithm, which +maintains `O(N log N)` run time complexity instead of deteriorating towards +`O(N*N)` for prime lengths. Also, accuracy for real-valued FFTs with near-prime +lengths has improved and is on par with complex-valued FFTs. + +Changes +======= diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst index 62d36e28c..3a319ecca 100644 --- a/doc/source/reference/arrays.indexing.rst +++ b/doc/source/reference/arrays.indexing.rst @@ -111,9 +111,10 @@ concepts to remember include: [5], [6]]]) -- :const:`Ellipsis` expand to the number of ``:`` objects needed to - make a selection tuple of the same length as ``x.ndim``. There may - only be a single ellipsis present. +- :const:`Ellipsis` expands to the number of ``:`` objects needed for the + selection tuple to index all dimensions. 
In most cases, this means that + length of the expanded selection tuple is ``x.ndim``. There may only be a + single ellipsis present. .. admonition:: Example @@ -513,14 +514,10 @@ only the part of the data in the specified field. Also :ref:`record array <arrays.classes.rec>` scalars can be "indexed" this way. Indexing into a structured array can also be done with a list of field names, -*e.g.* ``x[['field-name1','field-name2']]``. Currently this returns a new -array containing a copy of the values in the fields specified in the list. -As of NumPy 1.7, returning a copy is being deprecated in favor of returning -a view. A copy will continue to be returned for now, but a FutureWarning -will be issued when writing to the copy. If you depend on the current -behavior, then we suggest copying the returned array explicitly, i.e. use -x[['field-name1','field-name2']].copy(). This will work with both past and -future versions of NumPy. +*e.g.* ``x[['field-name1','field-name2']]``. As of NumPy 1.16 this returns a +view containing only those fields. In older versions of numpy it returned a +copy. See the user guide section on :ref:`structured_arrays` for more +information on multifield indexing. If the accessed field is a sub-array, the dimensions of the sub-array are appended to the shape of the result. diff --git a/doc/source/release.rst b/doc/source/release.rst index 1cf215549..11a25d13e 100644 --- a/doc/source/release.rst +++ b/doc/source/release.rst @@ -2,6 +2,7 @@ Release Notes ************* +.. include:: ../release/1.17.0-notes.rst .. include:: ../release/1.16.0-notes.rst .. include:: ../release/1.15.4-notes.rst .. include:: ../release/1.15.3-notes.rst diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 399237c21..45e40d011 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -693,19 +693,19 @@ this is just an example, not a statement of "best practices"): :: - # Make all numpy available via shorter 'num' prefix - import numpy as num + # Make all numpy available via shorter 'np' prefix + import numpy as np # Make all matlib functions accessible at the top level via M.func() import numpy.matlib as M # Make some matlib functions accessible directly at the top level via, e.g. rand(3,3) from numpy.matlib import rand,zeros,ones,empty,eye # Define a Hermitian function def hermitian(A, **kwargs): - return num.transpose(A,**kwargs).conj() + return np.transpose(A,**kwargs).conj() # Make some shortcuts for transpose,hermitian: - # num.transpose(A) --> T(A) + # np.transpose(A) --> T(A) # hermitian(A) --> H(A) - T = num.transpose + T = np.transpose H = hermitian Links diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py index 668aee935..513415e09 100644 --- a/numpy/core/_add_newdocs.py +++ b/numpy/core/_add_newdocs.py @@ -49,7 +49,7 @@ add_newdoc('numpy.core', 'flatiter', >>> x = np.arange(6).reshape(2, 3) >>> fl = x.flat >>> type(fl) - <type 'numpy.flatiter'> + <class 'numpy.flatiter'> >>> for item in fl: ... print(item) ... @@ -320,71 +320,68 @@ add_newdoc('numpy.core', 'nditer', Here is how we might write an ``iter_add`` function, using the Python iterator protocol:: - def iter_add_py(x, y, out=None): - addop = np.add - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - with it: - for (a, b, c) in it: - addop(a, b, out=c) - return it.operands[2] + >>> def iter_add_py(x, y, out=None): + ... addop = np.add + ... 
it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... for (a, b, c) in it: + ... addop(a, b, out=c) + ... return it.operands[2] Here is the same function, but following the C-style pattern:: - def iter_add(x, y, out=None): - addop = np.add - - it = np.nditer([x, y, out], [], - [['readonly'], ['readonly'], ['writeonly','allocate']]) - with it: - while not it.finished: - addop(it[0], it[1], out=it[2]) - it.iternext() - - return it.operands[2] + >>> def iter_add(x, y, out=None): + ... addop = np.add + ... it = np.nditer([x, y, out], [], + ... [['readonly'], ['readonly'], ['writeonly','allocate']]) + ... with it: + ... while not it.finished: + ... addop(it[0], it[1], out=it[2]) + ... it.iternext() + ... return it.operands[2] Here is an example outer product function:: - def outer_it(x, y, out=None): - mulop = np.multiply - - it = np.nditer([x, y, out], ['external_loop'], - [['readonly'], ['readonly'], ['writeonly', 'allocate']], - op_axes=[list(range(x.ndim)) + [-1] * y.ndim, - [-1] * x.ndim + list(range(y.ndim)), - None]) - with it: - for (a, b, c) in it: - mulop(a, b, out=c) - return it.operands[2] - - >>> a = np.arange(2)+1 - >>> b = np.arange(3)+1 - >>> outer_it(a,b) - array([[1, 2, 3], - [2, 4, 6]]) + >>> def outer_it(x, y, out=None): + ... mulop = np.multiply + ... it = np.nditer([x, y, out], ['external_loop'], + ... [['readonly'], ['readonly'], ['writeonly', 'allocate']], + ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim, + ... [-1] * x.ndim + list(range(y.ndim)), + ... None]) + ... with it: + ... for (a, b, c) in it: + ... mulop(a, b, out=c) + ... return it.operands[2] + + >>> a = np.arange(2)+1 + >>> b = np.arange(3)+1 + >>> outer_it(a,b) + array([[1, 2, 3], + [2, 4, 6]]) Here is an example function which operates like a "lambda" ufunc:: - def luf(lamdaexpr, *args, **kwargs): - "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)" - nargs = len(args) - op = (kwargs.get('out',None),) + args - it = np.nditer(op, ['buffered','external_loop'], - [['writeonly','allocate','no_broadcast']] + - [['readonly','nbo','aligned']]*nargs, - order=kwargs.get('order','K'), - casting=kwargs.get('casting','safe'), - buffersize=kwargs.get('buffersize',0)) - while not it.finished: - it[0] = lamdaexpr(*it[1:]) - it.iternext() - return it.operands[0] - - >>> a = np.arange(5) - >>> b = np.ones(5) - >>> luf(lambda i,j:i*i + j/2, a, b) - array([ 0.5, 1.5, 4.5, 9.5, 16.5]) + >>> def luf(lamdaexpr, *args, **kwargs): + ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)''' + ... nargs = len(args) + ... op = (kwargs.get('out',None),) + args + ... it = np.nditer(op, ['buffered','external_loop'], + ... [['writeonly','allocate','no_broadcast']] + + ... [['readonly','nbo','aligned']]*nargs, + ... order=kwargs.get('order','K'), + ... casting=kwargs.get('casting','safe'), + ... buffersize=kwargs.get('buffersize',0)) + ... while not it.finished: + ... it[0] = lamdaexpr(*it[1:]) + ... it.iternext() + ... return it.operands[0] + + >>> a = np.arange(5) + >>> b = np.ones(5) + >>> luf(lambda i,j:i*i + j/2, a, b) + array([ 0.5, 1.5, 4.5, 9.5, 16.5]) If operand flags `"writeonly"` or `"readwrite"` are used the operands may be views into the original data with the `WRITEBACKIFCOPY` flag. 
In this case @@ -393,16 +390,16 @@ add_newdoc('numpy.core', 'nditer', data will be written back to the original data when the `__exit__` function is called but not before: - >>> a = np.arange(6, dtype='i4')[::-2] - >>> with nditer(a, [], - ... [['writeonly', 'updateifcopy']], - ... casting='unsafe', - ... op_dtypes=[np.dtype('f4')]) as i: - ... x = i.operands[0] - ... x[:] = [-1, -2, -3] - ... # a still unchanged here - >>> a, x - array([-1, -2, -3]), array([-1, -2, -3]) + >>> a = np.arange(6, dtype='i4')[::-2] + >>> with np.nditer(a, [], + ... [['writeonly', 'updateifcopy']], + ... casting='unsafe', + ... op_dtypes=[np.dtype('f4')]) as i: + ... x = i.operands[0] + ... x[:] = [-1, -2, -3] + ... # a still unchanged here + >>> a, x + (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32)) It is important to note that once the iterator is exited, dangling references (like `x` in the example) may or may not share data with @@ -428,10 +425,10 @@ add_newdoc('numpy.core', 'nditer', ('copy', >>> x = np.arange(10) >>> y = x + 1 >>> it = np.nditer([x, y]) - >>> it.next() + >>> next(it) (array(0), array(1)) >>> it2 = it.copy() - >>> it2.next() + >>> next(it2) (array(1), array(2)) """)) @@ -544,7 +541,6 @@ add_newdoc('numpy.core', 'nested_iters', ... print(i.multi_index) ... for y in j: ... print('', j.multi_index, y) - (0,) (0, 0) 0 (0, 1) 1 @@ -617,9 +613,9 @@ add_newdoc('numpy.core', 'broadcast', >>> out = np.empty(b.shape) >>> out.flat = [u+v for (u,v) in b] >>> out - array([[ 5., 6., 7.], - [ 6., 7., 8.], - [ 7., 8., 9.]]) + array([[5., 6., 7.], + [6., 7., 8.], + [7., 8., 9.]]) Compare against built-in broadcasting: @@ -643,7 +639,7 @@ add_newdoc('numpy.core', 'broadcast', ('index', >>> b = np.broadcast(x, y) >>> b.index 0 - >>> b.next(), b.next(), b.next() + >>> next(b), next(b), next(b) ((1, 4), (1, 5), (1, 6)) >>> b.index 3 @@ -762,11 +758,11 @@ add_newdoc('numpy.core', 'broadcast', ('reset', Examples -------- >>> x = np.array([1, 2, 3]) - >>> y = np.array([[4], [5], [6]] + >>> y = np.array([[4], [5], [6]]) >>> b = np.broadcast(x, y) >>> b.index 0 - >>> b.next(), b.next(), b.next() + >>> next(b), next(b), next(b) ((1, 4), (2, 4), (3, 4)) >>> b.index 3 @@ -1189,32 +1185,32 @@ add_newdoc('numpy.core.multiarray', 'fromfile', -------- Construct an ndarray: - >>> dt = np.dtype([('time', [('min', int), ('sec', int)]), + >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]), ... 
('temp', float)]) >>> x = np.zeros((1,), dtype=dt) >>> x['time']['min'] = 10; x['temp'] = 98.25 >>> x array([((10, 0), 98.25)], - dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) Save the raw data to disk: - >>> import os - >>> fname = os.tmpnam() + >>> import tempfile + >>> fname = tempfile.mkstemp()[1] >>> x.tofile(fname) Read the raw data from disk: >>> np.fromfile(fname, dtype=dt) array([((10, 0), 98.25)], - dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) The recommended way to store and load data: >>> np.save(fname, x) >>> np.load(fname + '.npy') array([((10, 0), 98.25)], - dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')]) + dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')]) """) @@ -1242,17 +1238,16 @@ add_newdoc('numpy.core.multiarray', 'frombuffer', >>> dt = np.dtype(int) >>> dt = dt.newbyteorder('>') - >>> np.frombuffer(buf, dtype=dt) + >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP The data of the resulting array will not be byteswapped, but will be interpreted correctly. Examples -------- - >>> s = 'hello world' + >>> s = b'hello world' >>> np.frombuffer(s, dtype='S1', count=5, offset=6) - array(['w', 'o', 'r', 'l', 'd'], - dtype='|S1') + array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1') >>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8) array([1, 2], dtype=uint8) @@ -1941,8 +1936,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', First mode, `buffer` is None: >>> np.ndarray(shape=(2,2), dtype=float, order='F') - array([[ -1.13698227e+002, 4.25087011e-303], - [ 2.88528414e-306, 3.27025015e-309]]) #random + array([[0.0e+000, 0.0e+000], # random + [ nan, 2.5e-323]]) Second mode: @@ -2047,14 +2042,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes', .. automethod:: numpy.core._internal._ctypes.strides_as - Be careful using the ctypes attribute - especially on temporary - arrays or arrays constructed on the fly. For example, calling - ``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory - that is invalid because the array created as (a+b) is deallocated - before the next Python statement. You can avoid this problem using - either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will - hold a reference to the array until ct is deleted or re-assigned. - If the ctypes module is not available, then the ctypes attribute of array objects still returns something useful, but ctypes objects are not returned and errors may be raised instead. In particular, @@ -2256,7 +2243,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flat', >>> x.T.flat[3] 5 >>> type(x.flat) - <type 'numpy.flatiter'> + <class 'numpy.flatiter'> An assignment example: @@ -2706,7 +2693,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype', -------- >>> x = np.array([1, 2, 2.5]) >>> x - array([ 1. , 2. , 2.5]) + array([1. , 2. 
, 2.5]) >>> x.astype(int) array([1, 2, 2]) @@ -2737,19 +2724,20 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap', Examples -------- >>> A = np.array([1, 256, 8755], dtype=np.int16) - >>> map(hex, A) + >>> list(map(hex, A)) ['0x1', '0x100', '0x2233'] >>> A.byteswap(inplace=True) array([ 256, 1, 13090], dtype=int16) - >>> map(hex, A) + >>> list(map(hex, A)) ['0x100', '0x1', '0x3322'] Arrays of strings are not swapped >>> A = np.array(['ceg', 'fac']) >>> A.byteswap() - array(['ceg', 'fac'], - dtype='|S3') + Traceback (most recent call last): + ... + UnicodeDecodeError: ... """)) @@ -2937,14 +2925,14 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dot', >>> a = np.eye(2) >>> b = np.ones((2, 2)) * 2 >>> a.dot(b) - array([[ 2., 2.], - [ 2., 2.]]) + array([[2., 2.], + [2., 2.]]) This array method can be conveniently chained: >>> a.dot(b).dot(b) - array([[ 8., 8.], - [ 8., 8.]]) + array([[8., 8.], + [8., 8.]]) """)) @@ -2997,7 +2985,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('fill', >>> a = np.empty(2) >>> a.fill(1) >>> a - array([ 1., 1.]) + array([1., 1.]) """)) @@ -3066,18 +3054,18 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield', >>> x = np.diag([1.+1.j]*2) >>> x[1, 1] = 2 + 4.j >>> x - array([[ 1.+1.j, 0.+0.j], - [ 0.+0.j, 2.+4.j]]) + array([[1.+1.j, 0.+0.j], + [0.+0.j, 2.+4.j]]) >>> x.getfield(np.float64) - array([[ 1., 0.], - [ 0., 2.]]) + array([[1., 0.], + [0., 2.]]) By choosing an offset of 8 bytes we can select the complex part of the array for our view: >>> x.getfield(np.float64, offset=8) - array([[ 1., 0.], - [ 0., 4.]]) + array([[1., 0.], + [0., 4.]]) """)) @@ -3123,19 +3111,20 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('item', Examples -------- + >>> np.random.seed(123) >>> x = np.random.randint(9, size=(3, 3)) >>> x - array([[3, 1, 7], - [2, 8, 3], - [8, 5, 3]]) + array([[2, 2, 6], + [1, 3, 6], + [1, 0, 1]]) >>> x.item(3) - 2 + 1 >>> x.item(7) - 5 + 0 >>> x.item((0, 1)) - 1 + 2 >>> x.item((2, 2)) - 3 + 1 """)) @@ -3171,17 +3160,18 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset', Examples -------- + >>> np.random.seed(123) >>> x = np.random.randint(9, size=(3, 3)) >>> x - array([[3, 1, 7], - [2, 8, 3], - [8, 5, 3]]) + array([[2, 2, 6], + [1, 3, 6], + [1, 0, 1]]) >>> x.itemset(4, 0) >>> x.itemset((2, 2), 9) >>> x - array([[3, 1, 7], - [2, 0, 3], - [8, 5, 9]]) + array([[2, 2, 6], + [1, 0, 6], + [1, 0, 9]]) """)) @@ -3622,7 +3612,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('resize', >>> a.resize((1, 1)) Traceback (most recent call last): ... - ValueError: cannot resize an array that has been referenced ... + ValueError: cannot resize an array that references or is referenced ... 
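As context for the reworded ``resize`` error message above, a small hedged sketch of the ``refcheck`` behaviour (a plain script; the variable names are illustrative)::

    import numpy as np

    a = np.array([[0, 1], [2, 3]])
    view = a[0]                       # another array now references a's buffer
    try:
        a.resize((1, 1))              # refcheck=True by default
    except ValueError as exc:
        print('refused:', exc)
    a.resize((1, 1), refcheck=False)  # caller promises not to use stale references
    print(a)                          # [[0]]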
Unless `refcheck` is False: @@ -3695,23 +3685,23 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield', -------- >>> x = np.eye(3) >>> x.getfield(np.float64) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) >>> x.setfield(3, np.int32) >>> x.getfield(np.int32) array([[3, 3, 3], [3, 3, 3], - [3, 3, 3]]) + [3, 3, 3]], dtype=int32) >>> x - array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323], - [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323], - [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]]) + array([[1.0e+000, 1.5e-323, 1.5e-323], + [1.5e-323, 1.0e+000, 1.5e-323], + [1.5e-323, 1.5e-323, 1.0e+000]]) >>> x.setfield(np.eye(3), np.int32) >>> x - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) """)) @@ -3764,6 +3754,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags', Examples -------- + >>> y = np.array([[3, 1, 7], + ... [2, 0, 0], + ... [8, 5, 9]]) >>> y array([[3, 1, 7], [2, 0, 0], @@ -3843,8 +3836,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('sort', >>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)]) >>> a.sort(order='y') >>> a - array([('c', 1), ('a', 2)], - dtype=[('x', '|S1'), ('y', '<i4')]) + array([(b'c', 1), (b'a', 2)], + dtype=[('x', 'S1'), ('y', '<i8')]) """)) @@ -3900,6 +3893,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition', array([2, 1, 3, 4]) >>> a.partition((1, 3)) + >>> a array([1, 2, 3, 4]) """)) @@ -4081,13 +4075,13 @@ tobytesdoc = """ Examples -------- - >>> x = np.array([[0, 1], [2, 3]]) + >>> x = np.array([[0, 1], [2, 3]], dtype='<u2') >>> x.tobytes() - b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00' + b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00' >>> x.tobytes('C') == x.tobytes() True >>> x.tobytes('F') - b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00' + b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00' """ @@ -4237,7 +4231,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', >>> y matrix([[513]], dtype=int16) >>> print(type(y)) - <class 'numpy.matrixlib.defmatrix.matrix'> + <class 'numpy.matrix'> Creating a view on a structured array so it can be used in calculations @@ -4247,19 +4241,19 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', array([[1, 2], [3, 4]], dtype=int8) >>> xv.mean(0) - array([ 2., 3.]) + array([2., 3.]) Making changes to the view changes the underlying array >>> xv[0,1] = 20 - >>> print(x) - [(1, 20) (3, 4)] + >>> x + array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')]) Using a view to convert an array to a recarray: >>> z = x.view(np.recarray) >>> z.a - array([1], dtype=int8) + array([1, 3], dtype=int8) Views share data: @@ -4277,8 +4271,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view', [4, 5]], dtype=int16) >>> y.view(dtype=[('width', np.int16), ('length', np.int16)]) Traceback (most recent call last): - File "<stdin>", line 1, in <module> - ValueError: new type not compatible with array. + ... 
+ ValueError: To change to a dtype of a different size, the array must be C-contiguous >>> z = y.copy() >>> z.view(dtype=[('width', np.int16), ('length', np.int16)]) array([[(1, 2)], @@ -4329,10 +4323,9 @@ add_newdoc('numpy.core.umath', 'frompyfunc', >>> oct_array = np.frompyfunc(oct, 1, 1) >>> oct_array(np.array((10, 30, 100))) - array([012, 036, 0144], dtype=object) + array(['0o12', '0o36', '0o144'], dtype=object) >>> np.array((oct(10), oct(30), oct(100))) # for comparison - array(['012', '036', '0144'], - dtype='|S4') + array(['0o12', '0o36', '0o144'], dtype='<U5') """) @@ -4394,7 +4387,7 @@ add_newdoc('numpy.core.umath', 'geterrobj', >>> np.base_repr(np.geterrobj()[1], 8) '0' >>> old_err = np.seterr(divide='warn', over='log', under='call', - invalid='print') + ... invalid='print') >>> np.base_repr(np.geterrobj()[1], 8) '4351' @@ -4540,7 +4533,10 @@ add_newdoc('numpy.core.multiarray', 'packbits', ... [0,0,1]]]) >>> b = np.packbits(a, axis=-1) >>> b - array([[[160],[64]],[[192],[32]]], dtype=uint8) + array([[[160], + [ 64]], + [[192], + [ 32]]], dtype=uint8) Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, and 32 = 0010 0000. @@ -4981,7 +4977,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce', >>> np.add.reduce([10], initial=5) 15 - >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initializer=10) + >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10) array([14., 14.]) Allows reductions of empty arrays where they would normally fail, i.e. @@ -5054,23 +5050,23 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate', >>> I = np.eye(2) >>> I - array([[ 1., 0.], - [ 0., 1.]]) + array([[1., 0.], + [0., 1.]]) Accumulate along axis 0 (rows), down columns: >>> np.add.accumulate(I, 0) - array([[ 1., 0.], - [ 1., 1.]]) + array([[1., 0.], + [1., 1.]]) >>> np.add.accumulate(I) # no axis specified = axis zero - array([[ 1., 0.], - [ 1., 1.]]) + array([[1., 0.], + [1., 1.]]) Accumulate along axis 1 (columns), through rows: >>> np.add.accumulate(I, 1) - array([[ 1., 1.], - [ 0., 1.]]) + array([[1., 1.], + [0., 1.]]) """)) @@ -5147,10 +5143,10 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat', >>> x = np.linspace(0, 15, 16).reshape(4,4) >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) :: @@ -5162,11 +5158,11 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat', # [row1 + row2 + row3 + row4] >>> np.add.reduceat(x, [0, 3, 1, 2, 0]) - array([[ 12., 15., 18., 21.], - [ 12., 13., 14., 15.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 24., 28., 32., 36.]]) + array([[12., 15., 18., 21.], + [12., 13., 14., 15.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [24., 28., 32., 36.]]) :: @@ -5174,10 +5170,10 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat', # [col1 * col2 * col3, col4] >>> np.multiply.reduceat(x, [0, 3], 1) - array([[ 0., 3.], - [ 120., 7.], - [ 720., 11.], - [ 2184., 15.]]) + array([[ 0., 3.], + [ 120., 7.], + [ 720., 11.], + [2184., 15.]]) """)) @@ -5276,14 +5272,14 @@ add_newdoc('numpy.core', 'ufunc', ('at', >>> a = np.array([1, 2, 3, 4]) >>> np.negative.at(a, [0, 1]) - >>> print(a) - array([-1, -2, 3, 4]) + >>> a + array([-1, -2, 3, 4]) Increment items 0 and 1, and increment item 2 twice: >>> a = np.array([1, 2, 3, 4]) >>> np.add.at(a, [0, 1, 2, 2], 1) - >>> print(a) + >>> a array([2, 3, 5, 4]) Add items 0 and 1 in first array to second array, @@ -5292,7 +5288,7 @@ add_newdoc('numpy.core', 'ufunc', ('at', >>> a = 
np.array([1, 2, 3, 4]) >>> b = np.array([1, 2]) >>> np.add.at(a, [0, 1], b) - >>> print(a) + >>> a array([2, 4, 3, 4]) """)) @@ -5357,13 +5353,13 @@ add_newdoc('numpy.core.multiarray', 'dtype', Structured type, two fields: the first field contains an unsigned int, the second an int32: - >>> np.dtype([('f1', np.uint), ('f2', np.int32)]) - dtype([('f1', '<u4'), ('f2', '<i4')]) + >>> np.dtype([('f1', np.uint64), ('f2', np.int32)]) + dtype([('f1', '<u8'), ('f2', '<i4')]) Using array-protocol type strings: >>> np.dtype([('a','f8'),('b','S10')]) - dtype([('a', '<f8'), ('b', '|S10')]) + dtype([('a', '<f8'), ('b', 'S10')]) Using comma-separated field formats. The shape is (2,3): @@ -5373,24 +5369,24 @@ add_newdoc('numpy.core.multiarray', 'dtype', Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void`` is a flexible type, here of size 10: - >>> np.dtype([('hello',(int,3)),('world',np.void,10)]) - dtype([('hello', '<i4', 3), ('world', '|V10')]) + >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)]) + dtype([('hello', '<i8', (3,)), ('world', 'V10')]) Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are the offsets in bytes: >>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)})) - dtype(('<i2', [('x', '|i1'), ('y', '|i1')])) + dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')])) Using dictionaries. Two fields named 'gender' and 'age': >>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]}) - dtype([('gender', '|S1'), ('age', '|u1')]) + dtype([('gender', 'S1'), ('age', 'u1')]) Offsets in bytes, here 0 and 25: >>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)}) - dtype([('surname', '|S25'), ('age', '|u1')]) + dtype([('surname', 'S25'), ('age', 'u1')]) """) @@ -5794,7 +5790,7 @@ add_newdoc('numpy.core.multiarray', 'busdaycalendar', ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) >>> # Default is Monday to Friday weekdays ... bdd.weekmask - array([ True, True, True, True, True, False, False], dtype='bool') + array([ True, True, True, True, True, False, False]) >>> # Any holidays already on the weekend are removed ... bdd.holidays array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]') @@ -5891,7 +5887,7 @@ add_newdoc('numpy.core.multiarray', 'datetime_data', as a timedelta >>> np.datetime64('2010', np.datetime_data(dt_25s)) - numpy.datetime64('2010-01-01T00:00:00', '25s') + numpy.datetime64('2010-01-01T00:00:00','25s') """) diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py index 59da60253..1d3bb5584 100644 --- a/numpy/core/_internal.py +++ b/numpy/core/_internal.py @@ -238,19 +238,68 @@ _getintp_ctype.cache = None class _missing_ctypes(object): def cast(self, num, obj): - return num + return num.value + + class c_void_p(object): + def __init__(self, ptr): + self.value = ptr + + +class _unsafe_first_element_pointer(object): + """ + Helper to allow viewing an array as a ctypes pointer to the first element + + This avoids: + * dealing with strides + * `.view` rejecting object-containing arrays + * `memoryview` not supporting overlapping fields + """ + def __init__(self, arr): + self.base = arr + + @property + def __array_interface__(self): + i = dict( + shape=(), + typestr='|V0', + data=(self.base.__array_interface__['data'][0], False), + strides=(), + version=3, + ) + return i + + +def _get_void_ptr(arr): + """ + Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array + """ + import numpy as np + # convert to a 0d array that has a data pointer referrign to the start + # of arr. This holds a reference to arr. 
+ simple_arr = np.asarray(_unsafe_first_element_pointer(arr)) + + # create a `char[0]` using the same memory. + c_arr = (ctypes.c_char * 0).from_buffer(simple_arr) + + # finally cast to void* + return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p) - def c_void_p(self, num): - return num class _ctypes(object): def __init__(self, array, ptr=None): + self._arr = array + if ctypes: self._ctypes = ctypes + # get a void pointer to the buffer, which keeps the array alive + self._data = _get_void_ptr(array) + assert self._data.value == ptr else: + # fake a pointer-like object that holds onto the reference self._ctypes = _missing_ctypes() - self._arr = array - self._data = ptr + self._data = self._ctypes.c_void_p(ptr) + self._data._objects = array + if self._arr.ndim == 0: self._zerod = True else: @@ -263,6 +312,8 @@ class _ctypes(object): ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a pointer to a ctypes array of floating-point data: ``self.data_as(ctypes.POINTER(ctypes.c_double))``. + + The returned pointer will keep a reference to the array. """ return self._ctypes.cast(self._data, obj) @@ -284,7 +335,8 @@ class _ctypes(object): return None return (obj*self._arr.ndim)(*self._arr.strides) - def get_data(self): + @property + def data(self): """ A pointer to the memory area of the array as a Python integer. This memory area may contain data that is not aligned, or not in correct @@ -293,10 +345,16 @@ class _ctypes(object): attribute to arbitrary C-code to avoid trouble that can include Python crashing. User Beware! The value of this attribute is exactly the same as ``self._array_interface_['data'][0]``. + + Note that unlike `data_as`, a reference will not be kept to the array: + code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a + pointer to a deallocated array, and should be spelt + ``(a + b).ctypes.data_as(ctypes.c_void_p)`` """ - return self._data + return self._data.value - def get_shape(self): + @property + def shape(self): """ (c_intp*self.ndim): A ctypes array of length self.ndim where the basetype is the C-integer corresponding to ``dtype('p')`` on this @@ -307,7 +365,8 @@ class _ctypes(object): """ return self.shape_as(_getintp_ctype()) - def get_strides(self): + @property + def strides(self): """ (c_intp*self.ndim): A ctypes array of length self.ndim where the basetype is the same as for the shape attribute. This ctypes array @@ -317,13 +376,20 @@ class _ctypes(object): """ return self.strides_as(_getintp_ctype()) - def get_as_parameter(self): - return self._ctypes.c_void_p(self._data) + @property + def _as_parameter_(self): + """ + Overrides the ctypes semi-magic method + + Enables `c_func(some_array.ctypes)` + """ + return self._data - data = property(get_data) - shape = property(get_shape) - strides = property(get_strides) - _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_") + # kept for compatibility + get_data = data.fget + get_shape = shape.fget + get_strides = strides.fget + get_as_parameter = _as_parameter_.fget def _newnames(datatype, order): @@ -764,6 +830,13 @@ def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): .format(ufunc, method, args_string, types_string)) +def array_function_errmsg_formatter(public_api, types): + """ Format the error message for when __array_ufunc__ gives up. 
""" + func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) + return ("no implementation found for '{}' on types that implement " + '__array_function__: {}'.format(func_name, list(types))) + + def _ufunc_doc_signature_formatter(ufunc): """ Builds a signature string which resembles PEP 457 diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py index baeab6383..33f6d01a8 100644 --- a/numpy/core/_methods.py +++ b/numpy/core/_methods.py @@ -154,15 +154,3 @@ def _ptp(a, axis=None, out=None, keepdims=False): umr_minimum(a, axis, None, None, keepdims), out ) - -_NDARRAY_ARRAY_FUNCTION = mu.ndarray.__array_function__ - -def _array_function(self, func, types, args, kwargs): - # TODO: rewrite this in C - # Cannot handle items that have __array_function__ other than our own. - for t in types: - if not issubclass(t, mu.ndarray) and hasattr(t, '__array_function__'): - return NotImplemented - - # The regular implementation can handle this, so we call it directly. - return func.__wrapped__(*args, **kwargs) diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py index 6a71de226..7d8785c32 100644 --- a/numpy/core/arrayprint.py +++ b/numpy/core/arrayprint.py @@ -201,21 +201,21 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None, Floating point precision can be set: >>> np.set_printoptions(precision=4) - >>> print(np.array([1.123456789])) - [ 1.1235] + >>> np.array([1.123456789]) + [1.1235] Long arrays can be summarised: >>> np.set_printoptions(threshold=5) - >>> print(np.arange(10)) - [0 1 2 ..., 7 8 9] + >>> np.arange(10) + array([0, 1, 2, ..., 7, 8, 9]) Small results can be suppressed: >>> eps = np.finfo(float).eps >>> x = np.arange(4.) >>> x**2 - (x + eps)**2 - array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) + array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00]) >>> np.set_printoptions(suppress=True) >>> x**2 - (x + eps)**2 array([-0., -0., 0., 0.]) @@ -299,9 +299,10 @@ def printoptions(*args, **kwargs): Examples -------- + >>> from numpy.testing import assert_equal >>> with np.printoptions(precision=2): - ... print(np.array([2.0])) / 3 - [0.67] + ... np.array([2.0]) / 3 + array([0.67]) The `as`-clause of the `with`-statement gives the current print options: @@ -644,9 +645,9 @@ def array2string(a, max_line_width=None, precision=None, Examples -------- >>> x = np.array([1e-16,1,2,3]) - >>> print(np.array2string(x, precision=2, separator=',', - ... suppress_small=True)) - [ 0., 1., 2., 3.] + >>> np.array2string(x, precision=2, separator=',', + ... suppress_small=True) + '[0.,1.,2.,3.]' >>> x = np.arange(3.) 
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) @@ -654,7 +655,7 @@ def array2string(a, max_line_width=None, precision=None, >>> x = np.arange(3) >>> np.array2string(x, formatter={'int':lambda x: hex(x)}) - '[0x0L 0x1L 0x2L]' + '[0x0 0x1 0x2]' """ legacy = kwarg.pop('legacy', None) @@ -1357,7 +1358,7 @@ def dtype_is_implied(dtype): >>> np.core.arrayprint.dtype_is_implied(np.int8) False >>> np.array([1, 2, 3], np.int8) - array([1, 2, 3], dtype=np.int8) + array([1, 2, 3], dtype=int8) """ dtype = np.dtype(dtype) if _format_options['legacy'] == '1.13' and dtype.type == bool_: @@ -1377,6 +1378,7 @@ def dtype_short_repr(dtype): The intent is roughly that the following holds >>> from numpy import * + >>> dt = np.int64([1, 2]).dtype >>> assert eval(dtype_short_repr(dt)) == dt """ if dtype.names is not None: @@ -1480,13 +1482,13 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None): >>> np.array_repr(np.array([1,2])) 'array([1, 2])' >>> np.array_repr(np.ma.array([0.])) - 'MaskedArray([ 0.])' + 'MaskedArray([0.])' >>> np.array_repr(np.array([], np.int32)) 'array([], dtype=int32)' >>> x = np.array([1e-6, 4e-7, 2, 3]) >>> np.array_repr(x, precision=6, suppress_small=True) - 'array([ 0.000001, 0. , 2. , 3. ])' + 'array([0.000001, 0. , 2. , 3. ])' """ return _array_repr_implementation( @@ -1597,8 +1599,8 @@ def set_string_function(f, repr=True): >>> a = np.arange(10) >>> a HA! - What are you going to do now? - >>> print(a) - [0 1 2 3 4 5 6 7 8 9] + >>> _ = a + >>> # [0 1 2 3 4 5 6 7 8 9] We can reset the function to the default: @@ -1616,7 +1618,7 @@ def set_string_function(f, repr=True): >>> x.__str__() 'random' >>> x.__repr__() - 'array([ 0, 1, 2, 3])' + 'array([0, 1, 2, 3])' """ if f is None: diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py index 1d2cd25c8..4aca2373c 100644 --- a/numpy/core/code_generators/genapi.py +++ b/numpy/core/code_generators/genapi.py @@ -19,6 +19,7 @@ __docformat__ = 'restructuredtext' # The files under src/ that are scanned for API functions API_FILES = [join('multiarray', 'alloc.c'), + join('multiarray', 'arrayfunction_override.c'), join('multiarray', 'array_assign_array.c'), join('multiarray', 'array_assign_scalar.c'), join('multiarray', 'arrayobject.c'), diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py index 8a690c43d..267e63b2d 100644 --- a/numpy/core/code_generators/ufunc_docstrings.py +++ b/numpy/core/code_generators/ufunc_docstrings.py @@ -648,8 +648,8 @@ add_newdoc('numpy.core.umath', 'bitwise_or', array([ 6, 5, 255]) >>> np.array([2, 5, 255]) | np.array([4, 4, 4]) array([ 6, 5, 255]) - >>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32), - ... np.array([4, 4, 4, 2147483647L], dtype=np.int32)) + >>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32), + ... 
np.array([4, 4, 4, 2147483647], dtype=np.int32)) array([ 6, 5, 255, 2147483647]) >>> np.bitwise_or([True, True], [False, True]) array([ True, True]) @@ -837,6 +837,7 @@ add_newdoc('numpy.core.umath', 'cos', array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00]) >>> >>> # Example of providing the optional output parameter + >>> out1 = np.array([0], dtype='d') >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -845,7 +846,7 @@ add_newdoc('numpy.core.umath', 'cos', >>> np.cos(np.zeros((3,3)),np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> - ValueError: invalid return array shape + ValueError: operands could not be broadcast together with shapes (3,3) (2,2) """) @@ -912,7 +913,7 @@ add_newdoc('numpy.core.umath', 'degrees', 270., 300., 330.]) >>> out = np.zeros((rad.shape)) - >>> r = degrees(rad, out) + >>> r = np.degrees(rad, out) >>> np.all(r == out) True @@ -1559,33 +1560,31 @@ add_newdoc('numpy.core.umath', 'invert', We've seen that 13 is represented by ``00001101``. The invert or bit-wise NOT of 13 is then: - >>> np.invert(np.array([13], dtype=uint8)) - array([242], dtype=uint8) + >>> x = np.invert(np.array(13, dtype=np.uint8)) + >>> x + 242 >>> np.binary_repr(x, width=8) - '00001101' - >>> np.binary_repr(242, width=8) '11110010' The result depends on the bit-width: - >>> np.invert(np.array([13], dtype=uint16)) - array([65522], dtype=uint16) + >>> x = np.invert(np.array(13, dtype=np.uint16)) + >>> x + 65522 >>> np.binary_repr(x, width=16) - '0000000000001101' - >>> np.binary_repr(65522, width=16) '1111111111110010' When using signed integer types the result is the two's complement of the result for the unsigned type: - >>> np.invert(np.array([13], dtype=int8)) + >>> np.invert(np.array([13], dtype=np.int8)) array([-14], dtype=int8) >>> np.binary_repr(-14, width=8) '11110010' Booleans are accepted as well: - >>> np.invert(array([True, False])) + >>> np.invert(np.array([True, False])) array([False, True]) """) @@ -1969,7 +1968,7 @@ add_newdoc('numpy.core.umath', 'log10', Examples -------- >>> np.log10([1e-15, -3.]) - array([-15., NaN]) + array([-15., nan]) """) @@ -2361,7 +2360,7 @@ add_newdoc('numpy.core.umath', 'maximum', [ 0.5, 2. ]]) >>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan]) - array([ NaN, NaN, NaN]) + array([nan, nan, nan]) >>> np.maximum(np.Inf, 1) inf @@ -2420,7 +2419,7 @@ add_newdoc('numpy.core.umath', 'minimum', [ 0. , 1. ]]) >>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan]) - array([ NaN, NaN, NaN]) + array([nan, nan, nan]) >>> np.minimum(-np.Inf, 1) -inf @@ -2480,7 +2479,7 @@ add_newdoc('numpy.core.umath', 'fmax', [ 0.5, 2. ]]) >>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan]) - array([ 0., 0., NaN]) + array([ 0., 0., nan]) """) @@ -2538,7 +2537,7 @@ add_newdoc('numpy.core.umath', 'fmin', [ 0. , 1. 
]]) >>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan]) - array([ 0., 0., NaN]) + array([ 0., 0., nan]) """) @@ -2604,12 +2603,13 @@ add_newdoc('numpy.core.umath', 'matmul', - Stacks of matrices are broadcast together as if the matrices were elements, respecting the signature ``(n,k),(k,m)->(n,m)``: - >>> a = a = np.full([9,5,7,3], True, dtype=bool) - >>> c = np.full([9, 5, 4,3], True, dtype=bool) + >>> a = np.ones([9, 5, 7, 4]) + >>> c = np.ones([9, 5, 4, 3]) >>> np.dot(a, c).shape - (9, 5, 7, 9, 5, 4) - >>> np.matmul(a, c).shape # n is 5, k is 3, m is 4 - (9, 5, 7, 4) + (9, 5, 7, 9, 5, 3) + >>> np.matmul(a, c).shape + (9, 5, 7, 3) + >>> # n is 7, k is 4, m is 3 The matmul function implements the semantics of the `@` operator introduced in Python 3.5 following PEP465. @@ -2621,7 +2621,7 @@ add_newdoc('numpy.core.umath', 'matmul', >>> a = np.array([[1, 0], ... [0, 1]]) >>> b = np.array([[4, 1], - ... [2, 2]] + ... [2, 2]]) >>> np.matmul(a, b) array([[4, 1], [2, 2]]) @@ -2629,7 +2629,7 @@ add_newdoc('numpy.core.umath', 'matmul', For 2-D mixed with 1-D, the result is the usual. >>> a = np.array([[1, 0], - ... [0, 1]] + ... [0, 1]]) >>> b = np.array([1, 2]) >>> np.matmul(a, b) array([1, 2]) @@ -3475,6 +3475,7 @@ add_newdoc('numpy.core.umath', 'sinh', >>> # Discrepancy due to vagaries of floating point arithmetic. >>> # Example of providing the optional output parameter + >>> out1 = np.array([0], dtype='d') >>> out2 = np.sinh([0.1], out1) >>> out2 is out1 True @@ -3483,7 +3484,7 @@ add_newdoc('numpy.core.umath', 'sinh', >>> np.sinh(np.zeros((3,3)),np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> - ValueError: invalid return array shape + ValueError: operands could not be broadcast together with shapes (3,3) (2,2) """) @@ -3528,8 +3529,8 @@ add_newdoc('numpy.core.umath', 'sqrt', >>> np.sqrt([4, -1, -3+4J]) array([ 2.+0.j, 0.+1.j, 1.+2.j]) - >>> np.sqrt([4, -1, numpy.inf]) - array([ 2., NaN, Inf]) + >>> np.sqrt([4, -1, np.inf]) + array([ 2., nan, inf]) """) @@ -3660,6 +3661,7 @@ add_newdoc('numpy.core.umath', 'tan', >>> >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter + >>> out1 = np.array([0], dtype='d') >>> out2 = np.cos([0.1], out1) >>> out2 is out1 True @@ -3668,7 +3670,7 @@ add_newdoc('numpy.core.umath', 'tan', >>> np.cos(np.zeros((3,3)),np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> - ValueError: invalid return array shape + ValueError: operands could not be broadcast together with shapes (3,3) (2,2) """) @@ -3711,6 +3713,7 @@ add_newdoc('numpy.core.umath', 'tanh', >>> # Example of providing the optional output parameter illustrating >>> # that what is returned is a reference to said parameter + >>> out1 = np.array([0], dtype='d') >>> out2 = np.tanh([0.1], out1) >>> out2 is out1 True @@ -3719,7 +3722,7 @@ add_newdoc('numpy.core.umath', 'tanh', >>> np.tanh(np.zeros((3,3)),np.zeros((2,2))) Traceback (most recent call last): File "<stdin>", line 1, in <module> - ValueError: invalid return array shape + ValueError: operands could not be broadcast together with shapes (3,3) (2,2) """) @@ -3761,8 +3764,6 @@ add_newdoc('numpy.core.umath', 'true_divide', >>> np.true_divide(x, 4) array([ 0. , 0.25, 0.5 , 0.75, 1. 
]) - >>> x/4 - array([0, 0, 0, 0, 1]) >>> x//4 array([0, 0, 0, 0, 1]) @@ -3858,7 +3859,7 @@ add_newdoc('numpy.core.umath', 'ldexp', Examples -------- >>> np.ldexp(5, np.arange(4)) - array([ 5., 10., 20., 40.], dtype=float32) + array([ 5., 10., 20., 40.], dtype=float16) >>> x = np.arange(6) >>> np.ldexp(*np.frexp(x)) diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py index 12ba3f02e..007fc6186 100644 --- a/numpy/core/defchararray.py +++ b/numpy/core/defchararray.py @@ -498,8 +498,7 @@ def count(a, sub, start=0, end=None): -------- >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') >>> np.char.count(c, 'A') array([3, 1, 1]) >>> np.char.count(c, 'aA') @@ -552,8 +551,7 @@ def decode(a, encoding=None, errors=None): -------- >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') >>> np.char.encode(c, encoding='cp037') array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@', '\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'], @@ -637,8 +635,7 @@ def endswith(a, suffix, start=0, end=None): >>> s[0] = 'foo' >>> s[1] = 'bar' >>> s - array(['foo', 'bar'], - dtype='|S3') + array(['foo', 'bar'], dtype='<U3') >>> np.char.endswith(s, 'ar') array([False, True]) >>> np.char.endswith(s, 'a', start=1, end=2) @@ -1036,11 +1033,9 @@ def lower(a): Examples -------- >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c - array(['A1B C', '1BCA', 'BCA1'], - dtype='|S5') + array(['A1B C', '1BCA', 'BCA1'], dtype='<U5') >>> np.char.lower(c) - array(['a1b c', '1bca', 'bca1'], - dtype='|S5') + array(['a1b c', '1bca', 'bca1'], dtype='<U5') """ a_arr = numpy.asarray(a) @@ -1084,23 +1079,20 @@ def lstrip(a, chars=None): -------- >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') The 'a' variable is unstripped from c[1] because whitespace leading. >>> np.char.lstrip(c, 'a') - array(['AaAaA', ' aA ', 'bBABba'], - dtype='|S7') + array(['AaAaA', ' aA ', 'bBABba'], dtype='<U7') >>> np.char.lstrip(c, 'A') # leaves c unchanged - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all() - ... # XXX: is this a regression? this line now returns False + ... # XXX: is this a regression? This used to return True ... # np.char.lstrip(c,'') does not modify c at all. 
- True + False >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all() True @@ -1400,10 +1392,10 @@ def rstrip(a, chars=None): >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c array(['aAaAaA', 'abBABba'], dtype='|S7') - >>> np.char.rstrip(c, 'a') + >>> np.char.rstrip(c, b'a') array(['aAaAaA', 'abBABb'], dtype='|S7') - >>> np.char.rstrip(c, 'A') + >>> np.char.rstrip(c, b'A') array(['aAaAa', 'abBABba'], dtype='|S7') @@ -1549,17 +1541,13 @@ def strip(a, chars=None): -------- >>> c = np.array(['aAaAaA', ' aA ', 'abBABba']) >>> c - array(['aAaAaA', ' aA ', 'abBABba'], - dtype='|S7') + array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7') >>> np.char.strip(c) - array(['aAaAaA', 'aA', 'abBABba'], - dtype='|S7') + array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7') >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads - array(['AaAaA', ' aA ', 'bBABb'], - dtype='|S7') + array(['AaAaA', ' aA ', 'bBABb'], dtype='<U7') >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails - array(['aAaAa', ' aA ', 'abBABba'], - dtype='|S7') + array(['aAaAa', ' aA ', 'abBABba'], dtype='<U7') """ a_arr = numpy.asarray(a) @@ -1711,11 +1699,9 @@ def upper(a): Examples -------- >>> c = np.array(['a1b c', '1bca', 'bca1']); c - array(['a1b c', '1bca', 'bca1'], - dtype='|S5') + array(['a1b c', '1bca', 'bca1'], dtype='<U5') >>> np.char.upper(c) - array(['A1B C', '1BCA', 'BCA1'], - dtype='|S5') + array(['A1B C', '1BCA', 'BCA1'], dtype='<U5') """ a_arr = numpy.asarray(a) @@ -1950,18 +1936,16 @@ class chararray(ndarray): >>> charar = np.chararray((3, 3)) >>> charar[:] = 'a' >>> charar - chararray([['a', 'a', 'a'], - ['a', 'a', 'a'], - ['a', 'a', 'a']], - dtype='|S1') + chararray([[b'a', b'a', b'a'], + [b'a', b'a', b'a'], + [b'a', b'a', b'a']], dtype='|S1') >>> charar = np.chararray(charar.shape, itemsize=5) >>> charar[:] = 'abc' >>> charar - chararray([['abc', 'abc', 'abc'], - ['abc', 'abc', 'abc'], - ['abc', 'abc', 'abc']], - dtype='|S5') + chararray([[b'abc', b'abc', b'abc'], + [b'abc', b'abc', b'abc'], + [b'abc', b'abc', b'abc']], dtype='|S5') """ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None, diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py index c4fc77e9e..83b7d8287 100644 --- a/numpy/core/einsumfunc.py +++ b/numpy/core/einsumfunc.py @@ -41,10 +41,10 @@ def _flop_count(idx_contraction, inner, num_terms, size_dictionary): -------- >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5}) - 90 + 30 >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5}) - 270 + 60 """ @@ -171,7 +171,7 @@ def _optimal_path(input_sets, output_set, idx_dict, memory_limit): >>> isets = [set('abd'), set('ac'), set('bdc')] >>> oset = set() >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} - >>> _path__optimal_path(isets, oset, idx_sizes, 5000) + >>> _optimal_path(isets, oset, idx_sizes, 5000) [(0, 2), (0, 1)] """ @@ -342,7 +342,7 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit): >>> isets = [set('abd'), set('ac'), set('bdc')] >>> oset = set() >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4} - >>> _path__greedy_path(isets, oset, idx_sizes, 5000) + >>> _greedy_path(isets, oset, idx_sizes, 5000) [(0, 2), (0, 1)] """ @@ -539,13 +539,14 @@ def _parse_einsum_input(operands): -------- The operand list is simplified to reduce printing: + >>> np.random.seed(123) >>> a = np.random.rand(4, 4) >>> b = np.random.rand(4, 4, 4) - >>> __parse_einsum_input(('...a,...a->...', a, b)) - ('za,xza', 'xz', [a, b]) + >>> _parse_einsum_input(('...a,...a->...', a, b)) + 
('za,xza', 'xz', [a, b]) # may vary - >>> __parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) - ('za,xza', 'xz', [a, b]) + >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0])) + ('za,xza', 'xz', [a, b]) # may vary """ if len(operands) == 0: @@ -763,6 +764,7 @@ def einsum_path(*operands, **kwargs): of the contraction and the remaining contraction ``(0, 1)`` is then completed. + >>> np.random.seed(123) >>> a = np.random.rand(2, 2) >>> b = np.random.rand(2, 5) >>> c = np.random.rand(5, 2) @@ -770,7 +772,7 @@ def einsum_path(*operands, **kwargs): >>> print(path_info[0]) ['einsum_path', (1, 2), (0, 1)] >>> print(path_info[1]) - Complete contraction: ij,jk,kl->il + Complete contraction: ij,jk,kl->il # may vary Naive scaling: 4 Optimized scaling: 3 Naive FLOP count: 1.600e+02 @@ -789,12 +791,12 @@ def einsum_path(*operands, **kwargs): >>> I = np.random.rand(10, 10, 10, 10) >>> C = np.random.rand(10, 10) >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, - optimize='greedy') + ... optimize='greedy') >>> print(path_info[0]) ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)] - >>> print(path_info[1]) - Complete contraction: ea,fb,abcd,gc,hd->efgh + >>> print(path_info[1]) + Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary Naive scaling: 8 Optimized scaling: 5 Naive FLOP count: 8.000e+08 @@ -1274,32 +1276,32 @@ def einsum(*operands, **kwargs): >>> a = np.arange(60.).reshape(3,4,5) >>> b = np.arange(24.).reshape(4,3,2) >>> np.einsum('ijk,jil->kl', a, b) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3]) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) >>> np.tensordot(a,b, axes=([1,0],[0,1])) - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) Writeable returned arrays (since version 1.10.0): >>> a = np.zeros((3, 3)) >>> np.einsum('ii->i', a)[:] = 1 >>> a - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) Example of ellipsis use: @@ -1322,19 +1324,27 @@ def einsum(*operands, **kwargs): particularly significant with larger arrays: >>> a = np.ones(64).reshape(2,4,8) - # Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) + + Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.) + >>> for iteration in range(500): - ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) - # Sub-optimal `einsum` (due to repeated path calculation time): ~330ms + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a) + + Sub-optimal `einsum` (due to repeated path calculation time): ~330ms + >>> for iteration in range(500): - ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') - # Greedy `einsum` (faster optimal path approximation): ~160ms + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal') + + Greedy `einsum` (faster optimal path approximation): ~160ms + >>> for iteration in range(500): - ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') - # Optimal `einsum` (best usage pattern in some use cases): ~110ms + ... 
_ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy') + + Optimal `einsum` (best usage pattern in some use cases): ~110ms + >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0] >>> for iteration in range(500): - ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) + ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path) """ diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py index 59a820d53..240eac6ce 100644 --- a/numpy/core/fromnumeric.py +++ b/numpy/core/fromnumeric.py @@ -240,12 +240,16 @@ def reshape(a, newshape, order='C'): you should assign the new shape to the shape attribute of the array:: >>> a = np.zeros((10, 2)) + # A transpose makes the array non-contiguous >>> b = a.T + # Taking a view makes it possible to modify the shape without modifying # the initial object. >>> c = b.view() >>> c.shape = (20) + Traceback (most recent call last): + ... AttributeError: incompatible shape for a non-contiguous array The `order` keyword gives the index ordering both for *fetching* the values @@ -1644,21 +1648,21 @@ def ravel(a, order='C'): It is equivalent to ``reshape(-1, order=order)``. >>> x = np.array([[1, 2, 3], [4, 5, 6]]) - >>> print(np.ravel(x)) - [1 2 3 4 5 6] + >>> np.ravel(x) + array([1, 2, 3, 4, 5, 6]) - >>> print(x.reshape(-1)) - [1 2 3 4 5 6] + >>> x.reshape(-1) + array([1, 2, 3, 4, 5, 6]) - >>> print(np.ravel(x, order='F')) - [1 4 2 5 3 6] + >>> np.ravel(x, order='F') + array([1, 4, 2, 5, 3, 6]) When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering: - >>> print(np.ravel(x.T)) - [1 4 2 5 3 6] - >>> print(np.ravel(x.T, order='A')) - [1 2 3 4 5 6] + >>> np.ravel(x.T) + array([1, 4, 2, 5, 3, 6]) + >>> np.ravel(x.T, order='A') + array([1, 2, 3, 4, 5, 6]) When ``order`` is 'K', it will preserve orderings that are neither 'C' nor 'F', but won't reverse axes: @@ -1747,7 +1751,7 @@ def nonzero(a): array([[0, 0], [1, 1], [2, 0], - [2, 1]) + [2, 1]]) A common use for ``nonzero`` is to find the indices of an array, where a condition is True. Given an array `a`, the condition `a` > 3 is a @@ -2150,10 +2154,10 @@ def any(a, axis=None, out=None, keepdims=np._NoValue): >>> np.any(np.nan) True - >>> o=np.array([False]) + >>> o=np.array(False) >>> z=np.any([-1, 4, 5], out=o) >>> z, o - (array([ True]), array([ True])) + (array(True), array(True)) >>> # Check now that z is a reference to o >>> z is o True @@ -2236,10 +2240,10 @@ def all(a, axis=None, out=None, keepdims=np._NoValue): >>> np.all([1.0, np.nan]) True - >>> o=np.array([False]) + >>> o=np.array(False) >>> z=np.all([-1, 4, 5], out=o) - >>> id(z), id(o), z # doctest: +SKIP - (28293632, 28293632, array([ True])) + >>> id(z), id(o), z + (28293632, 28293632, array(True)) # may vary """ return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims) @@ -2724,8 +2728,8 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N raised on overflow. 
That means that, on a 32-bit platform: >>> x = np.array([536870910, 536870910, 536870910, 536870910]) - >>> np.prod(x) # random - 16 + >>> np.prod(x) + 16 # may vary The product of an empty array is the neutral element 1: @@ -2993,11 +2997,11 @@ def around(a, decimals=0, out=None): Examples -------- >>> np.around([0.37, 1.64]) - array([ 0., 2.]) + array([0., 2.]) >>> np.around([0.37, 1.64], decimals=1) - array([ 0.4, 1.6]) + array([0.4, 1.6]) >>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value - array([ 0., 2., 2., 4., 4.]) + array([0., 2., 2., 4., 4.]) >>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned array([ 1, 2, 3, 11]) >>> np.around([1,2,3,11], decimals=-1) @@ -3085,9 +3089,9 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): >>> np.mean(a) 2.5 >>> np.mean(a, axis=0) - array([ 2., 3.]) + array([2., 3.]) >>> np.mean(a, axis=1) - array([ 1.5, 3.5]) + array([1.5, 3.5]) In single precision, `mean` can be inaccurate: @@ -3100,7 +3104,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): Computing the mean in float64 is more accurate: >>> np.mean(a, dtype=np.float64) - 0.55000000074505806 + 0.55000000074505806 # may vary """ kwargs = {} @@ -3206,11 +3210,11 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): -------- >>> a = np.array([[1, 2], [3, 4]]) >>> np.std(a) - 1.1180339887498949 + 1.1180339887498949 # may vary >>> np.std(a, axis=0) - array([ 1., 1.]) + array([1., 1.]) >>> np.std(a, axis=1) - array([ 0.5, 0.5]) + array([0.5, 0.5]) In single precision, std() can be inaccurate: @@ -3223,7 +3227,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): Computing the standard deviation in float64 is more accurate: >>> np.std(a, dtype=np.float64) - 0.44999999925494177 + 0.44999999925494177 # may vary """ kwargs = {} @@ -3330,9 +3334,9 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): >>> np.var(a) 1.25 >>> np.var(a, axis=0) - array([ 1., 1.]) + array([1., 1.]) >>> np.var(a, axis=1) - array([ 0.25, 0.25]) + array([0.25, 0.25]) In single precision, var() can be inaccurate: @@ -3345,7 +3349,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): Computing the variance in float64 is more accurate: >>> np.var(a, dtype=np.float64) - 0.20249999932944759 + 0.20249999932944759 # may vary >>> ((1-0.55)**2 + (0.1-0.55)**2)/2 0.2025 diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py index b68fd4068..762328173 100644 --- a/numpy/core/function_base.py +++ b/numpy/core/function_base.py @@ -102,14 +102,17 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, Examples -------- >>> np.linspace(2.0, 3.0, num=5) - array([ 2. , 2.25, 2.5 , 2.75, 3. ]) + array([2. , 2.25, 2.5 , 2.75, 3. ]) >>> np.linspace(2.0, 3.0, num=5, endpoint=False) - array([ 2. , 2.2, 2.4, 2.6, 2.8]) + array([2. , 2.2, 2.4, 2.6, 2.8]) >>> np.linspace(2.0, 3.0, num=5, retstep=True) - (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) + (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25) Graphical illustration: + >>> import matplotlib + >>> import matplotlib.pyplot + >>> matplotlib.pyplot.switch_backend('agg') >>> import matplotlib.pyplot as plt >>> N = 8 >>> y = np.zeros(N) @@ -252,14 +255,17 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, Examples -------- >>> np.logspace(2.0, 3.0, num=4) - array([ 100. , 215.443469 , 464.15888336, 1000. ]) + array([ 100. , 215.443469 , 464.15888336, 1000. 
]) >>> np.logspace(2.0, 3.0, num=4, endpoint=False) - array([ 100. , 177.827941 , 316.22776602, 562.34132519]) + array([100. , 177.827941 , 316.22776602, 562.34132519]) >>> np.logspace(2.0, 3.0, num=4, base=2.0) - array([ 4. , 5.0396842 , 6.34960421, 8. ]) + array([4. , 5.0396842 , 6.34960421, 8. ]) Graphical illustration: + >>> import matplotlib + >>> import matplotlib.pyplot + >>> matplotlib.pyplot.switch_backend('agg') >>> import matplotlib.pyplot as plt >>> N = 10 >>> x1 = np.logspace(0.1, 1, N, endpoint=True) @@ -361,24 +367,29 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0): Negative, decreasing, and complex inputs are allowed: >>> np.geomspace(1000, 1, num=4) - array([ 1000., 100., 10., 1.]) + array([1000., 100., 10., 1.]) >>> np.geomspace(-1000, -1, num=4) array([-1000., -100., -10., -1.]) >>> np.geomspace(1j, 1000j, num=4) # Straight line - array([ 0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j]) + array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j]) >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle - array([-1.00000000+0.j , -0.70710678+0.70710678j, - 0.00000000+1.j , 0.70710678+0.70710678j, - 1.00000000+0.j ]) + array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j, + 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j, + 1.00000000e+00+0.00000000e+00j]) Graphical illustration of ``endpoint`` parameter: + >>> import matplotlib + >>> matplotlib.use('agg') >>> import matplotlib.pyplot as plt >>> N = 10 >>> y = np.zeros(N) >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o') + [<matplotlib.lines.Line2D object at 0x...>] >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o') + [<matplotlib.lines.Line2D object at 0x...>] >>> plt.axis([0.5, 2000, 0, 3]) + [0.5, 2000, 0, 3] >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both') >>> plt.show() diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py index 82bc4707c..9ba4817f4 100644 --- a/numpy/core/memmap.py +++ b/numpy/core/memmap.py @@ -135,9 +135,9 @@ class memmap(ndarray): >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4)) >>> fp - memmap([[ 0., 0., 0., 0.], - [ 0., 0., 0., 0.], - [ 0., 0., 0., 0.]], dtype=float32) + memmap([[0., 0., 0., 0.], + [0., 0., 0., 0.], + [0., 0., 0., 0.]], dtype=float32) Write data to memmap array: diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py index df0ed2df4..4c2715892 100644 --- a/numpy/core/multiarray.py +++ b/numpy/core/multiarray.py @@ -117,11 +117,11 @@ def empty_like(prototype, dtype=None, order=None, subok=None): -------- >>> a = ([1,2,3], [4,5,6]) # a is array-like >>> np.empty_like(a) - array([[-1073741821, -1073741821, 3], #random + array([[-1073741821, -1073741821, 3], # random [ 0, 0, -1073741821]]) >>> a = np.array([[1., 2., 3.],[4.,5.,6.]]) >>> np.empty_like(a) - array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random + array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # random [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]]) """ @@ -211,9 +211,11 @@ def concatenate(arrays, axis=None, out=None): fill_value=999999) """ - for array in arrays: - yield array - yield out + if out is not None: + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays @array_function_from_c_func_and_dispatcher(_multiarray_umath.inner) @@ -286,8 +288,8 @@ def inner(a, b): An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) - array([[ 7., 0.], - [ 0., 7.]]) + array([[7., 0.], + 
[0., 7.]]) """ return (a, b) @@ -421,8 +423,8 @@ def lexsort(keys, axis=None): >>> a = [1,5,1,4,3,4,4] # First column >>> b = [9,4,0,4,0,2,1] # Second column >>> ind = np.lexsort((b,a)) # Sort by a, then by b - >>> print(ind) - [2 0 4 6 5 3 1] + >>> ind + array([2, 0, 4, 6, 5, 3, 1]) >>> [(a[i],b[i]) for i in ind] [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)] @@ -1139,7 +1141,10 @@ def packbits(myarray, axis=None): ... [0,0,1]]]) >>> b = np.packbits(a, axis=-1) >>> b - array([[[160],[64]],[[192],[32]]], dtype=uint8) + array([[[160], + [ 64]], + [[192], + [ 32]]], dtype=uint8) Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000, and 32 = 0010 0000. @@ -1329,7 +1334,7 @@ def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None): >>> # The weekdays are Friday, Saturday, and Monday ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'], ... holidays=['2011-07-01', '2011-07-04', '2011-07-17']) - array([False, False, True], dtype='bool') + array([False, False, True]) """ return (dates, weekmask, holidays, out) @@ -1403,27 +1408,27 @@ def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None, -------- >>> # First business day in October 2011 (not accounting for holidays) ... np.busday_offset('2011-10', 0, roll='forward') - numpy.datetime64('2011-10-03','D') + numpy.datetime64('2011-10-03') >>> # Last business day in February 2012 (not accounting for holidays) ... np.busday_offset('2012-03', -1, roll='forward') - numpy.datetime64('2012-02-29','D') + numpy.datetime64('2012-02-29') >>> # Third Wednesday in January 2011 ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed') - numpy.datetime64('2011-01-19','D') + numpy.datetime64('2011-01-19') >>> # 2012 Mother's Day in Canada and the U.S. ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun') - numpy.datetime64('2012-05-13','D') + numpy.datetime64('2012-05-13') >>> # First business day on or after a date ... np.busday_offset('2011-03-20', 0, roll='forward') - numpy.datetime64('2011-03-21','D') + numpy.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 0, roll='forward') - numpy.datetime64('2011-03-22','D') + numpy.datetime64('2011-03-22') >>> # First business day after a date ... np.busday_offset('2011-03-20', 1, roll='backward') - numpy.datetime64('2011-03-21','D') + numpy.datetime64('2011-03-21') >>> np.busday_offset('2011-03-22', 1, roll='backward') - numpy.datetime64('2011-03-23','D') + numpy.datetime64('2011-03-23') """ return (dates, offsets, weekmask, holidays, out) @@ -1487,7 +1492,7 @@ def busday_count(begindates, enddates, weekmask=None, holidays=None, ... np.busday_count('2011-01', '2011-02') 21 >>> # Number of weekdays in 2011 - ... np.busday_count('2011', '2012') + >>> np.busday_count('2011', '2012') 260 >>> # Number of Saturdays in 2011 ... np.busday_count('2011', '2012', weekmask='Sat') @@ -1525,6 +1530,7 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): Examples -------- + >>> import pytz >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]') >>> d array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30', @@ -1555,6 +1561,8 @@ def datetime_as_string(arr, unit=None, timezone=None, casting=None): 'casting' can be used to specify whether precision can be changed >>> np.datetime_as_string(d, unit='h', casting='safe') + Traceback (most recent call last): + ... 
TypeError: Cannot create a datetime string as units 'h' from a NumPy datetime with units 'm' according to the rule 'safe' """ diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py index 8768cbe56..8a8efddf3 100644 --- a/numpy/core/numeric.py +++ b/numpy/core/numeric.py @@ -160,9 +160,9 @@ def zeros_like(a, dtype=None, order='K', subok=True): >>> y = np.arange(3, dtype=float) >>> y - array([ 0., 1., 2.]) + array([0., 1., 2.]) >>> np.zeros_like(y) - array([ 0., 0., 0.]) + array([0., 0., 0.]) """ res = empty_like(a, dtype=dtype, order=order, subok=subok) @@ -205,19 +205,19 @@ def ones(shape, dtype=None, order='C'): Examples -------- >>> np.ones(5) - array([ 1., 1., 1., 1., 1.]) + array([1., 1., 1., 1., 1.]) >>> np.ones((5,), dtype=int) array([1, 1, 1, 1, 1]) >>> np.ones((2, 1)) - array([[ 1.], - [ 1.]]) + array([[1.], + [1.]]) >>> s = (2,2) >>> np.ones(s) - array([[ 1., 1.], - [ 1., 1.]]) + array([[1., 1.], + [1., 1.]]) """ a = empty(shape, dtype, order) @@ -280,9 +280,9 @@ def ones_like(a, dtype=None, order='K', subok=True): >>> y = np.arange(3, dtype=float) >>> y - array([ 0., 1., 2.]) + array([0., 1., 2.]) >>> np.ones_like(y) - array([ 1., 1., 1.]) + array([1., 1., 1.]) """ res = empty_like(a, dtype=dtype, order=order, subok=subok) @@ -323,8 +323,8 @@ def full(shape, fill_value, dtype=None, order='C'): Examples -------- >>> np.full((2, 2), np.inf) - array([[ inf, inf], - [ inf, inf]]) + array([[inf, inf], + [inf, inf]]) >>> np.full((2, 2), 10) array([[10, 10], [10, 10]]) @@ -385,13 +385,13 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True): >>> np.full_like(x, 0.1) array([0, 0, 0, 0, 0, 0]) >>> np.full_like(x, 0.1, dtype=np.double) - array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) >>> np.full_like(x, np.nan, dtype=np.double) - array([ nan, nan, nan, nan, nan, nan]) + array([nan, nan, nan, nan, nan, nan]) >>> y = np.arange(6, dtype=np.double) >>> np.full_like(y, 0.1) - array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) + array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1]) """ res = empty_like(a, dtype=dtype, order=order, subok=subok) @@ -620,8 +620,8 @@ def ascontiguousarray(a, dtype=None): -------- >>> x = np.arange(6).reshape(2,3) >>> np.ascontiguousarray(x, dtype=np.float32) - array([[ 0., 1., 2.], - [ 3., 4., 5.]], dtype=float32) + array([[0., 1., 2.], + [3., 4., 5.]], dtype=float32) >>> x.flags['C_CONTIGUOUS'] True @@ -802,7 +802,7 @@ def isfortran(a): >>> np.isfortran(a) False - >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN') + >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F') >>> b array([[1, 2, 3], [4, 5, 6]]) @@ -987,11 +987,11 @@ def correlate(a, v, mode='valid'): Examples -------- >>> np.correlate([1, 2, 3], [0, 1, 0.5]) - array([ 3.5]) + array([3.5]) >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same") - array([ 2. , 3.5, 3. ]) + array([2. , 3.5, 3. ]) >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full") - array([ 0.5, 2. , 3.5, 3. , 0. ]) + array([0.5, 2. , 3.5, 3. , 0. ]) Using complex sequences: @@ -1087,20 +1087,20 @@ def convolve(a, v, mode='full'): before "sliding" the two across one another: >>> np.convolve([1, 2, 3], [0, 1, 0.5]) - array([ 0. , 1. , 2.5, 4. , 1.5]) + array([0. , 1. , 2.5, 4. , 1.5]) Only return the middle values of the convolution. Contains boundary effects, where zeros are taken into account: >>> np.convolve([1,2,3],[0,1,0.5], 'same') - array([ 1. , 2.5, 4. ]) + array([1. , 2.5, 4. 
]) The two arrays are of the same length, so there is only one position where they completely overlap: >>> np.convolve([1,2,3],[0,1,0.5], 'valid') - array([ 2.5]) + array([2.5]) """ a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1) @@ -1176,11 +1176,11 @@ def outer(a, b, out=None): [-2., -1., 0., 1., 2.]]) >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,))) >>> im - array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], - [ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], - [ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) + array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j], + [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j], + [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], + [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j], + [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]]) >>> grid = rl + im >>> grid array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j], @@ -1193,9 +1193,9 @@ def outer(a, b, out=None): >>> x = np.array(['a', 'b', 'c'], dtype=object) >>> np.outer(x, [1, 2, 3]) - array([[a, aa, aaa], - [b, bb, bbb], - [c, cc, ccc]], dtype=object) + array([['a', 'aa', 'aaa'], + ['b', 'bb', 'bbb'], + ['c', 'cc', 'ccc']], dtype=object) """ a = asarray(a) @@ -1264,11 +1264,11 @@ def tensordot(a, b, axes=2): >>> c.shape (5, 2) >>> c - array([[ 4400., 4730.], - [ 4532., 4874.], - [ 4664., 5018.], - [ 4796., 5162.], - [ 4928., 5306.]]) + array([[4400., 4730.], + [4532., 4874.], + [4664., 5018.], + [4796., 5162.], + [4928., 5306.]]) >>> # A slower but equivalent way of computing the same... >>> d = np.zeros((5,2)) >>> for i in range(5): @@ -1294,40 +1294,40 @@ def tensordot(a, b, axes=2): [3, 4]], [[5, 6], [7, 8]]]) - array([[a, b], - [c, d]], dtype=object) + array([['a', 'b'], + ['c', 'd']], dtype=object) >>> np.tensordot(a, A) # third argument default is 2 for double-contraction - array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object) + array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object) >>> np.tensordot(a, A, 1) - array([[[acc, bdd], - [aaacccc, bbbdddd]], - [[aaaaacccccc, bbbbbdddddd], - [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object) + array([[['acc', 'bdd'], + ['aaacccc', 'bbbdddd']], + [['aaaaacccccc', 'bbbbbdddddd'], + ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object) >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.) - array([[[[[a, b], - [c, d]], + array([[[[['a', 'b'], + ['c', 'd']], ... 
>>> np.tensordot(a, A, (0, 1)) - array([[[abbbbb, cddddd], - [aabbbbbb, ccdddddd]], - [[aaabbbbbbb, cccddddddd], - [aaaabbbbbbbb, ccccdddddddd]]], dtype=object) + array([[['abbbbb', 'cddddd'], + ['aabbbbbb', 'ccdddddd']], + [['aaabbbbbbb', 'cccddddddd'], + ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object) >>> np.tensordot(a, A, (2, 1)) - array([[[abb, cdd], - [aaabbbb, cccdddd]], - [[aaaaabbbbbb, cccccdddddd], - [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object) + array([[['abb', 'cdd'], + ['aaabbbb', 'cccdddd']], + [['aaaaabbbbbb', 'cccccdddddd'], + ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object) >>> np.tensordot(a, A, ((0, 1), (0, 1))) - array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object) + array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object) >>> np.tensordot(a, A, ((2, 1), (1, 0))) - array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object) + array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object) """ try: @@ -1780,7 +1780,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): >>> x = [1,2] >>> y = [4,5] >>> np.cross(x, y) - -3 + array(-3) Multiple vector cross-products. Note that the direction of the cross product vector is defined by the `right-hand rule`. @@ -2097,10 +2097,10 @@ def isscalar(num): NumPy supports PEP 3141 numbers: >>> from fractions import Fraction - >>> isscalar(Fraction(5, 17)) + >>> np.isscalar(Fraction(5, 17)) True >>> from numbers import Number - >>> isscalar(Number()) + >>> np.isscalar(Number()) True """ @@ -2339,9 +2339,9 @@ def identity(n, dtype=None): Examples -------- >>> np.identity(3) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) """ from numpy import eye @@ -2487,23 +2487,23 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False): Examples -------- >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8]) - array([True, False]) + array([ True, False]) >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9]) - array([True, True]) + array([ True, True]) >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9]) - array([False, True]) + array([False, True]) >>> np.isclose([1.0, np.nan], [1.0, np.nan]) - array([True, False]) + array([ True, False]) >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True) - array([True, True]) + array([ True, True]) >>> np.isclose([1e-8, 1e-7], [0.0, 0.0]) - array([ True, False], dtype=bool) + array([ True, False]) >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0) - array([False, False], dtype=bool) + array([False, False]) >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0]) - array([ True, True], dtype=bool) + array([ True, True]) >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0) - array([False, True], dtype=bool) + array([False, True]) """ def within_tol(x, y, atol, rtol): with errstate(invalid='ignore'): @@ -2710,11 +2710,9 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): -------- >>> old_settings = np.seterr(all='ignore') #seterr to known value >>> np.seterr(over='raise') - {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore', - 'under': 'ignore'} + {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'} >>> np.seterr(**old_settings) # reset to default - {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore', - 'under': 'ignore'} + {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'} >>> np.int16(32000) * np.int16(3) 30464 @@ -2724,11 +2722,11 @@ def seterr(all=None, divide=None, over=None, under=None, invalid=None): File "<stdin>", 
line 1, in <module> FloatingPointError: overflow encountered in short_scalars + >>> from collections import OrderedDict >>> old_settings = np.seterr(all='print') - >>> np.geterr() - {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'} + >>> OrderedDict(np.geterr()) + OrderedDict([('divide', 'print'), ('over', 'print'), ('under', 'print'), ('invalid', 'print')]) >>> np.int16(32000) * np.int16(3) - Warning: overflow encountered in short_scalars 30464 """ @@ -2779,18 +2777,17 @@ def geterr(): Examples -------- - >>> np.geterr() - {'over': 'warn', 'divide': 'warn', 'invalid': 'warn', - 'under': 'ignore'} + >>> from collections import OrderedDict + >>> sorted(np.geterr().items()) + [('divide', 'warn'), ('invalid', 'warn'), ('over', 'warn'), ('under', 'ignore')] >>> np.arange(3.) / np.arange(3.) - array([ NaN, 1., 1.]) + array([nan, 1., 1.]) >>> oldsettings = np.seterr(all='warn', over='raise') - >>> np.geterr() - {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'} + >>> OrderedDict(sorted(np.geterr().items())) + OrderedDict([('divide', 'warn'), ('invalid', 'warn'), ('over', 'raise'), ('under', 'warn')]) >>> np.arange(3.) / np.arange(3.) - __main__:1: RuntimeWarning: invalid value encountered in divide - array([ NaN, 1., 1.]) + array([nan, 1., 1.]) """ maskvalue = umath.geterrobj()[1] @@ -2897,15 +2894,16 @@ def seterrcall(func): >>> saved_handler = np.seterrcall(err_handler) >>> save_err = np.seterr(all='call') + >>> from collections import OrderedDict >>> np.array([1, 2, 3]) / 0.0 Floating point error (divide by zero), with flag 1 - array([ Inf, Inf, Inf]) + array([inf, inf, inf]) >>> np.seterrcall(saved_handler) <function err_handler at 0x...> - >>> np.seterr(**save_err) - {'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'} + >>> OrderedDict(sorted(np.seterr(**save_err).items())) + OrderedDict([('divide', 'call'), ('invalid', 'call'), ('over', 'call'), ('under', 'call')]) Log error message: @@ -2919,14 +2917,13 @@ def seterrcall(func): >>> save_err = np.seterr(all='log') >>> np.array([1, 2, 3]) / 0.0 - LOG: Warning: divide by zero encountered in divide - <BLANKLINE> - array([ Inf, Inf, Inf]) + LOG: Warning: divide by zero encountered in true_divide + array([inf, inf, inf]) >>> np.seterrcall(saved_handler) - <__main__.Log object at 0x...> - >>> np.seterr(**save_err) - {'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'} + <numpy.core.numeric.Log object at 0x...> + >>> OrderedDict(sorted(np.seterr(**save_err).items())) + OrderedDict([('divide', 'log'), ('invalid', 'log'), ('over', 'log'), ('under', 'log')]) """ if func is not None and not isinstance(func, collections_abc.Callable): @@ -2975,7 +2972,7 @@ def geterrcall(): >>> oldhandler = np.seterrcall(err_handler) >>> np.array([1, 2, 3]) / 0.0 Floating point error (divide by zero), with flag 1 - array([ Inf, Inf, Inf]) + array([inf, inf, inf]) >>> cur_handler = np.geterrcall() >>> cur_handler is err_handler @@ -3023,15 +3020,14 @@ class errstate(object): Examples -------- + >>> from collections import OrderedDict >>> olderr = np.seterr(all='ignore') # Set error handling to known state. >>> np.arange(3) / 0. - array([ NaN, Inf, Inf]) + array([nan, inf, inf]) >>> with np.errstate(divide='warn'): ... np.arange(3) / 0. - ... 
- __main__:2: RuntimeWarning: divide by zero encountered in divide - array([ NaN, Inf, Inf]) + array([nan, inf, inf]) >>> np.sqrt(-1) nan @@ -3043,9 +3039,8 @@ class errstate(object): Outside the context the error handling behavior has not changed: - >>> np.geterr() - {'over': 'warn', 'divide': 'warn', 'invalid': 'warn', - 'under': 'ignore'} + >>> OrderedDict(sorted(np.geterr().items())) + OrderedDict([('divide', 'ignore'), ('invalid', 'ignore'), ('over', 'ignore'), ('under', 'ignore')]) """ # Note that we don't want to run the above doctests because they will fail diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py index f00f92286..5bc37b73a 100644 --- a/numpy/core/numerictypes.py +++ b/numpy/core/numerictypes.py @@ -163,19 +163,19 @@ def maximum_sctype(t): Examples -------- >>> np.maximum_sctype(int) - <type 'numpy.int64'> + <class 'numpy.int64'> >>> np.maximum_sctype(np.uint8) - <type 'numpy.uint64'> + <class 'numpy.uint64'> >>> np.maximum_sctype(complex) - <type 'numpy.complex192'> + <class 'numpy.complex256'> # may vary >>> np.maximum_sctype(str) - <type 'numpy.string_'> + <class 'numpy.str_'> >>> np.maximum_sctype('i2') - <type 'numpy.int64'> + <class 'numpy.int64'> >>> np.maximum_sctype('f4') - <type 'numpy.float96'> + <class 'numpy.float128'> # may vary """ g = obj2sctype(t) @@ -260,19 +260,18 @@ def obj2sctype(rep, default=None): Examples -------- >>> np.obj2sctype(np.int32) - <type 'numpy.int32'> + <class 'numpy.int32'> >>> np.obj2sctype(np.array([1., 2.])) - <type 'numpy.float64'> + <class 'numpy.float64'> >>> np.obj2sctype(np.array([1.j])) - <type 'numpy.complex128'> + <class 'numpy.complex128'> >>> np.obj2sctype(dict) - <type 'numpy.object_'> + <class 'numpy.object_'> >>> np.obj2sctype('string') - <type 'numpy.string_'> >>> np.obj2sctype(1, default=list) - <type 'list'> + <class 'list'> """ # prevent abtract classes being upcast @@ -319,7 +318,7 @@ def issubclass_(arg1, arg2): Examples -------- >>> np.issubclass_(np.int32, int) - True + False # True on Python 2.7 >>> np.issubclass_(np.int32, float) False @@ -352,7 +351,7 @@ def issubsctype(arg1, arg2): Examples -------- >>> np.issubsctype('S8', str) - True + False >>> np.issubsctype(np.array([1]), int) True >>> np.issubsctype(np.array([1]), float) @@ -485,9 +484,9 @@ def sctype2char(sctype): Examples -------- - >>> for sctype in [np.int32, float, complex, np.string_, np.ndarray]: + >>> for sctype in [np.int32, np.double, np.complex, np.string_, np.ndarray]: ... print(np.sctype2char(sctype)) - l + l # may vary d D S diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py index 0979858a1..c55174ecd 100644 --- a/numpy/core/overrides.py +++ b/numpy/core/overrides.py @@ -1,73 +1,23 @@ -"""Preliminary implementation of NEP-18 - -TODO: rewrite this in C for performance. -""" +"""Implementation of __array_function__ overrides from NEP-18.""" import collections import functools import os -from numpy.core._multiarray_umath import add_docstring, ndarray +from numpy.core._multiarray_umath import ( + add_docstring, implement_array_function, _get_implementing_args) from numpy.compat._inspect import getargspec -_NDARRAY_ARRAY_FUNCTION = ndarray.__array_function__ -_NDARRAY_ONLY = [ndarray] - ENABLE_ARRAY_FUNCTION = bool( int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 0))) -def get_overloaded_types_and_args(relevant_args): - """Returns a list of arguments on which to call __array_function__. 
- - Parameters - ---------- - relevant_args : iterable of array-like - Iterable of array-like arguments to check for __array_function__ - methods. - - Returns - ------- - overloaded_types : collection of types - Types of arguments from relevant_args with __array_function__ methods. - overloaded_args : list - Arguments from relevant_args on which to call __array_function__ - methods, in the order in which they should be called. +add_docstring( + implement_array_function, """ - # Runtime is O(num_arguments * num_unique_types) - overloaded_types = [] - overloaded_args = [] - for arg in relevant_args: - arg_type = type(arg) - # We only collect arguments if they have a unique type, which ensures - # reasonable performance even with a long list of possibly overloaded - # arguments. - if (arg_type not in overloaded_types and - hasattr(arg_type, '__array_function__')): - - # Create lists explicitly for the first type (usually the only one - # done) to avoid setting up the iterator for overloaded_args. - if overloaded_types: - overloaded_types.append(arg_type) - # By default, insert argument at the end, but if it is - # subclass of another argument, insert it before that argument. - # This ensures "subclasses before superclasses". - index = len(overloaded_args) - for i, old_arg in enumerate(overloaded_args): - if issubclass(arg_type, type(old_arg)): - index = i - break - overloaded_args.insert(index, arg) - else: - overloaded_types = [arg_type] - overloaded_args = [arg] - - return overloaded_types, overloaded_args - - -def array_function_implementation_or_override( - implementation, public_api, relevant_args, args, kwargs): - """Implement a function with checks for __array_function__ overrides. + Implement a function with checks for __array_function__ overrides. + + All arguments are required, and can only be passed by position. Arguments --------- @@ -82,41 +32,37 @@ def array_function_implementation_or_override( Iterable of arguments to check for __array_function__ methods. args : tuple Arbitrary positional arguments originally passed into ``public_api``. - kwargs : tuple + kwargs : dict Arbitrary keyword arguments originally passed into ``public_api``. Returns ------- - Result from calling `implementation()` or an `__array_function__` + Result from calling ``implementation()`` or an ``__array_function__`` method, as appropriate. Raises ------ TypeError : if no implementation is found. + """) + + +# exposed for testing purposes; used internally by implement_array_function +add_docstring( + _get_implementing_args, """ - # Check for __array_function__ methods. - types, overloaded_args = get_overloaded_types_and_args(relevant_args) - # Short-cut for common cases: no overload or only ndarray overload - # (directly or with subclasses that do not override __array_function__). - if (not overloaded_args or types == _NDARRAY_ONLY or - all(type(arg).__array_function__ is _NDARRAY_ARRAY_FUNCTION - for arg in overloaded_args)): - return implementation(*args, **kwargs) - - # Call overrides - for overloaded_arg in overloaded_args: - # Use `public_api` instead of `implemenation` so __array_function__ - # implementations can do equality/identity comparisons. 
- result = overloaded_arg.__array_function__( - public_api, types, args, kwargs) - - if result is not NotImplemented: - return result - - func_name = '{}.{}'.format(public_api.__module__, public_api.__name__) - raise TypeError("no implementation found for '{}' on types that implement " - '__array_function__: {}' - .format(func_name, list(map(type, overloaded_args)))) + Collect arguments on which to call __array_function__. + + Parameters + ---------- + relevant_args : iterable of array-like + Iterable of possibly array-like arguments to check for + __array_function__ methods. + + Returns + ------- + Sequence of arguments with __array_function__ methods, in the order in + which they should be called. + """) ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults') @@ -215,7 +161,7 @@ def array_function_dispatch(dispatcher, module=None, verify=True, @functools.wraps(implementation) def public_api(*args, **kwargs): relevant_args = dispatcher(*args, **kwargs) - return array_function_implementation_or_override( + return implement_array_function( implementation, public_api, relevant_args, args, kwargs) if module is not None: diff --git a/numpy/core/records.py b/numpy/core/records.py index 86a43306a..4d18c5712 100644 --- a/numpy/core/records.py +++ b/numpy/core/records.py @@ -7,10 +7,9 @@ Most commonly, ndarrays contain elements of a single type, e.g. floats, integers, bools etc. However, it is possible for elements to be combinations of these using structured types, such as:: - >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)]) + >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)]) >>> a - array([(1, 2.0), (1, 2.0)], - dtype=[('x', '<i4'), ('y', '<f8')]) + array([(1, 2.), (1, 2.)], dtype=[('x', '<i8'), ('y', '<f8')]) Here, each element consists of two fields: x (and int), and y (a float). This is known as a structured array. The different fields are analogous @@ -21,7 +20,7 @@ one would a dictionary:: array([1, 1]) >>> a['y'] - array([ 2., 2.]) + array([2., 2.]) Record arrays allow us to access fields as properties:: @@ -31,7 +30,7 @@ Record arrays allow us to access fields as properties:: array([1, 1]) >>> ar.y - array([ 2., 2.]) + array([2., 2.]) """ from __future__ import division, absolute_import, print_function @@ -39,6 +38,7 @@ from __future__ import division, absolute_import, print_function import sys import os import warnings +from collections import Counter, OrderedDict from . import numeric as sb from . 
import numerictypes as nt @@ -74,14 +74,25 @@ _byteorderconv = {'b':'>', numfmt = nt.typeDict +# taken from OrderedDict recipes in the Python documentation +# https://docs.python.org/3.3/library/collections.html#ordereddict-examples-and-recipes +class _OrderedCounter(Counter, OrderedDict): + """Counter that remembers the order elements are first encountered""" + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, OrderedDict(self)) + + def __reduce__(self): + return self.__class__, (OrderedDict(self),) + + def find_duplicate(list): """Find duplication in a list, return a list of duplicated elements""" - dup = [] - for i in range(len(list)): - if (list[i] in list[i + 1:]): - if (list[i] not in dup): - dup.append(list[i]) - return dup + return [ + item + for item, counts in _OrderedCounter(list).items() + if counts > 1 + ] @set_module('numpy') @@ -128,10 +139,9 @@ class format_parser(object): Examples -------- - >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], + >>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'], ... ['T1', 'T2', 'T3']).dtype - dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), - (('T3', 'col3'), '|S5')]) + dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')]) `names` and/or `titles` can be empty lists. If `titles` is an empty list, titles will simply not appear. If `names` is empty, default field names @@ -139,9 +149,9 @@ class format_parser(object): >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'], ... []).dtype - dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')]) - >>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype - dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')]) + dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '<S5')]) + >>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype + dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')]) """ @@ -380,20 +390,19 @@ class recarray(ndarray): -------- Create an array with two fields, ``x`` and ``y``: - >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)]) + >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i8')]) >>> x - array([(1.0, 2), (3.0, 4)], - dtype=[('x', '<f8'), ('y', '<i4')]) + array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')]) >>> x['x'] - array([ 1., 3.]) + array([1., 3.]) View the array as a record array: >>> x = x.view(np.recarray) >>> x.x - array([ 1., 3.]) + array([1., 3.]) >>> x.y array([2, 4]) @@ -580,7 +589,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None, >>> x3=np.array([1.1,2,3,4]) >>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c') >>> print(r[1]) - (2, 'dd', 2.0) + (2, 'dd', 2.0) # may vary >>> x1[1]=34 >>> r.a array([1, 2, 3, 4]) @@ -659,11 +668,11 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None, >>> r.col1 array([456, 2]) >>> r.col2 - array(['dbe', 'de'], - dtype='|S3') + array(['dbe', 'de'], dtype='<U3') >>> import pickle - >>> print(pickle.loads(pickle.dumps(r))) - [(456, 'dbe', 1.2) (2, 'de', 1.3)] + >>> pickle.loads(pickle.dumps(r)) + rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)], + dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')]) """ if formats is None and dtype is None: # slower @@ -750,7 +759,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None, >>> a = a.newbyteorder('<') >>> a.tofile(fd) >>> - >>> fd.seek(0) + >>> _ = fd.seek(0) >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10, ... 
byteorder='<') >>> print(r[5]) diff --git a/numpy/core/setup.py b/numpy/core/setup.py index 467b590ac..9ccca629e 100644 --- a/numpy/core/setup.py +++ b/numpy/core/setup.py @@ -775,6 +775,7 @@ def configuration(parent_package='',top_path=None): multiarray_deps = [ join('src', 'multiarray', 'arrayobject.h'), join('src', 'multiarray', 'arraytypes.h'), + join('src', 'multiarray', 'arrayfunction_override.h'), join('src', 'multiarray', 'buffer.h'), join('src', 'multiarray', 'calculation.h'), join('src', 'multiarray', 'common.h'), @@ -827,6 +828,7 @@ def configuration(parent_package='',top_path=None): join('src', 'multiarray', 'arraytypes.c.src'), join('src', 'multiarray', 'array_assign_scalar.c'), join('src', 'multiarray', 'array_assign_array.c'), + join('src', 'multiarray', 'arrayfunction_override.c'), join('src', 'multiarray', 'buffer.c'), join('src', 'multiarray', 'calculation.c'), join('src', 'multiarray', 'compiled_base.c'), diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py index a529d2ad7..f8332c362 100644 --- a/numpy/core/shape_base.py +++ b/numpy/core/shape_base.py @@ -48,13 +48,13 @@ def atleast_1d(*arys): Examples -------- >>> np.atleast_1d(1.0) - array([ 1.]) + array([1.]) >>> x = np.arange(9.0).reshape(3,3) >>> np.atleast_1d(x) - array([[ 0., 1., 2.], - [ 3., 4., 5.], - [ 6., 7., 8.]]) + array([[0., 1., 2.], + [3., 4., 5.], + [6., 7., 8.]]) >>> np.atleast_1d(x) is x True @@ -106,11 +106,11 @@ def atleast_2d(*arys): Examples -------- >>> np.atleast_2d(3.0) - array([[ 3.]]) + array([[3.]]) >>> x = np.arange(3.0) >>> np.atleast_2d(x) - array([[ 0., 1., 2.]]) + array([[0., 1., 2.]]) >>> np.atleast_2d(x).base is x True @@ -166,7 +166,7 @@ def atleast_3d(*arys): Examples -------- >>> np.atleast_3d(3.0) - array([[[ 3.]]]) + array([[[3.]]]) >>> x = np.arange(3.0) >>> np.atleast_3d(x).shape @@ -179,7 +179,7 @@ def atleast_3d(*arys): True >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): - ... print(arr, arr.shape) + ... print(arr, arr.shape) # doctest: +SKIP ... [[[1] [2]]] (1, 2, 1) @@ -342,10 +342,11 @@ def hstack(tup): def _stack_dispatcher(arrays, axis=None, out=None): arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6) - for a in arrays: - yield a if out is not None: - yield out + # optimize for the typical case where only arrays is provided + arrays = list(arrays) + arrays.append(out) + return arrays @array_function_dispatch(_stack_dispatcher) @@ -760,11 +761,11 @@ def block(arrays): ... [A, np.zeros((2, 3))], ... [np.ones((3, 2)), B ] ... 
]) - array([[ 2., 0., 0., 0., 0.], - [ 0., 2., 0., 0., 0.], - [ 1., 1., 3., 0., 0.], - [ 1., 1., 0., 3., 0.], - [ 1., 1., 0., 0., 3.]]) + array([[2., 0., 0., 0., 0.], + [0., 2., 0., 0., 0.], + [1., 1., 3., 0., 0.], + [1., 1., 0., 3., 0.], + [1., 1., 0., 0., 3.]]) With a list of depth 1, `block` can be used as `hstack` @@ -774,7 +775,7 @@ def block(arrays): >>> a = np.array([1, 2, 3]) >>> b = np.array([2, 3, 4]) >>> np.block([a, b, 10]) # hstack([a, b, 10]) - array([1, 2, 3, 2, 3, 4, 10]) + array([ 1, 2, 3, 2, 3, 4, 10]) >>> A = np.ones((2, 2), int) >>> B = 2 * A diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c index ac3fdbef7..02a423e3a 100644 --- a/numpy/core/src/common/array_assign.c +++ b/numpy/core/src/common/array_assign.c @@ -125,9 +125,13 @@ raw_array_is_aligned(int ndim, npy_intp *shape, return npy_is_aligned((void *)align_check, alignment); } - else { + else if (alignment == 1) { return 1; } + else { + /* always return false for alignment == 0, which means cannot-be-aligned */ + return 0; + } } NPY_NO_EXPORT int diff --git a/numpy/core/src/common/array_assign.h b/numpy/core/src/common/array_assign.h index 07438c5e8..69ef56bb4 100644 --- a/numpy/core/src/common/array_assign.h +++ b/numpy/core/src/common/array_assign.h @@ -87,8 +87,10 @@ broadcast_strides(int ndim, npy_intp *shape, /* * Checks whether a data pointer + set of strides refers to a raw - * array whose elements are all aligned to a given alignment. - * alignment should be a power of two. + * array whose elements are all aligned to a given alignment. Returns + * 1 if data is aligned to alignment or 0 if not. + * alignment should be a power of two, or may be the sentinel value 0 to mean + * cannot-be-aligned, in which case 0 (false) is always returned. */ NPY_NO_EXPORT int raw_array_is_aligned(int ndim, npy_intp *shape, diff --git a/numpy/core/src/common/get_attr_string.h b/numpy/core/src/common/get_attr_string.h index bec87c5ed..d458d9550 100644 --- a/numpy/core/src/common/get_attr_string.h +++ b/numpy/core/src/common/get_attr_string.h @@ -103,7 +103,6 @@ PyArray_LookupSpecial(PyObject *obj, char *name) if (_is_basic_python_type(tp)) { return NULL; } - return maybe_get_attr((PyObject *)tp, name); } diff --git a/numpy/core/src/common/ufunc_override.c b/numpy/core/src/common/ufunc_override.c index b67422132..89f08a9cb 100644 --- a/numpy/core/src/common/ufunc_override.c +++ b/numpy/core/src/common/ufunc_override.c @@ -71,7 +71,7 @@ PyUFunc_HasOverride(PyObject * obj) * Get possible out argument from kwds, and returns the number of outputs * contained within it: if a tuple, the number of elements in it, 1 otherwise. * The out argument itself is returned in out_kwd_obj, and the outputs - * in the out_obj array (all as borrowed references). + * in the out_obj array (as borrowed references). * * Returns 0 if no outputs found, -1 if kwds is not a dict (with an error set). 
*/ @@ -79,24 +79,42 @@ NPY_NO_EXPORT int PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject ***out_objs) { if (kwds == NULL) { + Py_INCREF(Py_None); + *out_kwd_obj = Py_None; return 0; } if (!PyDict_CheckExact(kwds)) { PyErr_SetString(PyExc_TypeError, "Internal Numpy error: call to PyUFuncOverride_GetOutObjects " "with non-dict kwds"); + *out_kwd_obj = NULL; return -1; } /* borrowed reference */ *out_kwd_obj = PyDict_GetItemString(kwds, "out"); if (*out_kwd_obj == NULL) { + Py_INCREF(Py_None); + *out_kwd_obj = Py_None; return 0; } if (PyTuple_CheckExact(*out_kwd_obj)) { - *out_objs = PySequence_Fast_ITEMS(*out_kwd_obj); - return PySequence_Fast_GET_SIZE(*out_kwd_obj); + /* + * The C-API recommends calling PySequence_Fast before any of the other + * PySequence_Fast* functions. This is required for PyPy + */ + PyObject *seq; + seq = PySequence_Fast(*out_kwd_obj, + "Could not convert object to sequence"); + if (seq == NULL) { + *out_kwd_obj = NULL; + return -1; + } + *out_objs = PySequence_Fast_ITEMS(seq); + *out_kwd_obj = seq; + return PySequence_Fast_GET_SIZE(seq); } else { + Py_INCREF(*out_kwd_obj); *out_objs = out_kwd_obj; return 1; } diff --git a/numpy/core/src/common/ufunc_override.h b/numpy/core/src/common/ufunc_override.h index cc39166b3..bf86865c9 100644 --- a/numpy/core/src/common/ufunc_override.h +++ b/numpy/core/src/common/ufunc_override.h @@ -28,7 +28,7 @@ PyUFunc_HasOverride(PyObject *obj); * Get possible out argument from kwds, and returns the number of outputs * contained within it: if a tuple, the number of elements in it, 1 otherwise. * The out argument itself is returned in out_kwd_obj, and the outputs - * in the out_obj array (all as borrowed references). + * in the out_obj array (as borrowed references). * * Returns 0 if no outputs found, -1 if kwds is not a dict (with an error set). */ diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c new file mode 100644 index 000000000..e62b32ab2 --- /dev/null +++ b/numpy/core/src/multiarray/arrayfunction_override.c @@ -0,0 +1,376 @@ +#define NPY_NO_DEPRECATED_API NPY_API_VERSION +#define _MULTIARRAYMODULE + +#include "npy_pycompat.h" +#include "get_attr_string.h" +#include "npy_import.h" +#include "multiarraymodule.h" + + +/* Return the ndarray.__array_function__ method. */ +static PyObject * +get_ndarray_array_function(void) +{ + PyObject* method = PyObject_GetAttrString((PyObject *)&PyArray_Type, + "__array_function__"); + assert(method != NULL); + return method; +} + + +/* + * Get an object's __array_function__ method in the fastest way possible. + * Never raises an exception. Returns NULL if the method doesn't exist. + */ +static PyObject * +get_array_function(PyObject *obj) +{ + static PyObject *ndarray_array_function = NULL; + + if (ndarray_array_function == NULL) { + ndarray_array_function = get_ndarray_array_function(); + } + + /* Fast return for ndarray */ + if (PyArray_CheckExact(obj)) { + Py_INCREF(ndarray_array_function); + return ndarray_array_function; + } + + return PyArray_LookupSpecial(obj, "__array_function__"); +} + + +/* + * Like list.insert(), but for C arrays of PyObject*. Skips error checking. 
+ */ +static void +pyobject_array_insert(PyObject **array, int length, int index, PyObject *item) +{ + int j; + + for (j = length; j > index; j--) { + array[j] = array[j - 1]; + } + array[index] = item; +} + + +/* + * Collects arguments with __array_function__ and their corresponding methods + * in the order in which they should be tried (i.e., skipping redundant types). + * `relevant_args` is expected to have been produced by PySequence_Fast. + * Returns the number of arguments, or -1 on failure. + */ +static int +get_implementing_args_and_methods(PyObject *relevant_args, + PyObject **implementing_args, + PyObject **methods) +{ + int num_implementing_args = 0; + Py_ssize_t i; + int j; + + PyObject **items = PySequence_Fast_ITEMS(relevant_args); + Py_ssize_t length = PySequence_Fast_GET_SIZE(relevant_args); + + for (i = 0; i < length; i++) { + int new_class = 1; + PyObject *argument = items[i]; + + /* Have we seen this type before? */ + for (j = 0; j < num_implementing_args; j++) { + if (Py_TYPE(argument) == Py_TYPE(implementing_args[j])) { + new_class = 0; + break; + } + } + if (new_class) { + PyObject *method = get_array_function(argument); + + if (method != NULL) { + int arg_index; + + if (num_implementing_args >= NPY_MAXARGS) { + PyErr_Format( + PyExc_TypeError, + "maximum number (%d) of distinct argument types " \ + "implementing __array_function__ exceeded", + NPY_MAXARGS); + Py_DECREF(method); + goto fail; + } + + /* "subclasses before superclasses, otherwise left to right" */ + arg_index = num_implementing_args; + for (j = 0; j < num_implementing_args; j++) { + PyObject *other_type; + other_type = (PyObject *)Py_TYPE(implementing_args[j]); + if (PyObject_IsInstance(argument, other_type)) { + arg_index = j; + break; + } + } + Py_INCREF(argument); + pyobject_array_insert(implementing_args, num_implementing_args, + arg_index, argument); + pyobject_array_insert(methods, num_implementing_args, + arg_index, method); + ++num_implementing_args; + } + } + } + return num_implementing_args; + +fail: + for (j = 0; j < num_implementing_args; j++) { + Py_DECREF(implementing_args[j]); + Py_DECREF(methods[j]); + } + return -1; +} + + +/* + * Is this object ndarray.__array_function__? + */ +static int +is_default_array_function(PyObject *obj) +{ + static PyObject *ndarray_array_function = NULL; + + if (ndarray_array_function == NULL) { + ndarray_array_function = get_ndarray_array_function(); + } + return obj == ndarray_array_function; +} + + +/* + * Core implementation of ndarray.__array_function__. This is exposed + * separately so we can avoid the overhead of a Python method call from + * within `implement_array_function`. + */ +NPY_NO_EXPORT PyObject * +array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, + PyObject *kwargs) +{ + Py_ssize_t j; + PyObject *implementation, *result; + + PyObject **items = PySequence_Fast_ITEMS(types); + Py_ssize_t length = PySequence_Fast_GET_SIZE(types); + + for (j = 0; j < length; j++) { + int is_subclass = PyObject_IsSubclass( + items[j], (PyObject *)&PyArray_Type); + if (is_subclass == -1) { + return NULL; + } + if (!is_subclass) { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + } + + implementation = PyObject_GetAttr(func, npy_ma_str_wrapped); + if (implementation == NULL) { + return NULL; + } + result = PyObject_Call(implementation, args, kwargs); + Py_DECREF(implementation); + return result; +} + + +/* + * Calls __array_function__ on the provided argument, with a fast-path for + * ndarray. 
+ */ +static PyObject * +call_array_function(PyObject* argument, PyObject* method, + PyObject* public_api, PyObject* types, + PyObject* args, PyObject* kwargs) +{ + if (is_default_array_function(method)) { + return array_function_method_impl(public_api, types, args, kwargs); + } + else { + return PyObject_CallFunctionObjArgs( + method, argument, public_api, types, args, kwargs, NULL); + } +} + + +/* + * Implements the __array_function__ protocol for a function, as described in + * in NEP-18. See numpy.core.overrides for a full docstring. + */ +NPY_NO_EXPORT PyObject * +array_implement_array_function( + PyObject *NPY_UNUSED(dummy), PyObject *positional_args) +{ + PyObject *implementation, *public_api, *relevant_args, *args, *kwargs; + + PyObject *types = NULL; + PyObject *implementing_args[NPY_MAXARGS]; + PyObject *array_function_methods[NPY_MAXARGS]; + + int j, any_overrides; + int num_implementing_args = 0; + PyObject *result = NULL; + + static PyObject *errmsg_formatter = NULL; + + if (!PyArg_UnpackTuple( + positional_args, "implement_array_function", 5, 5, + &implementation, &public_api, &relevant_args, &args, &kwargs)) { + return NULL; + } + + relevant_args = PySequence_Fast( + relevant_args, + "dispatcher for __array_function__ did not return an iterable"); + if (relevant_args == NULL) { + return NULL; + } + + /* Collect __array_function__ implementations */ + num_implementing_args = get_implementing_args_and_methods( + relevant_args, implementing_args, array_function_methods); + if (num_implementing_args == -1) { + goto cleanup; + } + + /* + * Handle the typical case of no overrides. This is merely an optimization + * if some arguments are ndarray objects, but is also necessary if no + * arguments implement __array_function__ at all (e.g., if they are all + * built-in types). + */ + any_overrides = 0; + for (j = 0; j < num_implementing_args; j++) { + if (!is_default_array_function(array_function_methods[j])) { + any_overrides = 1; + break; + } + } + if (!any_overrides) { + result = PyObject_Call(implementation, args, kwargs); + goto cleanup; + } + + /* + * Create a Python object for types. + * We use a tuple, because it's the fastest Python collection to create + * and has the bonus of being immutable. + */ + types = PyTuple_New(num_implementing_args); + if (types == NULL) { + goto cleanup; + } + for (j = 0; j < num_implementing_args; j++) { + PyObject *arg_type = (PyObject *)Py_TYPE(implementing_args[j]); + Py_INCREF(arg_type); + PyTuple_SET_ITEM(types, j, arg_type); + } + + /* Call __array_function__ methods */ + for (j = 0; j < num_implementing_args; j++) { + PyObject *argument = implementing_args[j]; + PyObject *method = array_function_methods[j]; + + /* + * We use `public_api` instead of `implementation` here so + * __array_function__ implementations can do equality/identity + * comparisons. + */ + result = call_array_function( + argument, method, public_api, types, args, kwargs); + + if (result == Py_NotImplemented) { + /* Try the next one */ + Py_DECREF(result); + result = NULL; + } + else { + /* Either a good result, or an exception was raised. */ + goto cleanup; + } + } + + /* No acceptable override found, raise TypeError. 
*/ + npy_cache_import("numpy.core._internal", + "array_function_errmsg_formatter", + &errmsg_formatter); + if (errmsg_formatter != NULL) { + PyObject *errmsg = PyObject_CallFunctionObjArgs( + errmsg_formatter, public_api, types, NULL); + if (errmsg != NULL) { + PyErr_SetObject(PyExc_TypeError, errmsg); + Py_DECREF(errmsg); + } + } + +cleanup: + for (j = 0; j < num_implementing_args; j++) { + Py_DECREF(implementing_args[j]); + Py_DECREF(array_function_methods[j]); + } + Py_XDECREF(types); + Py_DECREF(relevant_args); + return result; +} + + +/* + * Python wrapper for get_implementing_args_and_methods, for testing purposes. + */ +NPY_NO_EXPORT PyObject * +array__get_implementing_args( + PyObject *NPY_UNUSED(dummy), PyObject *positional_args) +{ + PyObject *relevant_args; + int j; + int num_implementing_args = 0; + PyObject *implementing_args[NPY_MAXARGS]; + PyObject *array_function_methods[NPY_MAXARGS]; + PyObject *result = NULL; + + if (!PyArg_ParseTuple(positional_args, "O:array__get_implementing_args", + &relevant_args)) { + return NULL; + } + + relevant_args = PySequence_Fast( + relevant_args, + "dispatcher for __array_function__ did not return an iterable"); + if (relevant_args == NULL) { + return NULL; + } + + num_implementing_args = get_implementing_args_and_methods( + relevant_args, implementing_args, array_function_methods); + if (num_implementing_args == -1) { + goto cleanup; + } + + /* create a Python object for implementing_args */ + result = PyList_New(num_implementing_args); + if (result == NULL) { + goto cleanup; + } + for (j = 0; j < num_implementing_args; j++) { + PyObject *argument = implementing_args[j]; + Py_INCREF(argument); + PyList_SET_ITEM(result, j, argument); + } + +cleanup: + for (j = 0; j < num_implementing_args; j++) { + Py_DECREF(implementing_args[j]); + Py_DECREF(array_function_methods[j]); + } + Py_DECREF(relevant_args); + return result; +} diff --git a/numpy/core/src/multiarray/arrayfunction_override.h b/numpy/core/src/multiarray/arrayfunction_override.h new file mode 100644 index 000000000..0d224e2b6 --- /dev/null +++ b/numpy/core/src/multiarray/arrayfunction_override.h @@ -0,0 +1,16 @@ +#ifndef _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H +#define _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H + +NPY_NO_EXPORT PyObject * +array_implement_array_function( + PyObject *NPY_UNUSED(dummy), PyObject *positional_args); + +NPY_NO_EXPORT PyObject * +array__get_implementing_args( + PyObject *NPY_UNUSED(dummy), PyObject *positional_args); + +NPY_NO_EXPORT PyObject * +array_function_method_impl(PyObject *func, PyObject *types, PyObject *args, + PyObject *kwargs); + +#endif diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c index 3e5221a59..2e51cee7e 100644 --- a/numpy/core/src/multiarray/common.c +++ b/numpy/core/src/multiarray/common.c @@ -440,12 +440,18 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims, return 0; } - /* Recursive case, first check the sequence contains only one type */ + /* + * The C-API recommends calling PySequence_Fast before any of the other + * PySequence_Fast* functions. This is required for PyPy + */ seq = PySequence_Fast(obj, "Could not convert object to sequence"); if (seq == NULL) { goto fail; } + + /* Recursive case, first check the sequence contains only one type */ size = PySequence_Fast_GET_SIZE(seq); + /* objects is borrowed, do not release seq */ objects = PySequence_Fast_ITEMS(seq); common_type = size > 0 ? 
Py_TYPE(objects[0]) : NULL; for (i = 1; i < size; ++i) { diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c index 23a8dcea2..f77e414da 100644 --- a/numpy/core/src/multiarray/ctors.c +++ b/numpy/core/src/multiarray/ctors.c @@ -2024,7 +2024,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags) newtype = oldtype; Py_INCREF(oldtype); } - if (PyDataType_ISUNSIZED(newtype)) { + else if (PyDataType_ISUNSIZED(newtype)) { PyArray_DESCR_REPLACE(newtype); if (newtype == NULL) { return NULL; diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c index b9be3c09f..3038e4dea 100644 --- a/numpy/core/src/multiarray/descriptor.c +++ b/numpy/core/src/multiarray/descriptor.c @@ -257,6 +257,9 @@ _convert_from_tuple(PyObject *obj, int align) return NULL; } PyArray_DESCR_REPLACE(type); + if (type == NULL) { + return NULL; + } if (type->type_num == NPY_UNICODE) { type->elsize = itemsize << 2; } @@ -1651,6 +1654,9 @@ finish: if (PyDataType_ISUNSIZED(*at) && (*at)->elsize != elsize) { PyArray_DESCR_REPLACE(*at); + if (*at == NULL) { + goto error; + } (*at)->elsize = elsize; } if (endian != '=' && PyArray_ISNBO(endian)) { @@ -1659,6 +1665,9 @@ finish: if (endian != '=' && (*at)->byteorder != '|' && (*at)->byteorder != endian) { PyArray_DESCR_REPLACE(*at); + if (*at == NULL) { + goto error; + } (*at)->byteorder = endian; } return NPY_SUCCEED; diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src index 159bb4103..896e466c8 100644 --- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src @@ -121,8 +121,8 @@ static void { #if @is_aligned@ && @elsize@ != 16 /* sanity check */ - assert(npy_is_aligned(dst, _ALIGN(@type@))); - assert(npy_is_aligned(src, _ALIGN(@type@))); + assert(N == 0 || npy_is_aligned(dst, _ALIGN(@type@))); + assert(N == 0 || npy_is_aligned(src, _ALIGN(@type@))); #endif /*printf("fn @prefix@_@oper@_size@elsize@\n");*/ while (N > 0) { @@ -201,8 +201,8 @@ static NPY_GCC_OPT_3 void } #if @is_aligned@ && @elsize@ != 16 /* sanity check */ - assert(npy_is_aligned(dst, _ALIGN(@type@))); - assert(npy_is_aligned(src, _ALIGN(@type@))); + assert(N == 0 || npy_is_aligned(dst, _ALIGN(@type@))); + assert(N == 0 || npy_is_aligned(src, _ALIGN(@type@))); #endif #if @elsize@ == 1 && @dst_contig@ memset(dst, *src, N); @@ -809,10 +809,10 @@ static NPY_GCC_OPT_3 void #if @aligned@ /* sanity check */ # if !@is_complex1@ - assert(npy_is_aligned(src, _ALIGN(_TYPE1))); + assert(N == 0 || npy_is_aligned(src, _ALIGN(_TYPE1))); # endif # if !@is_complex2@ - assert(npy_is_aligned(dst, _ALIGN(_TYPE2))); + assert(N == 0 || npy_is_aligned(dst, _ALIGN(_TYPE2))); # endif #endif diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c index 231bd86dc..085bc00c0 100644 --- a/numpy/core/src/multiarray/methods.c +++ b/numpy/core/src/multiarray/methods.c @@ -8,6 +8,7 @@ #include "numpy/arrayobject.h" #include "numpy/arrayscalars.h" +#include "arrayfunction_override.h" #include "npy_config.h" #include "npy_pycompat.h" #include "npy_import.h" @@ -187,7 +188,7 @@ array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds) } if (n <= 1) { - if (PyTuple_GET_ITEM(args, 0) == Py_None) { + if (n != 0 && PyTuple_GET_ITEM(args, 0) == Py_None) { return PyArray_View(self, NULL, NULL); } if (!PyArg_ParseTuple(args, "O&:reshape", PyArray_IntpConverter, @@ -1003,6 +1004,7 @@ 
any_array_ufunc_overrides(PyObject *args, PyObject *kwds) int i; int nin, nout; PyObject *out_kwd_obj; + PyObject *fast; PyObject **in_objs, **out_objs; /* check inputs */ @@ -1010,12 +1012,18 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) if (nin < 0) { return -1; } - in_objs = PySequence_Fast_ITEMS(args); + fast = PySequence_Fast(args, "Could not convert object to sequence"); + if (fast == NULL) { + return -1; + } + in_objs = PySequence_Fast_ITEMS(fast); for (i = 0; i < nin; ++i) { if (PyUFunc_HasOverride(in_objs[i])) { + Py_DECREF(fast); return 1; } } + Py_DECREF(fast); /* check outputs, if any */ nout = PyUFuncOverride_GetOutObjects(kwds, &out_kwd_obj, &out_objs); if (nout < 0) { @@ -1023,9 +1031,11 @@ any_array_ufunc_overrides(PyObject *args, PyObject *kwds) } for (i = 0; i < nout; i++) { if (PyUFunc_HasOverride(out_objs[i])) { + Py_DECREF(out_kwd_obj); return 1; } } + Py_DECREF(out_kwd_obj); return 0; } @@ -1079,13 +1089,29 @@ cleanup: return result; } - static PyObject * -array_function(PyArrayObject *self, PyObject *args, PyObject *kwds) +array_function(PyArrayObject *self, PyObject *c_args, PyObject *c_kwds) { - NPY_FORWARD_NDARRAY_METHOD("_array_function"); -} + PyObject *func, *types, *args, *kwargs, *result; + static char *kwlist[] = {"func", "types", "args", "kwargs", NULL}; + + if (!PyArg_ParseTupleAndKeywords( + c_args, c_kwds, "OOOO:__array_function__", kwlist, + &func, &types, &args, &kwargs)) { + return NULL; + } + + types = PySequence_Fast( + types, + "types argument to ndarray.__array_function__ must be iterable"); + if (types == NULL) { + return NULL; + } + result = array_function_method_impl(func, types, args, kwargs); + Py_DECREF(types); + return result; +} static PyObject * array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds) diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c index ce8af4392..62345d2b0 100644 --- a/numpy/core/src/multiarray/multiarraymodule.c +++ b/numpy/core/src/multiarray/multiarraymodule.c @@ -34,6 +34,7 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0; /* Internal APIs */ +#include "arrayfunction_override.h" #include "arraytypes.h" #include "arrayobject.h" #include "hashdescr.h" @@ -982,7 +983,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out) for (i = 0; i < PyArray_NDIM(ap2) - 2; i++) { dimensions[j++] = PyArray_DIMS(ap2)[i]; } - if(PyArray_NDIM(ap2) > 1) { + if (PyArray_NDIM(ap2) > 1) { dimensions[j++] = PyArray_DIMS(ap2)[PyArray_NDIM(ap2)-1]; } @@ -1318,7 +1319,7 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode) */ if (inverted) { st = _pyarray_revert(ret); - if(st) { + if (st) { goto clean_ret; } } @@ -1365,7 +1366,7 @@ PyArray_Correlate(PyObject *op1, PyObject *op2, int mode) } ret = _pyarray_correlate(ap1, ap2, typenum, mode, &unused); - if(ret == NULL) { + if (ret == NULL) { goto fail; } Py_DECREF(ap1); @@ -1654,7 +1655,7 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) } full_path: - if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i:array", kwd, + if (!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i:array", kwd, &op, PyArray_DescrConverter2, &type, PyArray_BoolConverter, ©, @@ -2489,7 +2490,7 @@ einsum_sub_op_from_lists(PyObject *args, "operand and a subscripts list to einsum"); return -1; } - else if(nop >= NPY_MAXARGS) { + else if (nop >= NPY_MAXARGS) { PyErr_SetString(PyExc_ValueError, "too many operands"); return -1; } @@ -2724,7 +2725,7 @@ array_arange(PyObject 
*NPY_UNUSED(ignored), PyObject *args, PyObject *kws) { static char *kwd[]= {"start", "stop", "step", "dtype", NULL}; PyArray_Descr *typecode = NULL; - if(!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&:arange", kwd, + if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&:arange", kwd, &o_start, &o_stop, &o_step, @@ -2762,7 +2763,7 @@ array__get_ndarray_c_version(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObje { static char *kwlist[] = {NULL}; - if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) { return NULL; } return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() ); @@ -2835,7 +2836,7 @@ array_set_string_function(PyObject *NPY_UNUSED(self), PyObject *args, int repr = 1; static char *kwlist[] = {"f", "repr", NULL}; - if(!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) { + if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) { return NULL; } /* reset the array_repr function to built-in */ @@ -3145,7 +3146,7 @@ array_promote_types(PyObject *NPY_UNUSED(dummy), PyObject *args) PyArray_Descr *d1 = NULL; PyArray_Descr *d2 = NULL; PyObject *ret = NULL; - if(!PyArg_ParseTuple(args, "O&O&:promote_types", + if (!PyArg_ParseTuple(args, "O&O&:promote_types", PyArray_DescrConverter2, &d1, PyArray_DescrConverter2, &d2)) { goto finish; } @@ -3171,7 +3172,7 @@ array_min_scalar_type(PyObject *NPY_UNUSED(dummy), PyObject *args) PyArrayObject *array; PyObject *ret = NULL; - if(!PyArg_ParseTuple(args, "O:min_scalar_type", &array_in)) { + if (!PyArg_ParseTuple(args, "O:min_scalar_type", &array_in)) { return NULL; } @@ -3248,7 +3249,7 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args) PyArray_Descr *dtype; PyArray_DatetimeMetaData *meta; - if(!PyArg_ParseTuple(args, "O&:datetime_data", + if (!PyArg_ParseTuple(args, "O&:datetime_data", PyArray_DescrConverter, &dtype)) { return NULL; } @@ -3267,7 +3268,7 @@ new_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args) { int size; - if(!PyArg_ParseTuple(args, "i:buffer", &size)) { + if (!PyArg_ParseTuple(args, "i:buffer", &size)) { return NULL; } return PyBuffer_New(size); @@ -4062,6 +4063,9 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds) } static struct PyMethodDef array_module_methods[] = { + {"_get_implementing_args", + (PyCFunction)array__get_implementing_args, + METH_VARARGS, NULL}, {"_get_ndarray_c_version", (PyCFunction)array__get_ndarray_c_version, METH_VARARGS|METH_KEYWORDS, NULL}, @@ -4224,6 +4228,9 @@ static struct PyMethodDef array_module_methods[] = { METH_VARARGS | METH_KEYWORDS, NULL}, {"_monotonicity", (PyCFunction)arr__monotonicity, METH_VARARGS | METH_KEYWORDS, NULL}, + {"implement_array_function", + (PyCFunction)array_implement_array_function, + METH_VARARGS, NULL}, {"interp", (PyCFunction)arr_interp, METH_VARARGS | METH_KEYWORDS, NULL}, {"interp_complex", (PyCFunction)arr_interp_complex, @@ -4476,6 +4483,7 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_buffer = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ufunc = NULL; +NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_wrapped = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_copy = NULL; NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL; @@ -4492,6 +4500,7 @@ intern_strings(void) 
npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__"); npy_ma_str_buffer = PyUString_InternFromString("__buffer__"); npy_ma_str_ufunc = PyUString_InternFromString("__array_ufunc__"); + npy_ma_str_wrapped = PyUString_InternFromString("__wrapped__"); npy_ma_str_order = PyUString_InternFromString("order"); npy_ma_str_copy = PyUString_InternFromString("copy"); npy_ma_str_dtype = PyUString_InternFromString("dtype"); @@ -4501,7 +4510,7 @@ intern_strings(void) return npy_ma_str_array && npy_ma_str_array_prepare && npy_ma_str_array_wrap && npy_ma_str_array_finalize && - npy_ma_str_buffer && npy_ma_str_ufunc && + npy_ma_str_buffer && npy_ma_str_ufunc && npy_ma_str_wrapped && npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype && npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2; } @@ -4570,6 +4579,10 @@ PyMODINIT_FUNC init_multiarray_umath(void) { */ PyArray_Type.tp_hash = PyObject_HashNotImplemented; + if (PyType_Ready(&PyUFunc_Type) < 0) { + goto err; + } + /* Load the ufunc operators into the array module's namespace */ if (InitOperators(d) < 0) { goto err; @@ -4580,8 +4593,9 @@ PyMODINIT_FUNC init_multiarray_umath(void) { } initialize_casting_tables(); initialize_numeric_types(); - if(initscalarmath(m) < 0) + if (initscalarmath(m) < 0) { goto err; + } if (PyType_Ready(&PyArray_Type) < 0) { goto err; diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h index 3de68c549..60a3965c9 100644 --- a/numpy/core/src/multiarray/multiarraymodule.h +++ b/numpy/core/src/multiarray/multiarraymodule.h @@ -7,6 +7,7 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_buffer; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ufunc; +NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_wrapped; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_copy; NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype; diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src index 611bd02a5..53afa817b 100644 --- a/numpy/core/src/umath/loops.c.src +++ b/numpy/core/src/umath/loops.c.src @@ -1886,7 +1886,8 @@ NPY_NO_EXPORT void if (!run_unary_reduce_simd_@kind@_@TYPE@(args, dimensions, steps)) { BINARY_REDUCE_LOOP(@type@) { const @type@ in2 = *(@type@ *)ip2; - io1 = (npy_isnan(io1) || io1 @OP@ in2) ? io1 : in2; + /* Order of operations important for MSVC 2015 */ + io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2; } *((@type@ *)iop1) = io1; } @@ -1895,7 +1896,8 @@ NPY_NO_EXPORT void BINARY_LOOP { @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - in1 = (npy_isnan(in1) || in1 @OP@ in2) ? in1 : in2; + /* Order of operations important for MSVC 2015 */ + in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2; *((@type@ *)op1) = in1; } } @@ -1914,7 +1916,8 @@ NPY_NO_EXPORT void if (IS_BINARY_REDUCE) { BINARY_REDUCE_LOOP(@type@) { const @type@ in2 = *(@type@ *)ip2; - io1 = (npy_isnan(in2) || io1 @OP@ in2) ? io1 : in2; + /* Order of operations important for MSVC 2015 */ + io1 = (io1 @OP@ in2 || npy_isnan(in2)) ? io1 : in2; } *((@type@ *)iop1) = io1; } @@ -1922,7 +1925,8 @@ NPY_NO_EXPORT void BINARY_LOOP { const @type@ in1 = *(@type@ *)ip1; const @type@ in2 = *(@type@ *)ip2; - *((@type@ *)op1) = (npy_isnan(in2) || in1 @OP@ in2) ? 
in1 : in2; + /* Order of operations important for MSVC 2015 */ + *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2; } } npy_clear_floatstatus_barrier((char*)dimensions); diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c index c56f43fa2..2ea23311b 100644 --- a/numpy/core/src/umath/override.c +++ b/numpy/core/src/umath/override.c @@ -86,6 +86,7 @@ get_array_ufunc_overrides(PyObject *args, PyObject *kwds, ++num_override_args; } } + Py_DECREF(out_kwd_obj); return num_override_args; fail: @@ -93,6 +94,7 @@ fail: Py_DECREF(with_override[i]); Py_DECREF(methods[i]); } + Py_DECREF(out_kwd_obj); return -1; } diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src index a3e00b5c1..4bb8569be 100644 --- a/numpy/core/src/umath/simd.inc.src +++ b/numpy/core/src/umath/simd.inc.src @@ -32,13 +32,7 @@ #include <float.h> #include <string.h> /* for memcpy */ -#if defined __AVX512F__ -#define VECTOR_SIZE_BYTES 64 -#elif defined __AVX2__ -#define VECTOR_SIZE_BYTES 32 -#else #define VECTOR_SIZE_BYTES 16 -#endif static NPY_INLINE npy_uintp abs_ptrdiff(char *a, char *b) @@ -190,17 +184,24 @@ run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps @type@ * ip2 = (@type@ *)args[1]; @type@ * op = (@type@ *)args[2]; npy_intp n = dimensions[0]; +#if defined __AVX512F__ + const npy_intp vector_size_bytes = 64; +#elif defined __AVX2__ + const npy_intp vector_size_bytes = 32; +#else + const npy_intp vector_size_bytes = 32; +#endif /* argument one scalar */ - if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), VECTOR_SIZE_BYTES)) { + if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), vector_size_bytes)) { sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n); return 1; } /* argument two scalar */ - else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), VECTOR_SIZE_BYTES)) { + else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), vector_size_bytes)) { sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n); return 1; } - else if (IS_BLOCKABLE_BINARY(sizeof(@type@), VECTOR_SIZE_BYTES)) { + else if (IS_BLOCKABLE_BINARY(sizeof(@type@), vector_size_bytes)) { sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n); return 1; } @@ -427,19 +428,20 @@ static void sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) { #ifdef __AVX512F__ - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) + const npy_intp vector_size_bytes = 64; + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) op[i] = ip1[i] @OP@ ip2[i]; /* lots of specializations, to squeeze out max performance */ - if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES) && npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { + if (npy_is_aligned(&ip1[i], vector_size_bytes) && npy_is_aligned(&ip2[i], vector_size_bytes)) { if (ip1 == ip2) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a); @vpre512@_store_@vsuf@(&op[i], c); } } else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); @@ -447,16 +449,16 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) } } } - else if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + else if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ a = 
@vpre512@_load_@vsuf@(&ip1[i]); @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); @vpre512@_store_@vsuf@(&op[i], c); } } - else if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + else if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); @@ -465,14 +467,14 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) } else { if (ip1 == ip2) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a); @vpre512@_store_@vsuf@(&op[i], c); } } else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); @@ -481,20 +483,21 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) } } #elif __AVX2__ - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) + const npy_intp vector_size_bytes = 32; + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) op[i] = ip1[i] @OP@ ip2[i]; /* lots of specializations, to squeeze out max performance */ - if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES) && - npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { + if (npy_is_aligned(&ip1[i], vector_size_bytes) && + npy_is_aligned(&ip2[i], vector_size_bytes)) { if (ip1 == ip2) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a); @vpre256@_store_@vsuf@(&op[i], c); } } else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); @@ -502,16 +505,16 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) } } } - else if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + else if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); @vpre256@_store_@vsuf@(&op[i], c); } } - else if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + else if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); @@ -520,14 +523,14 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) } else { if (ip1 == ip2) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a); @vpre256@_store_@vsuf@(&op[i], c); } } else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); @@ -601,18 +604,19 
@@ static void sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) { #ifdef __AVX512F__ + const npy_intp vector_size_bytes = 64; const @vtype512@ a = @vpre512@_set1_@vsuf@(ip1[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) op[i] = ip1[0] @OP@ ip2[i]; - if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); @vpre512@_store_@vsuf@(&op[i], c); } } else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); @vpre512@_store_@vsuf@(&op[i], c); @@ -621,18 +625,19 @@ sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i #elif __AVX2__ + const npy_intp vector_size_bytes = 32; const @vtype256@ a = @vpre256@_set1_@vsuf@(ip1[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) op[i] = ip1[0] @OP@ ip2[i]; - if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + if (npy_is_aligned(&ip2[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); @vpre256@_store_@vsuf@(&op[i], c); } } else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); @vpre256@_store_@vsuf@(&op[i], c); @@ -667,18 +672,19 @@ static void sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n) { #ifdef __AVX512F__ + const npy_intp vector_size_bytes = 64; const @vtype512@ b = @vpre512@_set1_@vsuf@(ip2[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) op[i] = ip1[i] @OP@ ip2[0]; - if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); @vpre512@_store_@vsuf@(&op[i], c); } } else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]); @vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b); @vpre512@_store_@vsuf@(&op[i], c); @@ -686,18 +692,19 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i } #elif __AVX2__ + const npy_intp vector_size_bytes = 32; const @vtype256@ b = @vpre256@_set1_@vsuf@(ip2[0]); - LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) + LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes) op[i] = ip1[i] @OP@ ip2[0]; - if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + if (npy_is_aligned(&ip1[i], vector_size_bytes)) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]); @vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b); @vpre256@_store_@vsuf@(&op[i], c); } } else { - LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) { + LOOP_BLOCKED(@type@, vector_size_bytes) { @vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]); @vtype256@ c = 
@vpre256@_@VOP@_@vsuf@(a, b); @vpre256@_store_@vsuf@(&op[i], c); @@ -1029,7 +1036,8 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) { const npy_intp stride = VECTOR_SIZE_BYTES / (npy_intp)sizeof(@type@); LOOP_BLOCK_ALIGN_VAR(ip, @type@, VECTOR_SIZE_BYTES) { - *op = (npy_isnan(*op) || *op @OP@ ip[i]) ? *op : ip[i]; + /* Order of operations important for MSVC 2015 */ + *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i]; } assert(n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)); if (i + 3 * stride <= n) { @@ -1053,11 +1061,13 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n) } else { @type@ tmp = sse2_horizontal_@VOP@_@vtype@(c1); - *op = (npy_isnan(*op) || *op @OP@ tmp) ? *op : tmp; + /* Order of operations important for MSVC 2015 */ + *op = (*op @OP@ tmp || npy_isnan(*op)) ? *op : tmp; } } LOOP_BLOCKED_END { - *op = (npy_isnan(*op) || *op @OP@ ip[i]) ? *op : ip[i]; + /* Order of operations important for MSVC 2015 */ + *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i]; } npy_clear_floatstatus_barrier((char*)op); } diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c index ea0007a9d..5ef134ac1 100644 --- a/numpy/core/src/umath/ufunc_object.c +++ b/numpy/core/src/umath/ufunc_object.c @@ -5279,6 +5279,8 @@ ufunc_dealloc(PyUFuncObject *ufunc) { PyArray_free(ufunc->core_num_dims); PyArray_free(ufunc->core_dim_ixs); + PyArray_free(ufunc->core_dim_sizes); + PyArray_free(ufunc->core_dim_flags); PyArray_free(ufunc->core_offsets); PyArray_free(ufunc->core_signature); PyArray_free(ufunc->ptr); diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c index 8277ad6cc..5de19fec2 100644 --- a/numpy/core/src/umath/umathmodule.c +++ b/numpy/core/src/umath/umathmodule.c @@ -268,10 +268,6 @@ int initumath(PyObject *m) UFUNC_FLOATING_POINT_SUPPORT = 0; #endif - /* Initialize the types */ - if (PyType_Ready(&PyUFunc_Type) < 0) - return -1; - /* Add some symbolic constants to the module */ d = PyModule_GetDict(m); diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py index cdacdabbe..91f4526bd 100644 --- a/numpy/core/tests/test_multiarray.py +++ b/numpy/core/tests/test_multiarray.py @@ -7626,6 +7626,55 @@ class TestCTypes(object): finally: _internal.ctypes = ctypes + def _make_readonly(x): + x.flags.writeable = False + return x + + @pytest.mark.parametrize('arr', [ + np.array([1, 2, 3]), + np.array([['one', 'two'], ['three', 'four']]), + np.array((1, 2), dtype='i4,i4'), + np.zeros((2,), dtype= + np.dtype(dict( + formats=['<i4', '<i4'], + names=['a', 'b'], + offsets=[0, 2], + itemsize=6 + )) + ), + np.array([None], dtype=object), + np.array([]), + np.empty((0, 0)), + _make_readonly(np.array([1, 2, 3])), + ], ids=[ + '1d', + '2d', + 'structured', + 'overlapping', + 'object', + 'empty', + 'empty-2d', + 'readonly' + ]) + def test_ctypes_data_as_holds_reference(self, arr): + # gh-9647 + # create a copy to ensure that pytest does not mess with the refcounts + arr = arr.copy() + + arr_ref = weakref.ref(arr) + + ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p) + + # `ctypes_ptr` should hold onto `arr` + del arr + gc.collect() + assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference") + + # but when the `ctypes_ptr` object dies, so should `arr` + del ctypes_ptr + gc.collect() + assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference") + class TestWritebackIfCopy(object): # all these tests use the WRITEBACKIFCOPY mechanism @@ 
-7923,6 +7972,44 @@ def test_uintalignment_and_alignment(): dst = np.zeros((2,2), dtype='c8') dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails? +class TestAlignment(object): + # adapted from scipy._lib.tests.test__util.test__aligned_zeros + # Checks that unusual memory alignments don't trip up numpy. + # In particular, check RELAXED_STRIDES don't trip alignment assertions in + # NDEBUG mode for size-0 arrays (gh-12503) + + def check(self, shape, dtype, order, align): + err_msg = repr((shape, dtype, order, align)) + x = _aligned_zeros(shape, dtype, order, align=align) + if align is None: + align = np.dtype(dtype).alignment + assert_equal(x.__array_interface__['data'][0] % align, 0) + if hasattr(shape, '__len__'): + assert_equal(x.shape, shape, err_msg) + else: + assert_equal(x.shape, (shape,), err_msg) + assert_equal(x.dtype, dtype) + if order == "C": + assert_(x.flags.c_contiguous, err_msg) + elif order == "F": + if x.size > 0: + assert_(x.flags.f_contiguous, err_msg) + elif order is None: + assert_(x.flags.c_contiguous, err_msg) + else: + raise ValueError() + + def test_various_alignments(self): + for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]: + for n in [0, 1, 3, 11]: + for order in ["C", "F", None]: + for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']: + if dtype == 'O': + # object dtype can't be misaligned + continue + for shape in [n, (1, 2, 3, n)]: + self.check(shape, np.dtype(dtype), order, align) + def test_getfield(): a = np.arange(32, dtype='uint16') if sys.byteorder == 'little': diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py index 62b2a3e53..8f1c16539 100644 --- a/numpy/core/tests/test_overrides.py +++ b/numpy/core/tests/test_overrides.py @@ -7,7 +7,7 @@ import numpy as np from numpy.testing import ( assert_, assert_equal, assert_raises, assert_raises_regex) from numpy.core.overrides import ( - get_overloaded_types_and_args, array_function_dispatch, + _get_implementing_args, array_function_dispatch, verify_matching_signatures, ENABLE_ARRAY_FUNCTION) from numpy.core.numeric import pickle import pytest @@ -18,11 +18,6 @@ requires_array_function = pytest.mark.skipif( reason="__array_function__ dispatch not enabled.") -def _get_overloaded_args(relevant_args): - types, args = get_overloaded_types_and_args(relevant_args) - return args - - def _return_not_implemented(self, *args, **kwargs): return NotImplemented @@ -41,26 +36,21 @@ def dispatched_two_arg(array1, array2): @requires_array_function -class TestGetOverloadedTypesAndArgs(object): +class TestGetImplementingArgs(object): def test_ndarray(self): array = np.array(1) - types, args = get_overloaded_types_and_args([array]) - assert_equal(set(types), {np.ndarray}) + args = _get_implementing_args([array]) assert_equal(list(args), [array]) - types, args = get_overloaded_types_and_args([array, array]) - assert_equal(len(types), 1) - assert_equal(set(types), {np.ndarray}) + args = _get_implementing_args([array, array]) assert_equal(list(args), [array]) - types, args = get_overloaded_types_and_args([array, 1]) - assert_equal(set(types), {np.ndarray}) + args = _get_implementing_args([array, 1]) assert_equal(list(args), [array]) - types, args = get_overloaded_types_and_args([1, array]) - assert_equal(set(types), {np.ndarray}) + args = _get_implementing_args([1, array]) assert_equal(list(args), [array]) def test_ndarray_subclasses(self): @@ -75,17 +65,14 @@ class TestGetOverloadedTypesAndArgs(object): override_sub = np.array(1).view(OverrideSub) no_override_sub = 
np.array(1).view(NoOverrideSub) - types, args = get_overloaded_types_and_args([array, override_sub]) - assert_equal(set(types), {np.ndarray, OverrideSub}) + args = _get_implementing_args([array, override_sub]) assert_equal(list(args), [override_sub, array]) - types, args = get_overloaded_types_and_args([array, no_override_sub]) - assert_equal(set(types), {np.ndarray, NoOverrideSub}) + args = _get_implementing_args([array, no_override_sub]) assert_equal(list(args), [no_override_sub, array]) - types, args = get_overloaded_types_and_args( + args = _get_implementing_args( [override_sub, no_override_sub]) - assert_equal(set(types), {OverrideSub, NoOverrideSub}) assert_equal(list(args), [override_sub, no_override_sub]) def test_ndarray_and_duck_array(self): @@ -96,12 +83,10 @@ class TestGetOverloadedTypesAndArgs(object): array = np.array(1) other = Other() - types, args = get_overloaded_types_and_args([other, array]) - assert_equal(set(types), {np.ndarray, Other}) + args = _get_implementing_args([other, array]) assert_equal(list(args), [other, array]) - types, args = get_overloaded_types_and_args([array, other]) - assert_equal(set(types), {np.ndarray, Other}) + args = _get_implementing_args([array, other]) assert_equal(list(args), [array, other]) def test_ndarray_subclass_and_duck_array(self): @@ -116,9 +101,9 @@ class TestGetOverloadedTypesAndArgs(object): subarray = np.array(1).view(OverrideSub) other = Other() - assert_equal(_get_overloaded_args([array, subarray, other]), + assert_equal(_get_implementing_args([array, subarray, other]), [subarray, array, other]) - assert_equal(_get_overloaded_args([array, other, subarray]), + assert_equal(_get_implementing_args([array, other, subarray]), [subarray, array, other]) def test_many_duck_arrays(self): @@ -140,15 +125,26 @@ class TestGetOverloadedTypesAndArgs(object): c = C() d = D() - assert_equal(_get_overloaded_args([1]), []) - assert_equal(_get_overloaded_args([a]), [a]) - assert_equal(_get_overloaded_args([a, 1]), [a]) - assert_equal(_get_overloaded_args([a, a, a]), [a]) - assert_equal(_get_overloaded_args([a, d, a]), [a, d]) - assert_equal(_get_overloaded_args([a, b]), [b, a]) - assert_equal(_get_overloaded_args([b, a]), [b, a]) - assert_equal(_get_overloaded_args([a, b, c]), [b, c, a]) - assert_equal(_get_overloaded_args([a, c, b]), [c, b, a]) + assert_equal(_get_implementing_args([1]), []) + assert_equal(_get_implementing_args([a]), [a]) + assert_equal(_get_implementing_args([a, 1]), [a]) + assert_equal(_get_implementing_args([a, a, a]), [a]) + assert_equal(_get_implementing_args([a, d, a]), [a, d]) + assert_equal(_get_implementing_args([a, b]), [b, a]) + assert_equal(_get_implementing_args([b, a]), [b, a]) + assert_equal(_get_implementing_args([a, b, c]), [b, c, a]) + assert_equal(_get_implementing_args([a, c, b]), [c, b, a]) + + def test_too_many_duck_arrays(self): + namespace = dict(__array_function__=_return_not_implemented) + types = [type('A' + str(i), (object,), namespace) for i in range(33)] + relevant_args = [t() for t in types] + + actual = _get_implementing_args(relevant_args[:32]) + assert_equal(actual, relevant_args[:32]) + + with assert_raises_regex(TypeError, 'distinct argument types'): + _get_implementing_args(relevant_args) @requires_array_function @@ -201,6 +197,14 @@ class TestNDArrayArrayFunction(object): result = np.concatenate((array, override_sub)) assert_equal(result, expected.view(OverrideSub)) + def test_no_wrapper(self): + array = np.array(1) + func = dispatched_one_arg.__wrapped__ + with 
assert_raises_regex(AttributeError, '__wrapped__'): + array.__array_function__(func=func, + types=(np.ndarray,), + args=(array,), kwargs={}) + @requires_array_function class TestArrayFunctionDispatch(object): diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py index ef5c118ec..b996321c2 100644 --- a/numpy/core/tests/test_shape_base.py +++ b/numpy/core/tests/test_shape_base.py @@ -373,6 +373,10 @@ def test_stack(): # empty arrays assert_(stack([[], [], []]).shape == (3, 0)) assert_(stack([[], [], []], axis=1).shape == (0, 3)) + # out + out = np.zeros_like(r1) + np.stack((a, b), out=out) + assert_array_equal(out, r1) # edge cases assert_raises_regex(ValueError, 'need at least one array', stack, []) assert_raises_regex(ValueError, 'must have the same shape', diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py index 11368587f..2e9781286 100644 --- a/numpy/ctypeslib.py +++ b/numpy/ctypeslib.py @@ -93,7 +93,7 @@ else: def load_library(libname, loader_path): """ It is possible to load a library using - >>> lib = ctypes.cdll[<full_path_name>] + >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP But there are cross-platform considerations, such as library file extensions, plus the fact Windows will just load the first library it finds with that name. @@ -401,5 +401,5 @@ if ctypes is not None: raise TypeError("readonly arrays unsupported") tp = _ctype_ndarray(_typecodes[ai["typestr"]], ai["shape"]) result = tp.from_address(addr) - result.__keep = ai + result.__keep = obj return result diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py index 5b7cb3fcf..100d0d069 100644 --- a/numpy/distutils/ccompiler.py +++ b/numpy/distutils/ccompiler.py @@ -17,7 +17,9 @@ from distutils.version import LooseVersion from numpy.distutils import log from numpy.distutils.compat import get_exception -from numpy.distutils.exec_command import filepath_from_subprocess_output +from numpy.distutils.exec_command import ( + filepath_from_subprocess_output, forward_bytes_to_stdout +) from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \ get_num_build_jobs, \ _commandline_dep_string @@ -159,11 +161,9 @@ def CCompiler_spawn(self, cmd, display=None): if is_sequence(cmd): cmd = ' '.join(list(cmd)) - try: - print(o) - except UnicodeError: - # When installing through pip, `o` can contain non-ascii chars - pass + + forward_bytes_to_stdout(o) + if re.search(b'Too many open files', o): msg = '\nTry rerunning setup command until build succeeds.' else: diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py index aaeca99ee..ede347b03 100644 --- a/numpy/distutils/exec_command.py +++ b/numpy/distutils/exec_command.py @@ -81,6 +81,29 @@ def filepath_from_subprocess_output(output): output = output.encode('ascii', errors='replace') return output + +def forward_bytes_to_stdout(val): + """ + Forward bytes from a subprocess call to the console, without attempting to + decode them. + + The assumption is that the subprocess call already returned bytes in + a suitable encoding. 
+ """ + if sys.version_info.major < 3: + # python 2 has binary output anyway + sys.stdout.write(val) + elif hasattr(sys.stdout, 'buffer'): + # use the underlying binary output if there is one + sys.stdout.buffer.write(val) + elif hasattr(sys.stdout, 'encoding'): + # round-trip the encoding if necessary + sys.stdout.write(val.decode(sys.stdout.encoding)) + else: + # make a best-guess at the encoding + sys.stdout.write(val.decode('utf8', errors='replace')) + + def temp_file_name(): fo, name = make_temp_file() fo.close() diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py index 489784580..4238f35cb 100644 --- a/numpy/distutils/fcompiler/environment.py +++ b/numpy/distutils/fcompiler/environment.py @@ -1,6 +1,7 @@ from __future__ import division, absolute_import, print_function import os +import warnings from distutils.dist import Distribution __metaclass__ = type @@ -54,8 +55,18 @@ class EnvironmentConfig(object): if envvar is not None: envvar_contents = os.environ.get(envvar) if envvar_contents is not None: - if var and append and os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1': - var = var + [envvar_contents] + if var and append: + if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1': + var = var + [envvar_contents] + else: + var = envvar_contents + if 'NPY_DISTUTILS_APPEND_FLAGS' not in os.environ.keys(): + msg = "{} is used as is, not appended ".format(envvar) + \ + "to flags already defined " + \ + "by numpy.distutils! Use NPY_DISTUTILS_APPEND_FLAGS=1 " + \ + "to obtain appending behavior instead (this " + \ + "behavior will become default in a future release)." + warnings.warn(msg, UserWarning, stacklevel=3) else: var = envvar_contents if confvar is not None and self._conf: diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py index 95e44b051..ba19a97ea 100644 --- a/numpy/distutils/tests/test_fcompiler.py +++ b/numpy/distutils/tests/test_fcompiler.py @@ -1,6 +1,8 @@ from __future__ import division, absolute_import, print_function -from numpy.testing import assert_ +import pytest + +from numpy.testing import assert_, suppress_warnings import numpy.distutils.fcompiler customizable_flags = [ @@ -25,6 +27,7 @@ def test_fcompiler_flags(monkeypatch): monkeypatch.setenv(envvar, new_flag) new_flags = getattr(flag_vars, opt) + monkeypatch.delenv(envvar) assert_(new_flags == [new_flag]) @@ -33,12 +36,46 @@ def test_fcompiler_flags(monkeypatch): for opt, envvar in customizable_flags: new_flag = '-dummy-{}-flag'.format(opt) prev_flags = getattr(flag_vars, opt) - monkeypatch.setenv(envvar, new_flag) new_flags = getattr(flag_vars, opt) + monkeypatch.delenv(envvar) if prev_flags is None: assert_(new_flags == [new_flag]) else: assert_(new_flags == prev_flags + [new_flag]) + +def test_fcompiler_flags_append_warning(monkeypatch): + # Test to check that the warning for append behavior changing in future + # is triggered. Need to use a real compiler instance so that we have + # non-empty flags to start with (otherwise the "if var and append" check + # will always be false). 
+ try: + with suppress_warnings() as sup: + sup.record() + fc = numpy.distutils.fcompiler.new_fcompiler(compiler='gnu95') + fc.customize() + except numpy.distutils.fcompiler.CompilerNotFound: + pytest.skip("gfortran not found, so can't execute this test") + + # Ensure NPY_DISTUTILS_APPEND_FLAGS not defined + monkeypatch.delenv('NPY_DISTUTILS_APPEND_FLAGS', raising=False) + + for opt, envvar in customizable_flags: + new_flag = '-dummy-{}-flag'.format(opt) + with suppress_warnings() as sup: + sup.record() + prev_flags = getattr(fc.flag_vars, opt) + + monkeypatch.setenv(envvar, new_flag) + with suppress_warnings() as sup: + sup.record() + new_flags = getattr(fc.flag_vars, opt) + if prev_flags: + # Check that warning was issued + assert len(sup.log) == 1 + + monkeypatch.delenv(envvar) + assert_(new_flags == [new_flag]) + diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py index a3b9423a8..a3707340d 100644 --- a/numpy/doc/glossary.py +++ b/numpy/doc/glossary.py @@ -270,13 +270,11 @@ Glossary masked_array(data = [-- 2.0 --], mask = [ True False True], fill_value = 1e+20) - <BLANKLINE> >>> x + [1, 2, 3] masked_array(data = [-- 4.0 --], mask = [ True False True], fill_value = 1e+20) - <BLANKLINE> Masked arrays are often used when operating on arrays containing diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py index 0fcdecf00..e92a06124 100644 --- a/numpy/doc/structured_arrays.py +++ b/numpy/doc/structured_arrays.py @@ -397,6 +397,15 @@ typically a non-structured array, except in the case of nested structures. >>> y.dtype, y.shape, y.strides (dtype('float32'), (2,), (12,)) +If the accessed field is a subarray, the dimensions of the subarray +are appended to the shape of the result:: + + >>> x = np.zeros((2,2), dtype=[('a', np.int32), ('b', np.float64, (3,3))]) + >>> x['a'].shape + (2, 2) + >>> x['b'].shape + (2, 2, 3, 3) + Accessing Multiple Fields ``````````````````````````` diff --git a/numpy/dual.py b/numpy/dual.py index 3a16a8ec5..651e845bb 100644 --- a/numpy/dual.py +++ b/numpy/dual.py @@ -51,14 +51,14 @@ _restore_dict = {} def register_func(name, func): if name not in __all__: - raise ValueError("%s not a dual function." % name) + raise ValueError("{} not a dual function.".format(name)) f = sys._getframe(0).f_globals _restore_dict[name] = f[name] f[name] = func def restore_func(name): if name not in __all__: - raise ValueError("%s not a dual function." % name) + raise ValueError("{} not a dual function.".format(name)) try: val = _restore_dict[name] except KeyError: diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py index 6eff41099..708f7f362 100644 --- a/numpy/f2py/__main__.py +++ b/numpy/f2py/__main__.py @@ -1,6 +1,6 @@ # See http://cens.ioc.ee/projects/f2py2e/ from __future__ import division, print_function -from f2py2e import main +from numpy.f2py.f2py2e import main main() diff --git a/numpy/fft/README.md b/numpy/fft/README.md new file mode 100644 index 000000000..7040a2e9b --- /dev/null +++ b/numpy/fft/README.md @@ -0,0 +1,53 @@ +PocketFFT +--------- + +This is a heavily modified implementation of FFTPack [1,2], with the following +advantages: + +- strictly C99 compliant +- more accurate twiddle factor computation +- very fast plan generation +- worst case complexity for transform sizes with large prime factors is + `N*log(N)`, because Bluestein's algorithm [3] is used for these cases. 
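
For readers unfamiliar with the Bluestein (chirp-z) trick cited in the list above: it rewrites a DFT of an awkward length `n` as a convolution that can be evaluated with FFTs of a convenient, highly composite length `n2 >= 2*n-1`. The sketch below is an illustrative NumPy-level reconstruction of that idea only; PocketFFT's C implementation (and its choice of `n2`) differs in detail, and the function name `bluestein_dft` is invented for this example.

```python
import numpy as np

def bluestein_dft(x):
    """Length-n DFT via a circular convolution of power-of-two length.

    Illustrative sketch of the chirp-z idea; not PocketFFT's actual code.
    """
    x = np.asarray(x, dtype=complex)
    n = len(x)
    m = np.arange(n)
    # chirp w[m] = exp(-i*pi*m^2/n); reduce the exponent mod 2n for accuracy
    w = np.exp(-1j * np.pi * ((m * m) % (2 * n)) / n)
    # smallest power of two >= 2n-1 makes the circular convolution cheap
    n2 = 1 << int(np.ceil(np.log2(2 * n - 1)))
    a = np.zeros(n2, dtype=complex)
    a[:n] = x * w
    b = np.zeros(n2, dtype=complex)
    b[:n] = np.conj(w)
    if n > 1:
        b[-(n - 1):] = np.conj(w[1:][::-1])  # wrap negative indices: b[-m] = b[m]
    conv = np.fft.ifft(np.fft.fft(a) * np.fft.fft(b))
    return w * conv[:n]

# sanity check against numpy.fft for a prime (worst-case) length
x = np.random.rand(97) + 1j * np.random.rand(97)
assert np.allclose(bluestein_dft(x), np.fft.fft(x))
```
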
+ +License +------- + +3-clause BSD (see LICENSE.md) + + +Some code details +----------------- + +Twiddle factor computation: + +- making use of symmetries to reduce number of sin/cos evaluations +- all angles are reduced to the range `[0; pi/4]` for higher accuracy +- an adapted implementation of `sincospi()` is used, which actually computes + `sin(x)` and `(cos(x)-1)`. +- if `n` sin/cos pairs are required, the adjusted `sincospi()` is only called + `2*sqrt(n)` times; the remaining values are obtained by evaluating the + angle addition theorems in a numerically accurate way. + +Parallel invocation: + +- Plans only contain read-only data; all temporary arrays are allocated and + deallocated during an individual FFT execution. This means that a single plan + can be used in several threads at the same time. + +Efficient codelets are available for the factors: + +- 2, 3, 4, 5, 7, 11 for complex-valued FFTs +- 2, 3, 4, 5 for real-valued FFTs + +Larger prime factors are handled by somewhat less efficient, generic routines. + +For lengths with very large prime factors, Bluestein's algorithm is used, and +instead of an FFT of length `n`, a convolution of length `n2 >= 2*n-1` +is performed, where `n2` is chosen to be highly composite. + + +[1] Swarztrauber, P. 1982, Vectorizing the Fast Fourier Transforms + (New York: Academic Press), 51 +[2] https://www.netlib.org/fftpack/ +[3] https://en.wikipedia.org/wiki/Chirp_Z-transform diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py index 44243b483..64b35bc19 100644 --- a/numpy/fft/__init__.py +++ b/numpy/fft/__init__.py @@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function # To get sub-modules from .info import __doc__ -from .fftpack import * +from .pocketfft import * from .helper import * from numpy._pytesttester import PytestTester diff --git a/numpy/fft/fftpack.c b/numpy/fft/fftpack.c deleted file mode 100644 index 07fa2bf4c..000000000 --- a/numpy/fft/fftpack.c +++ /dev/null @@ -1,1536 +0,0 @@ -/* - * fftpack.c : A set of FFT routines in C. - * Algorithmically based on Fortran-77 FFTPACK by Paul N. Swarztrauber (Version 4, 1985). -*/ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - -#include <Python.h> -#include <math.h> -#include <stdio.h> -#include <numpy/ndarraytypes.h> - -#define DOUBLE -#ifdef DOUBLE -#define Treal double -#else -#define Treal float -#endif - -#define ref(u,a) u[a] - -/* Macros for accurate calculation of the twiddle factors. */ -#define TWOPI 6.283185307179586476925286766559005768391 -#define cos2pi(m, n) cos((TWOPI * (m)) / (n)) -#define sin2pi(m, n) sin((TWOPI * (m)) / (n)) - -#define MAXFAC 13 /* maximum number of factors in factorization of n */ -#define NSPECIAL 4 /* number of factors for which we have special-case routines */ - -#ifdef __cplusplus -extern "C" { -#endif - -static void sincos2pi(int m, int n, Treal* si, Treal* co) -/* Calculates sin(2pi * m/n) and cos(2pi * m/n). It is more accurate - * than the naive calculation as the fraction m/n is reduced to [0, 1/8) first. - * Due to the symmetry of sin(x) and cos(x) the values for all x can be - * determined from the function values of the reduced argument in the first - * octant. 
- */ - { - int n8, m8, octant; - n8 = 8 * n; - m8 = (8 * m) % n8; - octant = m8 / n; - m8 = m8 % n; - switch(octant) { - case 0: - *co = cos2pi(m8, n8); - *si = sin2pi(m8, n8); - break; - case 1: - *co = sin2pi(n-m8, n8); - *si = cos2pi(n-m8, n8); - break; - case 2: - *co = -sin2pi(m8, n8); - *si = cos2pi(m8, n8); - break; - case 3: - *co = -cos2pi(n-m8, n8); - *si = sin2pi(n-m8, n8); - break; - case 4: - *co = -cos2pi(m8, n8); - *si = -sin2pi(m8, n8); - break; - case 5: - *co = -sin2pi(n-m8, n8); - *si = -cos2pi(n-m8, n8); - break; - case 6: - *co = sin2pi(m8, n8); - *si = -cos2pi(m8, n8); - break; - case 7: - *co = cos2pi(n-m8, n8); - *si = -sin2pi(n-m8, n8); - break; - } - } - -/* ---------------------------------------------------------------------- - passf2, passf3, passf4, passf5, passf. Complex FFT passes fwd and bwd. ------------------------------------------------------------------------ */ - -static void passf2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[], int isign) - /* isign==+1 for backward transform */ - { - int i, k, ah, ac; - Treal ti2, tr2; - if (ido <= 2) { - for (k=0; k<l1; k++) { - ah = k*ido; - ac = 2*k*ido; - ch[ah] = ref(cc,ac) + ref(cc,ac + ido); - ch[ah + ido*l1] = ref(cc,ac) - ref(cc,ac + ido); - ch[ah+1] = ref(cc,ac+1) + ref(cc,ac + ido + 1); - ch[ah + ido*l1 + 1] = ref(cc,ac+1) - ref(cc,ac + ido + 1); - } - } else { - for (k=0; k<l1; k++) { - for (i=0; i<ido-1; i+=2) { - ah = i + k*ido; - ac = i + 2*k*ido; - ch[ah] = ref(cc,ac) + ref(cc,ac + ido); - tr2 = ref(cc,ac) - ref(cc,ac + ido); - ch[ah+1] = ref(cc,ac+1) + ref(cc,ac + 1 + ido); - ti2 = ref(cc,ac+1) - ref(cc,ac + 1 + ido); - ch[ah+l1*ido+1] = wa1[i]*ti2 + isign*wa1[i+1]*tr2; - ch[ah+l1*ido] = wa1[i]*tr2 - isign*wa1[i+1]*ti2; - } - } - } - } /* passf2 */ - - -static void passf3(int ido, int l1, const Treal cc[], Treal ch[], - const Treal wa1[], const Treal wa2[], int isign) - /* isign==+1 for backward transform */ - { - static const Treal taur = -0.5; - static const Treal taui = 0.86602540378443864676; - int i, k, ac, ah; - Treal ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2; - if (ido == 2) { - for (k=1; k<=l1; k++) { - ac = (3*k - 2)*ido; - tr2 = ref(cc,ac) + ref(cc,ac + ido); - cr2 = ref(cc,ac - ido) + taur*tr2; - ah = (k - 1)*ido; - ch[ah] = ref(cc,ac - ido) + tr2; - - ti2 = ref(cc,ac + 1) + ref(cc,ac + ido + 1); - ci2 = ref(cc,ac - ido + 1) + taur*ti2; - ch[ah + 1] = ref(cc,ac - ido + 1) + ti2; - - cr3 = isign*taui*(ref(cc,ac) - ref(cc,ac + ido)); - ci3 = isign*taui*(ref(cc,ac + 1) - ref(cc,ac + ido + 1)); - ch[ah + l1*ido] = cr2 - ci3; - ch[ah + 2*l1*ido] = cr2 + ci3; - ch[ah + l1*ido + 1] = ci2 + cr3; - ch[ah + 2*l1*ido + 1] = ci2 - cr3; - } - } else { - for (k=1; k<=l1; k++) { - for (i=0; i<ido-1; i+=2) { - ac = i + (3*k - 2)*ido; - tr2 = ref(cc,ac) + ref(cc,ac + ido); - cr2 = ref(cc,ac - ido) + taur*tr2; - ah = i + (k-1)*ido; - ch[ah] = ref(cc,ac - ido) + tr2; - ti2 = ref(cc,ac + 1) + ref(cc,ac + ido + 1); - ci2 = ref(cc,ac - ido + 1) + taur*ti2; - ch[ah + 1] = ref(cc,ac - ido + 1) + ti2; - cr3 = isign*taui*(ref(cc,ac) - ref(cc,ac + ido)); - ci3 = isign*taui*(ref(cc,ac + 1) - ref(cc,ac + ido + 1)); - dr2 = cr2 - ci3; - dr3 = cr2 + ci3; - di2 = ci2 + cr3; - di3 = ci2 - cr3; - ch[ah + l1*ido + 1] = wa1[i]*di2 + isign*wa1[i+1]*dr2; - ch[ah + l1*ido] = wa1[i]*dr2 - isign*wa1[i+1]*di2; - ch[ah + 2*l1*ido + 1] = wa2[i]*di3 + isign*wa2[i+1]*dr3; - ch[ah + 2*l1*ido] = wa2[i]*dr3 - isign*wa2[i+1]*di3; - } - } - } - } /* passf3 */ - - -static void passf4(int ido, int l1, const Treal 
cc[], Treal ch[], - const Treal wa1[], const Treal wa2[], const Treal wa3[], int isign) - /* isign == -1 for forward transform and +1 for backward transform */ - { - int i, k, ac, ah; - Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; - if (ido == 2) { - for (k=0; k<l1; k++) { - ac = 4*k*ido + 1; - ti1 = ref(cc,ac) - ref(cc,ac + 2*ido); - ti2 = ref(cc,ac) + ref(cc,ac + 2*ido); - tr4 = ref(cc,ac + 3*ido) - ref(cc,ac + ido); - ti3 = ref(cc,ac + ido) + ref(cc,ac + 3*ido); - tr1 = ref(cc,ac - 1) - ref(cc,ac + 2*ido - 1); - tr2 = ref(cc,ac - 1) + ref(cc,ac + 2*ido - 1); - ti4 = ref(cc,ac + ido - 1) - ref(cc,ac + 3*ido - 1); - tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 3*ido - 1); - ah = k*ido; - ch[ah] = tr2 + tr3; - ch[ah + 2*l1*ido] = tr2 - tr3; - ch[ah + 1] = ti2 + ti3; - ch[ah + 2*l1*ido + 1] = ti2 - ti3; - ch[ah + l1*ido] = tr1 + isign*tr4; - ch[ah + 3*l1*ido] = tr1 - isign*tr4; - ch[ah + l1*ido + 1] = ti1 + isign*ti4; - ch[ah + 3*l1*ido + 1] = ti1 - isign*ti4; - } - } else { - for (k=0; k<l1; k++) { - for (i=0; i<ido-1; i+=2) { - ac = i + 1 + 4*k*ido; - ti1 = ref(cc,ac) - ref(cc,ac + 2*ido); - ti2 = ref(cc,ac) + ref(cc,ac + 2*ido); - ti3 = ref(cc,ac + ido) + ref(cc,ac + 3*ido); - tr4 = ref(cc,ac + 3*ido) - ref(cc,ac + ido); - tr1 = ref(cc,ac - 1) - ref(cc,ac + 2*ido - 1); - tr2 = ref(cc,ac - 1) + ref(cc,ac + 2*ido - 1); - ti4 = ref(cc,ac + ido - 1) - ref(cc,ac + 3*ido - 1); - tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 3*ido - 1); - ah = i + k*ido; - ch[ah] = tr2 + tr3; - cr3 = tr2 - tr3; - ch[ah + 1] = ti2 + ti3; - ci3 = ti2 - ti3; - cr2 = tr1 + isign*tr4; - cr4 = tr1 - isign*tr4; - ci2 = ti1 + isign*ti4; - ci4 = ti1 - isign*ti4; - ch[ah + l1*ido] = wa1[i]*cr2 - isign*wa1[i + 1]*ci2; - ch[ah + l1*ido + 1] = wa1[i]*ci2 + isign*wa1[i + 1]*cr2; - ch[ah + 2*l1*ido] = wa2[i]*cr3 - isign*wa2[i + 1]*ci3; - ch[ah + 2*l1*ido + 1] = wa2[i]*ci3 + isign*wa2[i + 1]*cr3; - ch[ah + 3*l1*ido] = wa3[i]*cr4 -isign*wa3[i + 1]*ci4; - ch[ah + 3*l1*ido + 1] = wa3[i]*ci4 + isign*wa3[i + 1]*cr4; - } - } - } - } /* passf4 */ - - -static void passf5(int ido, int l1, const Treal cc[], Treal ch[], - const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[], int isign) - /* isign == -1 for forward transform and +1 for backward transform */ - { - static const Treal tr11 = 0.3090169943749474241; - static const Treal ti11 = 0.95105651629515357212; - static const Treal tr12 = -0.8090169943749474241; - static const Treal ti12 = 0.58778525229247312917; - int i, k, ac, ah; - Treal ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3, - ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5; - if (ido == 2) { - for (k = 1; k <= l1; ++k) { - ac = (5*k - 4)*ido + 1; - ti5 = ref(cc,ac) - ref(cc,ac + 3*ido); - ti2 = ref(cc,ac) + ref(cc,ac + 3*ido); - ti4 = ref(cc,ac + ido) - ref(cc,ac + 2*ido); - ti3 = ref(cc,ac + ido) + ref(cc,ac + 2*ido); - tr5 = ref(cc,ac - 1) - ref(cc,ac + 3*ido - 1); - tr2 = ref(cc,ac - 1) + ref(cc,ac + 3*ido - 1); - tr4 = ref(cc,ac + ido - 1) - ref(cc,ac + 2*ido - 1); - tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 2*ido - 1); - ah = (k - 1)*ido; - ch[ah] = ref(cc,ac - ido - 1) + tr2 + tr3; - ch[ah + 1] = ref(cc,ac - ido) + ti2 + ti3; - cr2 = ref(cc,ac - ido - 1) + tr11*tr2 + tr12*tr3; - ci2 = ref(cc,ac - ido) + tr11*ti2 + tr12*ti3; - cr3 = ref(cc,ac - ido - 1) + tr12*tr2 + tr11*tr3; - ci3 = ref(cc,ac - ido) + tr12*ti2 + tr11*ti3; - cr5 = isign*(ti11*tr5 + ti12*tr4); - ci5 = isign*(ti11*ti5 + ti12*ti4); - cr4 = isign*(ti12*tr5 - ti11*tr4); - ci4 = 
isign*(ti12*ti5 - ti11*ti4); - ch[ah + l1*ido] = cr2 - ci5; - ch[ah + 4*l1*ido] = cr2 + ci5; - ch[ah + l1*ido + 1] = ci2 + cr5; - ch[ah + 2*l1*ido + 1] = ci3 + cr4; - ch[ah + 2*l1*ido] = cr3 - ci4; - ch[ah + 3*l1*ido] = cr3 + ci4; - ch[ah + 3*l1*ido + 1] = ci3 - cr4; - ch[ah + 4*l1*ido + 1] = ci2 - cr5; - } - } else { - for (k=1; k<=l1; k++) { - for (i=0; i<ido-1; i+=2) { - ac = i + 1 + (k*5 - 4)*ido; - ti5 = ref(cc,ac) - ref(cc,ac + 3*ido); - ti2 = ref(cc,ac) + ref(cc,ac + 3*ido); - ti4 = ref(cc,ac + ido) - ref(cc,ac + 2*ido); - ti3 = ref(cc,ac + ido) + ref(cc,ac + 2*ido); - tr5 = ref(cc,ac - 1) - ref(cc,ac + 3*ido - 1); - tr2 = ref(cc,ac - 1) + ref(cc,ac + 3*ido - 1); - tr4 = ref(cc,ac + ido - 1) - ref(cc,ac + 2*ido - 1); - tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 2*ido - 1); - ah = i + (k - 1)*ido; - ch[ah] = ref(cc,ac - ido - 1) + tr2 + tr3; - ch[ah + 1] = ref(cc,ac - ido) + ti2 + ti3; - cr2 = ref(cc,ac - ido - 1) + tr11*tr2 + tr12*tr3; - - ci2 = ref(cc,ac - ido) + tr11*ti2 + tr12*ti3; - cr3 = ref(cc,ac - ido - 1) + tr12*tr2 + tr11*tr3; - - ci3 = ref(cc,ac - ido) + tr12*ti2 + tr11*ti3; - cr5 = isign*(ti11*tr5 + ti12*tr4); - ci5 = isign*(ti11*ti5 + ti12*ti4); - cr4 = isign*(ti12*tr5 - ti11*tr4); - ci4 = isign*(ti12*ti5 - ti11*ti4); - dr3 = cr3 - ci4; - dr4 = cr3 + ci4; - di3 = ci3 + cr4; - di4 = ci3 - cr4; - dr5 = cr2 + ci5; - dr2 = cr2 - ci5; - di5 = ci2 - cr5; - di2 = ci2 + cr5; - ch[ah + l1*ido] = wa1[i]*dr2 - isign*wa1[i+1]*di2; - ch[ah + l1*ido + 1] = wa1[i]*di2 + isign*wa1[i+1]*dr2; - ch[ah + 2*l1*ido] = wa2[i]*dr3 - isign*wa2[i+1]*di3; - ch[ah + 2*l1*ido + 1] = wa2[i]*di3 + isign*wa2[i+1]*dr3; - ch[ah + 3*l1*ido] = wa3[i]*dr4 - isign*wa3[i+1]*di4; - ch[ah + 3*l1*ido + 1] = wa3[i]*di4 + isign*wa3[i+1]*dr4; - ch[ah + 4*l1*ido] = wa4[i]*dr5 - isign*wa4[i+1]*di5; - ch[ah + 4*l1*ido + 1] = wa4[i]*di5 + isign*wa4[i+1]*dr5; - } - } - } - } /* passf5 */ - - -static void passf(int *nac, int ido, int ip, int l1, int idl1, - Treal cc[], Treal ch[], - const Treal wa[], int isign) - /* isign is -1 for forward transform and +1 for backward transform */ - { - int idij, idlj, idot, ipph, i, j, k, l, jc, lc, ik, idj, idl, inc,idp; - Treal wai, war; - - idot = ido / 2; - /* nt = ip*idl1;*/ - ipph = (ip + 1) / 2; - idp = ip*ido; - if (ido >= l1) { - for (j=1; j<ipph; j++) { - jc = ip - j; - for (k=0; k<l1; k++) { - for (i=0; i<ido; i++) { - ch[i + (k + j*l1)*ido] = - ref(cc,i + (j + k*ip)*ido) + ref(cc,i + (jc + k*ip)*ido); - ch[i + (k + jc*l1)*ido] = - ref(cc,i + (j + k*ip)*ido) - ref(cc,i + (jc + k*ip)*ido); - } - } - } - for (k=0; k<l1; k++) - for (i=0; i<ido; i++) - ch[i + k*ido] = ref(cc,i + k*ip*ido); - } else { - for (j=1; j<ipph; j++) { - jc = ip - j; - for (i=0; i<ido; i++) { - for (k=0; k<l1; k++) { - ch[i + (k + j*l1)*ido] = ref(cc,i + (j + k*ip)*ido) + ref(cc,i + (jc + k* - ip)*ido); - ch[i + (k + jc*l1)*ido] = ref(cc,i + (j + k*ip)*ido) - ref(cc,i + (jc + k* - ip)*ido); - } - } - } - for (i=0; i<ido; i++) - for (k=0; k<l1; k++) - ch[i + k*ido] = ref(cc,i + k*ip*ido); - } - - idl = 2 - ido; - inc = 0; - for (l=1; l<ipph; l++) { - lc = ip - l; - idl += ido; - for (ik=0; ik<idl1; ik++) { - cc[ik + l*idl1] = ch[ik] + wa[idl - 2]*ch[ik + idl1]; - cc[ik + lc*idl1] = isign*wa[idl-1]*ch[ik + (ip-1)*idl1]; - } - idlj = idl; - inc += ido; - for (j=2; j<ipph; j++) { - jc = ip - j; - idlj += inc; - if (idlj > idp) idlj -= idp; - war = wa[idlj - 2]; - wai = wa[idlj-1]; - for (ik=0; ik<idl1; ik++) { - cc[ik + l*idl1] += war*ch[ik + j*idl1]; - cc[ik + lc*idl1] += isign*wai*ch[ik + jc*idl1]; - } - } - 
} - for (j=1; j<ipph; j++) - for (ik=0; ik<idl1; ik++) - ch[ik] += ch[ik + j*idl1]; - for (j=1; j<ipph; j++) { - jc = ip - j; - for (ik=1; ik<idl1; ik+=2) { - ch[ik - 1 + j*idl1] = cc[ik - 1 + j*idl1] - cc[ik + jc*idl1]; - ch[ik - 1 + jc*idl1] = cc[ik - 1 + j*idl1] + cc[ik + jc*idl1]; - ch[ik + j*idl1] = cc[ik + j*idl1] + cc[ik - 1 + jc*idl1]; - ch[ik + jc*idl1] = cc[ik + j*idl1] - cc[ik - 1 + jc*idl1]; - } - } - *nac = 1; - if (ido == 2) return; - *nac = 0; - for (ik=0; ik<idl1; ik++) - cc[ik] = ch[ik]; - for (j=1; j<ip; j++) { - for (k=0; k<l1; k++) { - cc[(k + j*l1)*ido + 0] = ch[(k + j*l1)*ido + 0]; - cc[(k + j*l1)*ido + 1] = ch[(k + j*l1)*ido + 1]; - } - } - if (idot <= l1) { - idij = 0; - for (j=1; j<ip; j++) { - idij += 2; - for (i=3; i<ido; i+=2) { - idij += 2; - for (k=0; k<l1; k++) { - cc[i - 1 + (k + j*l1)*ido] = - wa[idij - 2]*ch[i - 1 + (k + j*l1)*ido] - - isign*wa[idij-1]*ch[i + (k + j*l1)*ido]; - cc[i + (k + j*l1)*ido] = - wa[idij - 2]*ch[i + (k + j*l1)*ido] + - isign*wa[idij-1]*ch[i - 1 + (k + j*l1)*ido]; - } - } - } - } else { - idj = 2 - ido; - for (j=1; j<ip; j++) { - idj += ido; - for (k = 0; k < l1; k++) { - idij = idj; - for (i=3; i<ido; i+=2) { - idij += 2; - cc[i - 1 + (k + j*l1)*ido] = - wa[idij - 2]*ch[i - 1 + (k + j*l1)*ido] - - isign*wa[idij-1]*ch[i + (k + j*l1)*ido]; - cc[i + (k + j*l1)*ido] = - wa[idij - 2]*ch[i + (k + j*l1)*ido] + - isign*wa[idij-1]*ch[i - 1 + (k + j*l1)*ido]; - } - } - } - } - } /* passf */ - - - /* ---------------------------------------------------------------------- -radf2,radb2, radf3,radb3, radf4,radb4, radf5,radb5, radfg,radbg. -Treal FFT passes fwd and bwd. ----------------------------------------------------------------------- */ - -static void radf2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[]) - { - int i, k, ic; - Treal ti2, tr2; - for (k=0; k<l1; k++) { - ch[2*k*ido] = - ref(cc,k*ido) + ref(cc,(k + l1)*ido); - ch[(2*k+1)*ido + ido-1] = - ref(cc,k*ido) - ref(cc,(k + l1)*ido); - } - if (ido < 2) return; - if (ido != 2) { - for (k=0; k<l1; k++) { - for (i=2; i<ido; i+=2) { - ic = ido - i; - tr2 = wa1[i - 2]*ref(cc, i-1 + (k + l1)*ido) + wa1[i - 1]*ref(cc, i + (k + l1)*ido); - ti2 = wa1[i - 2]*ref(cc, i + (k + l1)*ido) - wa1[i - 1]*ref(cc, i-1 + (k + l1)*ido); - ch[i + 2*k*ido] = ref(cc,i + k*ido) + ti2; - ch[ic + (2*k+1)*ido] = ti2 - ref(cc,i + k*ido); - ch[i - 1 + 2*k*ido] = ref(cc,i - 1 + k*ido) + tr2; - ch[ic - 1 + (2*k+1)*ido] = ref(cc,i - 1 + k*ido) - tr2; - } - } - if (ido % 2 == 1) return; - } - for (k=0; k<l1; k++) { - ch[(2*k+1)*ido] = -ref(cc,ido-1 + (k + l1)*ido); - ch[ido-1 + 2*k*ido] = ref(cc,ido-1 + k*ido); - } - } /* radf2 */ - - -static void radb2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[]) - { - int i, k, ic; - Treal ti2, tr2; - for (k=0; k<l1; k++) { - ch[k*ido] = - ref(cc,2*k*ido) + ref(cc,ido-1 + (2*k+1)*ido); - ch[(k + l1)*ido] = - ref(cc,2*k*ido) - ref(cc,ido-1 + (2*k+1)*ido); - } - if (ido < 2) return; - if (ido != 2) { - for (k = 0; k < l1; ++k) { - for (i = 2; i < ido; i += 2) { - ic = ido - i; - ch[i-1 + k*ido] = - ref(cc,i-1 + 2*k*ido) + ref(cc,ic-1 + (2*k+1)*ido); - tr2 = ref(cc,i-1 + 2*k*ido) - ref(cc,ic-1 + (2*k+1)*ido); - ch[i + k*ido] = - ref(cc,i + 2*k*ido) - ref(cc,ic + (2*k+1)*ido); - ti2 = ref(cc,i + (2*k)*ido) + ref(cc,ic + (2*k+1)*ido); - ch[i-1 + (k + l1)*ido] = - wa1[i - 2]*tr2 - wa1[i - 1]*ti2; - ch[i + (k + l1)*ido] = - wa1[i - 2]*ti2 + wa1[i - 1]*tr2; - } - } - if (ido % 2 == 1) return; - } - for (k = 0; k < l1; k++) { - ch[ido-1 + k*ido] = 
2*ref(cc,ido-1 + 2*k*ido); - ch[ido-1 + (k + l1)*ido] = -2*ref(cc,(2*k+1)*ido); - } - } /* radb2 */ - - -static void radf3(int ido, int l1, const Treal cc[], Treal ch[], - const Treal wa1[], const Treal wa2[]) - { - static const Treal taur = -0.5; - static const Treal taui = 0.86602540378443864676; - int i, k, ic; - Treal ci2, di2, di3, cr2, dr2, dr3, ti2, ti3, tr2, tr3; - for (k=0; k<l1; k++) { - cr2 = ref(cc,(k + l1)*ido) + ref(cc,(k + 2*l1)*ido); - ch[3*k*ido] = ref(cc,k*ido) + cr2; - ch[(3*k+2)*ido] = taui*(ref(cc,(k + l1*2)*ido) - ref(cc,(k + l1)*ido)); - ch[ido-1 + (3*k + 1)*ido] = ref(cc,k*ido) + taur*cr2; - } - if (ido == 1) return; - for (k=0; k<l1; k++) { - for (i=2; i<ido; i+=2) { - ic = ido - i; - dr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) + - wa1[i - 1]*ref(cc,i + (k + l1)*ido); - di2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido); - dr3 = wa2[i - 2]*ref(cc,i - 1 + (k + l1*2)*ido) + wa2[i - 1]*ref(cc,i + (k + l1*2)*ido); - di3 = wa2[i - 2]*ref(cc,i + (k + l1*2)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + l1*2)*ido); - cr2 = dr2 + dr3; - ci2 = di2 + di3; - ch[i - 1 + 3*k*ido] = ref(cc,i - 1 + k*ido) + cr2; - ch[i + 3*k*ido] = ref(cc,i + k*ido) + ci2; - tr2 = ref(cc,i - 1 + k*ido) + taur*cr2; - ti2 = ref(cc,i + k*ido) + taur*ci2; - tr3 = taui*(di2 - di3); - ti3 = taui*(dr3 - dr2); - ch[i - 1 + (3*k + 2)*ido] = tr2 + tr3; - ch[ic - 1 + (3*k + 1)*ido] = tr2 - tr3; - ch[i + (3*k + 2)*ido] = ti2 + ti3; - ch[ic + (3*k + 1)*ido] = ti3 - ti2; - } - } - } /* radf3 */ - - -static void radb3(int ido, int l1, const Treal cc[], Treal ch[], - const Treal wa1[], const Treal wa2[]) - { - static const Treal taur = -0.5; - static const Treal taui = 0.86602540378443864676; - int i, k, ic; - Treal ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2; - for (k=0; k<l1; k++) { - tr2 = 2*ref(cc,ido-1 + (3*k + 1)*ido); - cr2 = ref(cc,3*k*ido) + taur*tr2; - ch[k*ido] = ref(cc,3*k*ido) + tr2; - ci3 = 2*taui*ref(cc,(3*k + 2)*ido); - ch[(k + l1)*ido] = cr2 - ci3; - ch[(k + 2*l1)*ido] = cr2 + ci3; - } - if (ido == 1) return; - for (k=0; k<l1; k++) { - for (i=2; i<ido; i+=2) { - ic = ido - i; - tr2 = ref(cc,i - 1 + (3*k + 2)*ido) + ref(cc,ic - 1 + (3*k + 1)*ido); - cr2 = ref(cc,i - 1 + 3*k*ido) + taur*tr2; - ch[i - 1 + k*ido] = ref(cc,i - 1 + 3*k*ido) + tr2; - ti2 = ref(cc,i + (3*k + 2)*ido) - ref(cc,ic + (3*k + 1)*ido); - ci2 = ref(cc,i + 3*k*ido) + taur*ti2; - ch[i + k*ido] = ref(cc,i + 3*k*ido) + ti2; - cr3 = taui*(ref(cc,i - 1 + (3*k + 2)*ido) - ref(cc,ic - 1 + (3*k + 1)*ido)); - ci3 = taui*(ref(cc,i + (3*k + 2)*ido) + ref(cc,ic + (3*k + 1)*ido)); - dr2 = cr2 - ci3; - dr3 = cr2 + ci3; - di2 = ci2 + cr3; - di3 = ci2 - cr3; - ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*dr2 - wa1[i - 1]*di2; - ch[i + (k + l1)*ido] = wa1[i - 2]*di2 + wa1[i - 1]*dr2; - ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*dr3 - wa2[i - 1]*di3; - ch[i + (k + 2*l1)*ido] = wa2[i - 2]*di3 + wa2[i - 1]*dr3; - } - } - } /* radb3 */ - - -static void radf4(int ido, int l1, const Treal cc[], Treal ch[], - const Treal wa1[], const Treal wa2[], const Treal wa3[]) - { - static const Treal hsqt2 = 0.70710678118654752440; - int i, k, ic; - Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; - for (k=0; k<l1; k++) { - tr1 = ref(cc,(k + l1)*ido) + ref(cc,(k + 3*l1)*ido); - tr2 = ref(cc,k*ido) + ref(cc,(k + 2*l1)*ido); - ch[4*k*ido] = tr1 + tr2; - ch[ido-1 + (4*k + 3)*ido] = tr2 - tr1; - ch[ido-1 + (4*k + 1)*ido] = ref(cc,k*ido) - ref(cc,(k + 2*l1)*ido); - ch[(4*k + 2)*ido] = ref(cc,(k + 3*l1)*ido) - ref(cc,(k + 
l1)*ido); - } - if (ido < 2) return; - if (ido != 2) { - for (k=0; k<l1; k++) { - for (i=2; i<ido; i += 2) { - ic = ido - i; - cr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) + wa1[i - 1]*ref(cc,i + (k + l1)*ido); - ci2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido); - cr3 = wa2[i - 2]*ref(cc,i - 1 + (k + 2*l1)*ido) + wa2[i - 1]*ref(cc,i + (k + 2*l1)* - ido); - ci3 = wa2[i - 2]*ref(cc,i + (k + 2*l1)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + 2*l1)* - ido); - cr4 = wa3[i - 2]*ref(cc,i - 1 + (k + 3*l1)*ido) + wa3[i - 1]*ref(cc,i + (k + 3*l1)* - ido); - ci4 = wa3[i - 2]*ref(cc,i + (k + 3*l1)*ido) - wa3[i - 1]*ref(cc,i - 1 + (k + 3*l1)* - ido); - tr1 = cr2 + cr4; - tr4 = cr4 - cr2; - ti1 = ci2 + ci4; - ti4 = ci2 - ci4; - ti2 = ref(cc,i + k*ido) + ci3; - ti3 = ref(cc,i + k*ido) - ci3; - tr2 = ref(cc,i - 1 + k*ido) + cr3; - tr3 = ref(cc,i - 1 + k*ido) - cr3; - ch[i - 1 + 4*k*ido] = tr1 + tr2; - ch[ic - 1 + (4*k + 3)*ido] = tr2 - tr1; - ch[i + 4*k*ido] = ti1 + ti2; - ch[ic + (4*k + 3)*ido] = ti1 - ti2; - ch[i - 1 + (4*k + 2)*ido] = ti4 + tr3; - ch[ic - 1 + (4*k + 1)*ido] = tr3 - ti4; - ch[i + (4*k + 2)*ido] = tr4 + ti3; - ch[ic + (4*k + 1)*ido] = tr4 - ti3; - } - } - if (ido % 2 == 1) return; - } - for (k=0; k<l1; k++) { - ti1 = -hsqt2*(ref(cc,ido-1 + (k + l1)*ido) + ref(cc,ido-1 + (k + 3*l1)*ido)); - tr1 = hsqt2*(ref(cc,ido-1 + (k + l1)*ido) - ref(cc,ido-1 + (k + 3*l1)*ido)); - ch[ido-1 + 4*k*ido] = tr1 + ref(cc,ido-1 + k*ido); - ch[ido-1 + (4*k + 2)*ido] = ref(cc,ido-1 + k*ido) - tr1; - ch[(4*k + 1)*ido] = ti1 - ref(cc,ido-1 + (k + 2*l1)*ido); - ch[(4*k + 3)*ido] = ti1 + ref(cc,ido-1 + (k + 2*l1)*ido); - } - } /* radf4 */ - - -static void radb4(int ido, int l1, const Treal cc[], Treal ch[], - const Treal wa1[], const Treal wa2[], const Treal wa3[]) - { - static const Treal sqrt2 = 1.41421356237309504880; - int i, k, ic; - Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; - for (k = 0; k < l1; k++) { - tr1 = ref(cc,4*k*ido) - ref(cc,ido-1 + (4*k + 3)*ido); - tr2 = ref(cc,4*k*ido) + ref(cc,ido-1 + (4*k + 3)*ido); - tr3 = ref(cc,ido-1 + (4*k + 1)*ido) + ref(cc,ido-1 + (4*k + 1)*ido); - tr4 = ref(cc,(4*k + 2)*ido) + ref(cc,(4*k + 2)*ido); - ch[k*ido] = tr2 + tr3; - ch[(k + l1)*ido] = tr1 - tr4; - ch[(k + 2*l1)*ido] = tr2 - tr3; - ch[(k + 3*l1)*ido] = tr1 + tr4; - } - if (ido < 2) return; - if (ido != 2) { - for (k = 0; k < l1; ++k) { - for (i = 2; i < ido; i += 2) { - ic = ido - i; - ti1 = ref(cc,i + 4*k*ido) + ref(cc,ic + (4*k + 3)*ido); - ti2 = ref(cc,i + 4*k*ido) - ref(cc,ic + (4*k + 3)*ido); - ti3 = ref(cc,i + (4*k + 2)*ido) - ref(cc,ic + (4*k + 1)*ido); - tr4 = ref(cc,i + (4*k + 2)*ido) + ref(cc,ic + (4*k + 1)*ido); - tr1 = ref(cc,i - 1 + 4*k*ido) - ref(cc,ic - 1 + (4*k + 3)*ido); - tr2 = ref(cc,i - 1 + 4*k*ido) + ref(cc,ic - 1 + (4*k + 3)*ido); - ti4 = ref(cc,i - 1 + (4*k + 2)*ido) - ref(cc,ic - 1 + (4*k + 1)*ido); - tr3 = ref(cc,i - 1 + (4*k + 2)*ido) + ref(cc,ic - 1 + (4*k + 1)*ido); - ch[i - 1 + k*ido] = tr2 + tr3; - cr3 = tr2 - tr3; - ch[i + k*ido] = ti2 + ti3; - ci3 = ti2 - ti3; - cr2 = tr1 - tr4; - cr4 = tr1 + tr4; - ci2 = ti1 + ti4; - ci4 = ti1 - ti4; - ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*cr2 - wa1[i - 1]*ci2; - ch[i + (k + l1)*ido] = wa1[i - 2]*ci2 + wa1[i - 1]*cr2; - ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*cr3 - wa2[i - 1]*ci3; - ch[i + (k + 2*l1)*ido] = wa2[i - 2]*ci3 + wa2[i - 1]*cr3; - ch[i - 1 + (k + 3*l1)*ido] = wa3[i - 2]*cr4 - wa3[i - 1]*ci4; - ch[i + (k + 3*l1)*ido] = wa3[i - 2]*ci4 + wa3[i - 1]*cr4; - } - } - if (ido % 2 
== 1) return; - } - for (k = 0; k < l1; k++) { - ti1 = ref(cc,(4*k + 1)*ido) + ref(cc,(4*k + 3)*ido); - ti2 = ref(cc,(4*k + 3)*ido) - ref(cc,(4*k + 1)*ido); - tr1 = ref(cc,ido-1 + 4*k*ido) - ref(cc,ido-1 + (4*k + 2)*ido); - tr2 = ref(cc,ido-1 + 4*k*ido) + ref(cc,ido-1 + (4*k + 2)*ido); - ch[ido-1 + k*ido] = tr2 + tr2; - ch[ido-1 + (k + l1)*ido] = sqrt2*(tr1 - ti1); - ch[ido-1 + (k + 2*l1)*ido] = ti2 + ti2; - ch[ido-1 + (k + 3*l1)*ido] = -sqrt2*(tr1 + ti1); - } - } /* radb4 */ - - -static void radf5(int ido, int l1, const Treal cc[], Treal ch[], - const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[]) - { - static const Treal tr11 = 0.3090169943749474241; - static const Treal ti11 = 0.95105651629515357212; - static const Treal tr12 = -0.8090169943749474241; - static const Treal ti12 = 0.58778525229247312917; - int i, k, ic; - Treal ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5, - cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5; - for (k = 0; k < l1; k++) { - cr2 = ref(cc,(k + 4*l1)*ido) + ref(cc,(k + l1)*ido); - ci5 = ref(cc,(k + 4*l1)*ido) - ref(cc,(k + l1)*ido); - cr3 = ref(cc,(k + 3*l1)*ido) + ref(cc,(k + 2*l1)*ido); - ci4 = ref(cc,(k + 3*l1)*ido) - ref(cc,(k + 2*l1)*ido); - ch[5*k*ido] = ref(cc,k*ido) + cr2 + cr3; - ch[ido-1 + (5*k + 1)*ido] = ref(cc,k*ido) + tr11*cr2 + tr12*cr3; - ch[(5*k + 2)*ido] = ti11*ci5 + ti12*ci4; - ch[ido-1 + (5*k + 3)*ido] = ref(cc,k*ido) + tr12*cr2 + tr11*cr3; - ch[(5*k + 4)*ido] = ti12*ci5 - ti11*ci4; - } - if (ido == 1) return; - for (k = 0; k < l1; ++k) { - for (i = 2; i < ido; i += 2) { - ic = ido - i; - dr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) + wa1[i - 1]*ref(cc,i + (k + l1)*ido); - di2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido); - dr3 = wa2[i - 2]*ref(cc,i - 1 + (k + 2*l1)*ido) + wa2[i - 1]*ref(cc,i + (k + 2*l1)*ido); - di3 = wa2[i - 2]*ref(cc,i + (k + 2*l1)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + 2*l1)*ido); - dr4 = wa3[i - 2]*ref(cc,i - 1 + (k + 3*l1)*ido) + wa3[i - 1]*ref(cc,i + (k + 3*l1)*ido); - di4 = wa3[i - 2]*ref(cc,i + (k + 3*l1)*ido) - wa3[i - 1]*ref(cc,i - 1 + (k + 3*l1)*ido); - dr5 = wa4[i - 2]*ref(cc,i - 1 + (k + 4*l1)*ido) + wa4[i - 1]*ref(cc,i + (k + 4*l1)*ido); - di5 = wa4[i - 2]*ref(cc,i + (k + 4*l1)*ido) - wa4[i - 1]*ref(cc,i - 1 + (k + 4*l1)*ido); - cr2 = dr2 + dr5; - ci5 = dr5 - dr2; - cr5 = di2 - di5; - ci2 = di2 + di5; - cr3 = dr3 + dr4; - ci4 = dr4 - dr3; - cr4 = di3 - di4; - ci3 = di3 + di4; - ch[i - 1 + 5*k*ido] = ref(cc,i - 1 + k*ido) + cr2 + cr3; - ch[i + 5*k*ido] = ref(cc,i + k*ido) + ci2 + ci3; - tr2 = ref(cc,i - 1 + k*ido) + tr11*cr2 + tr12*cr3; - ti2 = ref(cc,i + k*ido) + tr11*ci2 + tr12*ci3; - tr3 = ref(cc,i - 1 + k*ido) + tr12*cr2 + tr11*cr3; - ti3 = ref(cc,i + k*ido) + tr12*ci2 + tr11*ci3; - tr5 = ti11*cr5 + ti12*cr4; - ti5 = ti11*ci5 + ti12*ci4; - tr4 = ti12*cr5 - ti11*cr4; - ti4 = ti12*ci5 - ti11*ci4; - ch[i - 1 + (5*k + 2)*ido] = tr2 + tr5; - ch[ic - 1 + (5*k + 1)*ido] = tr2 - tr5; - ch[i + (5*k + 2)*ido] = ti2 + ti5; - ch[ic + (5*k + 1)*ido] = ti5 - ti2; - ch[i - 1 + (5*k + 4)*ido] = tr3 + tr4; - ch[ic - 1 + (5*k + 3)*ido] = tr3 - tr4; - ch[i + (5*k + 4)*ido] = ti3 + ti4; - ch[ic + (5*k + 3)*ido] = ti4 - ti3; - } - } - } /* radf5 */ - - -static void radb5(int ido, int l1, const Treal cc[], Treal ch[], - const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[]) - { - static const Treal tr11 = 0.3090169943749474241; - static const Treal ti11 = 0.95105651629515357212; - static const Treal tr12 = 
-0.8090169943749474241; - static const Treal ti12 = 0.58778525229247312917; - int i, k, ic; - Treal ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3, - ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5; - for (k = 0; k < l1; k++) { - ti5 = 2*ref(cc,(5*k + 2)*ido); - ti4 = 2*ref(cc,(5*k + 4)*ido); - tr2 = 2*ref(cc,ido-1 + (5*k + 1)*ido); - tr3 = 2*ref(cc,ido-1 + (5*k + 3)*ido); - ch[k*ido] = ref(cc,5*k*ido) + tr2 + tr3; - cr2 = ref(cc,5*k*ido) + tr11*tr2 + tr12*tr3; - cr3 = ref(cc,5*k*ido) + tr12*tr2 + tr11*tr3; - ci5 = ti11*ti5 + ti12*ti4; - ci4 = ti12*ti5 - ti11*ti4; - ch[(k + l1)*ido] = cr2 - ci5; - ch[(k + 2*l1)*ido] = cr3 - ci4; - ch[(k + 3*l1)*ido] = cr3 + ci4; - ch[(k + 4*l1)*ido] = cr2 + ci5; - } - if (ido == 1) return; - for (k = 0; k < l1; ++k) { - for (i = 2; i < ido; i += 2) { - ic = ido - i; - ti5 = ref(cc,i + (5*k + 2)*ido) + ref(cc,ic + (5*k + 1)*ido); - ti2 = ref(cc,i + (5*k + 2)*ido) - ref(cc,ic + (5*k + 1)*ido); - ti4 = ref(cc,i + (5*k + 4)*ido) + ref(cc,ic + (5*k + 3)*ido); - ti3 = ref(cc,i + (5*k + 4)*ido) - ref(cc,ic + (5*k + 3)*ido); - tr5 = ref(cc,i - 1 + (5*k + 2)*ido) - ref(cc,ic - 1 + (5*k + 1)*ido); - tr2 = ref(cc,i - 1 + (5*k + 2)*ido) + ref(cc,ic - 1 + (5*k + 1)*ido); - tr4 = ref(cc,i - 1 + (5*k + 4)*ido) - ref(cc,ic - 1 + (5*k + 3)*ido); - tr3 = ref(cc,i - 1 + (5*k + 4)*ido) + ref(cc,ic - 1 + (5*k + 3)*ido); - ch[i - 1 + k*ido] = ref(cc,i - 1 + 5*k*ido) + tr2 + tr3; - ch[i + k*ido] = ref(cc,i + 5*k*ido) + ti2 + ti3; - cr2 = ref(cc,i - 1 + 5*k*ido) + tr11*tr2 + tr12*tr3; - - ci2 = ref(cc,i + 5*k*ido) + tr11*ti2 + tr12*ti3; - cr3 = ref(cc,i - 1 + 5*k*ido) + tr12*tr2 + tr11*tr3; - - ci3 = ref(cc,i + 5*k*ido) + tr12*ti2 + tr11*ti3; - cr5 = ti11*tr5 + ti12*tr4; - ci5 = ti11*ti5 + ti12*ti4; - cr4 = ti12*tr5 - ti11*tr4; - ci4 = ti12*ti5 - ti11*ti4; - dr3 = cr3 - ci4; - dr4 = cr3 + ci4; - di3 = ci3 + cr4; - di4 = ci3 - cr4; - dr5 = cr2 + ci5; - dr2 = cr2 - ci5; - di5 = ci2 - cr5; - di2 = ci2 + cr5; - ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*dr2 - wa1[i - 1]*di2; - ch[i + (k + l1)*ido] = wa1[i - 2]*di2 + wa1[i - 1]*dr2; - ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*dr3 - wa2[i - 1]*di3; - ch[i + (k + 2*l1)*ido] = wa2[i - 2]*di3 + wa2[i - 1]*dr3; - ch[i - 1 + (k + 3*l1)*ido] = wa3[i - 2]*dr4 - wa3[i - 1]*di4; - ch[i + (k + 3*l1)*ido] = wa3[i - 2]*di4 + wa3[i - 1]*dr4; - ch[i - 1 + (k + 4*l1)*ido] = wa4[i - 2]*dr5 - wa4[i - 1]*di5; - ch[i + (k + 4*l1)*ido] = wa4[i - 2]*di5 + wa4[i - 1]*dr5; - } - } - } /* radb5 */ - - -static void radfg(int ido, int ip, int l1, int idl1, - Treal cc[], Treal ch[], const Treal wa[]) - { - int idij, ipph, i, j, k, l, j2, ic, jc, lc, ik, is, nbd; - Treal dc2, ai1, ai2, ar1, ar2, ds2, dcp, dsp, ar1h, ar2h; - sincos2pi(1, ip, &dsp, &dcp); - ipph = (ip + 1) / 2; - nbd = (ido - 1) / 2; - if (ido != 1) { - for (ik=0; ik<idl1; ik++) ch[ik] = cc[ik]; - for (j=1; j<ip; j++) - for (k=0; k<l1; k++) - ch[(k + j*l1)*ido] = cc[(k + j*l1)*ido]; - if (nbd <= l1) { - is = -ido; - for (j=1; j<ip; j++) { - is += ido; - idij = is-1; - for (i=2; i<ido; i+=2) { - idij += 2; - for (k=0; k<l1; k++) { - ch[i - 1 + (k + j*l1)*ido] = - wa[idij - 1]*cc[i - 1 + (k + j*l1)*ido] + wa[idij]*cc[i + (k + j*l1)*ido]; - ch[i + (k + j*l1)*ido] = - wa[idij - 1]*cc[i + (k + j*l1)*ido] - wa[idij]*cc[i - 1 + (k + j*l1)*ido]; - } - } - } - } else { - is = -ido; - for (j=1; j<ip; j++) { - is += ido; - for (k=0; k<l1; k++) { - idij = is-1; - for (i=2; i<ido; i+=2) { - idij += 2; - ch[i - 1 + (k + j*l1)*ido] = - wa[idij - 1]*cc[i - 1 + (k + j*l1)*ido] + wa[idij]*cc[i + (k + 
j*l1)*ido]; - ch[i + (k + j*l1)*ido] = - wa[idij - 1]*cc[i + (k + j*l1)*ido] - wa[idij]*cc[i - 1 + (k + j*l1)*ido]; - } - } - } - } - if (nbd >= l1) { - for (j=1; j<ipph; j++) { - jc = ip - j; - for (k=0; k<l1; k++) { - for (i=2; i<ido; i+=2) { - cc[i - 1 + (k + j*l1)*ido] = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido]; - cc[i - 1 + (k + jc*l1)*ido] = ch[i + (k + j*l1)*ido] - ch[i + (k + jc*l1)*ido]; - cc[i + (k + j*l1)*ido] = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido]; - cc[i + (k + jc*l1)*ido] = ch[i - 1 + (k + jc*l1)*ido] - ch[i - 1 + (k + j*l1)*ido]; - } - } - } - } else { - for (j=1; j<ipph; j++) { - jc = ip - j; - for (i=2; i<ido; i+=2) { - for (k=0; k<l1; k++) { - cc[i - 1 + (k + j*l1)*ido] = - ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido]; - cc[i - 1 + (k + jc*l1)*ido] = ch[i + (k + j*l1)*ido] - ch[i + (k + jc*l1)*ido]; - cc[i + (k + j*l1)*ido] = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido]; - cc[i + (k + jc*l1)*ido] = ch[i - 1 + (k + jc*l1)*ido] - ch[i - 1 + (k + j*l1)*ido]; - } - } - } - } - } else { /* now ido == 1 */ - for (ik=0; ik<idl1; ik++) cc[ik] = ch[ik]; - } - for (j=1; j<ipph; j++) { - jc = ip - j; - for (k=0; k<l1; k++) { - cc[(k + j*l1)*ido] = ch[(k + j*l1)*ido] + ch[(k + jc*l1)*ido]; - cc[(k + jc*l1)*ido] = ch[(k + jc*l1)*ido] - ch[(k + j*l1)*ido]; - } - } - - ar1 = 1; - ai1 = 0; - for (l=1; l<ipph; l++) { - lc = ip - l; - ar1h = dcp*ar1 - dsp*ai1; - ai1 = dcp*ai1 + dsp*ar1; - ar1 = ar1h; - for (ik=0; ik<idl1; ik++) { - ch[ik + l*idl1] = cc[ik] + ar1*cc[ik + idl1]; - ch[ik + lc*idl1] = ai1*cc[ik + (ip-1)*idl1]; - } - dc2 = ar1; - ds2 = ai1; - ar2 = ar1; - ai2 = ai1; - for (j=2; j<ipph; j++) { - jc = ip - j; - ar2h = dc2*ar2 - ds2*ai2; - ai2 = dc2*ai2 + ds2*ar2; - ar2 = ar2h; - for (ik=0; ik<idl1; ik++) { - ch[ik + l*idl1] += ar2*cc[ik + j*idl1]; - ch[ik + lc*idl1] += ai2*cc[ik + jc*idl1]; - } - } - } - - for (j=1; j<ipph; j++) - for (ik=0; ik<idl1; ik++) - ch[ik] += cc[ik + j*idl1]; - - if (ido >= l1) { - for (k=0; k<l1; k++) { - for (i=0; i<ido; i++) { - ref(cc,i + k*ip*ido) = ch[i + k*ido]; - } - } - } else { - for (i=0; i<ido; i++) { - for (k=0; k<l1; k++) { - ref(cc,i + k*ip*ido) = ch[i + k*ido]; - } - } - } - for (j=1; j<ipph; j++) { - jc = ip - j; - j2 = 2*j; - for (k=0; k<l1; k++) { - ref(cc,ido-1 + (j2 - 1 + k*ip)*ido) = - ch[(k + j*l1)*ido]; - ref(cc,(j2 + k*ip)*ido) = - ch[(k + jc*l1)*ido]; - } - } - if (ido == 1) return; - if (nbd >= l1) { - for (j=1; j<ipph; j++) { - jc = ip - j; - j2 = 2*j; - for (k=0; k<l1; k++) { - for (i=2; i<ido; i+=2) { - ic = ido - i; - ref(cc,i - 1 + (j2 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido]; - ref(cc,ic - 1 + (j2 - 1 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] - ch[i - 1 + (k + jc*l1)*ido]; - ref(cc,i + (j2 + k*ip)*ido) = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido]; - ref(cc,ic + (j2 - 1 + k*ip)*ido) = ch[i + (k + jc*l1)*ido] - ch[i + (k + j*l1)*ido]; - } - } - } - } else { - for (j=1; j<ipph; j++) { - jc = ip - j; - j2 = 2*j; - for (i=2; i<ido; i+=2) { - ic = ido - i; - for (k=0; k<l1; k++) { - ref(cc,i - 1 + (j2 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido]; - ref(cc,ic - 1 + (j2 - 1 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] - ch[i - 1 + (k + jc*l1)*ido]; - ref(cc,i + (j2 + k*ip)*ido) = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido]; - ref(cc,ic + (j2 - 1 + k*ip)*ido) = ch[i + (k + jc*l1)*ido] - ch[i + (k + j*l1)*ido]; - } - } - } - } - } /* radfg */ - - -static void radbg(int ido, int ip, int l1, int idl1, - Treal cc[], Treal 
ch[], const Treal wa[]) - { - int idij, ipph, i, j, k, l, j2, ic, jc, lc, ik, is; - Treal dc2, ai1, ai2, ar1, ar2, ds2; - int nbd; - Treal dcp, dsp, ar1h, ar2h; - sincos2pi(1, ip, &dsp, &dcp); - nbd = (ido - 1) / 2; - ipph = (ip + 1) / 2; - if (ido >= l1) { - for (k=0; k<l1; k++) { - for (i=0; i<ido; i++) { - ch[i + k*ido] = ref(cc,i + k*ip*ido); - } - } - } else { - for (i=0; i<ido; i++) { - for (k=0; k<l1; k++) { - ch[i + k*ido] = ref(cc,i + k*ip*ido); - } - } - } - for (j=1; j<ipph; j++) { - jc = ip - j; - j2 = 2*j; - for (k=0; k<l1; k++) { - ch[(k + j*l1)*ido] = ref(cc,ido-1 + (j2 - 1 + k*ip)*ido) + ref(cc,ido-1 + (j2 - 1 + k*ip)* - ido); - ch[(k + jc*l1)*ido] = ref(cc,(j2 + k*ip)*ido) + ref(cc,(j2 + k*ip)*ido); - } - } - - if (ido != 1) { - if (nbd >= l1) { - for (j=1; j<ipph; j++) { - jc = ip - j; - for (k=0; k<l1; k++) { - for (i=2; i<ido; i+=2) { - ic = ido - i; - ch[i - 1 + (k + j*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) + ref(cc, - ic - 1 + (2*j - 1 + k*ip)*ido); - ch[i - 1 + (k + jc*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) - - ref(cc,ic - 1 + (2*j - 1 + k*ip)*ido); - ch[i + (k + j*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) - ref(cc,ic - + (2*j - 1 + k*ip)*ido); - ch[i + (k + jc*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) + ref(cc,ic - + (2*j - 1 + k*ip)*ido); - } - } - } - } else { - for (j=1; j<ipph; j++) { - jc = ip - j; - for (i=2; i<ido; i+=2) { - ic = ido - i; - for (k=0; k<l1; k++) { - ch[i - 1 + (k + j*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) + ref(cc, - ic - 1 + (2*j - 1 + k*ip)*ido); - ch[i - 1 + (k + jc*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) - - ref(cc,ic - 1 + (2*j - 1 + k*ip)*ido); - ch[i + (k + j*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) - ref(cc,ic - + (2*j - 1 + k*ip)*ido); - ch[i + (k + jc*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) + ref(cc,ic - + (2*j - 1 + k*ip)*ido); - } - } - } - } - } - - ar1 = 1; - ai1 = 0; - for (l=1; l<ipph; l++) { - lc = ip - l; - ar1h = dcp*ar1 - dsp*ai1; - ai1 = dcp*ai1 + dsp*ar1; - ar1 = ar1h; - for (ik=0; ik<idl1; ik++) { - cc[ik + l*idl1] = ch[ik] + ar1*ch[ik + idl1]; - cc[ik + lc*idl1] = ai1*ch[ik + (ip-1)*idl1]; - } - dc2 = ar1; - ds2 = ai1; - ar2 = ar1; - ai2 = ai1; - for (j=2; j<ipph; j++) { - jc = ip - j; - ar2h = dc2*ar2 - ds2*ai2; - ai2 = dc2*ai2 + ds2*ar2; - ar2 = ar2h; - for (ik=0; ik<idl1; ik++) { - cc[ik + l*idl1] += ar2*ch[ik + j*idl1]; - cc[ik + lc*idl1] += ai2*ch[ik + jc*idl1]; - } - } - } - for (j=1; j<ipph; j++) { - for (ik=0; ik<idl1; ik++) { - ch[ik] += ch[ik + j*idl1]; - } - } - for (j=1; j<ipph; j++) { - jc = ip - j; - for (k=0; k<l1; k++) { - ch[(k + j*l1)*ido] = cc[(k + j*l1)*ido] - cc[(k + jc*l1)*ido]; - ch[(k + jc*l1)*ido] = cc[(k + j*l1)*ido] + cc[(k + jc*l1)*ido]; - } - } - - if (ido == 1) return; - if (nbd >= l1) { - for (j=1; j<ipph; j++) { - jc = ip - j; - for (k=0; k<l1; k++) { - for (i=2; i<ido; i+=2) { - ch[i - 1 + (k + j*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] - cc[i + (k + jc*l1)*ido]; - ch[i - 1 + (k + jc*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] + cc[i + (k + jc*l1)*ido]; - ch[i + (k + j*l1)*ido] = cc[i + (k + j*l1)*ido] + cc[i - 1 + (k + jc*l1)*ido]; - ch[i + (k + jc*l1)*ido] = cc[i + (k + j*l1)*ido] - cc[i - 1 + (k + jc*l1)*ido]; - } - } - } - } else { - for (j=1; j<ipph; j++) { - jc = ip - j; - for (i=2; i<ido; i+=2) { - for (k=0; k<l1; k++) { - ch[i - 1 + (k + j*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] - cc[i + (k + jc*l1)*ido]; - ch[i - 1 + (k + jc*l1)*ido] = cc[i - 1 + (k + j *l1)*ido] + cc[i + (k + jc*l1)*ido]; - ch[i + (k + j*l1)*ido] = cc[i + (k + j*l1)*ido] + cc[i - 1 + (k + jc*l1)*ido]; - ch[i 
+ (k + jc*l1)*ido] = cc[i + (k + j*l1)*ido] - cc[i - 1 + (k + jc*l1)*ido]; - } - } - } - } - for (ik=0; ik<idl1; ik++) cc[ik] = ch[ik]; - for (j=1; j<ip; j++) - for (k=0; k<l1; k++) - cc[(k + j*l1)*ido] = ch[(k + j*l1)*ido]; - if (nbd <= l1) { - is = -ido; - for (j=1; j<ip; j++) { - is += ido; - idij = is-1; - for (i=2; i<ido; i+=2) { - idij += 2; - for (k=0; k<l1; k++) { - cc[i - 1 + (k + j*l1)*ido] = wa[idij - 1]*ch[i - 1 + (k + j*l1)*ido] - wa[idij]* - ch[i + (k + j*l1)*ido]; - cc[i + (k + j*l1)*ido] = wa[idij - 1]*ch[i + (k + j*l1)*ido] + wa[idij]*ch[i - 1 + (k + j*l1)*ido]; - } - } - } - } else { - is = -ido; - for (j=1; j<ip; j++) { - is += ido; - for (k=0; k<l1; k++) { - idij = is - 1; - for (i=2; i<ido; i+=2) { - idij += 2; - cc[i - 1 + (k + j*l1)*ido] = wa[idij-1]*ch[i - 1 + (k + j*l1)*ido] - wa[idij]* - ch[i + (k + j*l1)*ido]; - cc[i + (k + j*l1)*ido] = wa[idij-1]*ch[i + (k + j*l1)*ido] + wa[idij]*ch[i - 1 + (k + j*l1)*ido]; - } - } - } - } - } /* radbg */ - - /* ------------------------------------------------------------ -cfftf1, npy_cfftf, npy_cfftb, cffti1, npy_cffti. Complex FFTs. ---------------------------------------------------------------- */ - -static void cfftf1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2], int isign) - { - int idot, i; - int k1, l1, l2; - int na, nf, ip, iw, ix2, ix3, ix4, nac, ido, idl1; - Treal *cinput, *coutput; - nf = ifac[1]; - na = 0; - l1 = 1; - iw = 0; - for (k1=2; k1<=nf+1; k1++) { - ip = ifac[k1]; - l2 = ip*l1; - ido = n / l2; - idot = ido + ido; - idl1 = idot*l1; - if (na) { - cinput = ch; - coutput = c; - } else { - cinput = c; - coutput = ch; - } - switch (ip) { - case 4: - ix2 = iw + idot; - ix3 = ix2 + idot; - passf4(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], isign); - na = !na; - break; - case 2: - passf2(idot, l1, cinput, coutput, &wa[iw], isign); - na = !na; - break; - case 3: - ix2 = iw + idot; - passf3(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], isign); - na = !na; - break; - case 5: - ix2 = iw + idot; - ix3 = ix2 + idot; - ix4 = ix3 + idot; - passf5(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4], isign); - na = !na; - break; - default: - passf(&nac, idot, ip, l1, idl1, cinput, coutput, &wa[iw], isign); - if (nac != 0) na = !na; - } - l1 = l2; - iw += (ip - 1)*idot; - } - if (na == 0) return; - for (i=0; i<2*n; i++) c[i] = ch[i]; - } /* cfftf1 */ - - -NPY_VISIBILITY_HIDDEN void npy_cfftf(int n, Treal c[], Treal wsave[]) - { - int iw1, iw2; - if (n == 1) return; - iw1 = 2*n; - iw2 = iw1 + 2*n; - cfftf1(n, c, wsave, wsave+iw1, (int*)(wsave+iw2), -1); - } /* npy_cfftf */ - - -NPY_VISIBILITY_HIDDEN void npy_cfftb(int n, Treal c[], Treal wsave[]) - { - int iw1, iw2; - if (n == 1) return; - iw1 = 2*n; - iw2 = iw1 + 2*n; - cfftf1(n, c, wsave, wsave+iw1, (int*)(wsave+iw2), +1); - } /* npy_cfftb */ - - -static void factorize(int n, int ifac[MAXFAC+2], const int ntryh[NSPECIAL]) - /* Factorize n in factors in ntryh and rest. On exit, -ifac[0] contains n and ifac[1] contains number of factors, -the factors start from ifac[2]. 
*/ - { - int ntry=3, i, j=0, ib, nf=0, nl=n, nq, nr; -startloop: - if (j < NSPECIAL) - ntry = ntryh[j]; - else - ntry+= 2; - j++; - do { - nq = nl / ntry; - nr = nl - ntry*nq; - if (nr != 0) goto startloop; - nf++; - ifac[nf + 1] = ntry; - nl = nq; - if (ntry == 2 && nf != 1) { - for (i=2; i<=nf; i++) { - ib = nf - i + 2; - ifac[ib + 1] = ifac[ib]; - } - ifac[2] = 2; - } - } while (nl != 1); - ifac[0] = n; - ifac[1] = nf; - } - - -static void cffti1(int n, Treal wa[], int ifac[MAXFAC+2]) - { - int fi, idot, i, j; - int i1, k1, l1, l2; - int ld, ii, nf, ip; - int ido, ipm; - - static const int ntryh[NSPECIAL] = { - 3,4,2,5 }; /* Do not change the order of these. */ - - factorize(n,ifac,ntryh); - nf = ifac[1]; - i = 1; - l1 = 1; - for (k1=1; k1<=nf; k1++) { - ip = ifac[k1+1]; - ld = 0; - l2 = l1*ip; - ido = n / l2; - idot = ido + ido + 2; - ipm = ip - 1; - for (j=1; j<=ipm; j++) { - i1 = i; - wa[i-1] = 1; - wa[i] = 0; - ld += l1; - fi = 0; - for (ii=4; ii<=idot; ii+=2) { - i+= 2; - fi+= 1; - sincos2pi(fi*ld, n, wa+i, wa+i-1); - } - if (ip > 5) { - wa[i1-1] = wa[i-1]; - wa[i1] = wa[i]; - } - } - l1 = l2; - } - } /* cffti1 */ - - -NPY_VISIBILITY_HIDDEN void npy_cffti(int n, Treal wsave[]) - { - int iw1, iw2; - if (n == 1) return; - iw1 = 2*n; - iw2 = iw1 + 2*n; - cffti1(n, wsave+iw1, (int*)(wsave+iw2)); - } /* npy_cffti */ - - /* ------------------------------------------------------------------- -rfftf1, rfftb1, npy_rfftf, npy_rfftb, rffti1, npy_rffti. Treal FFTs. ----------------------------------------------------------------------- */ - -static void rfftf1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2]) - { - int i; - int k1, l1, l2, na, kh, nf, ip, iw, ix2, ix3, ix4, ido, idl1; - Treal *cinput, *coutput; - nf = ifac[1]; - na = 1; - l2 = n; - iw = n-1; - for (k1 = 1; k1 <= nf; ++k1) { - kh = nf - k1; - ip = ifac[kh + 2]; - l1 = l2 / ip; - ido = n / l2; - idl1 = ido*l1; - iw -= (ip - 1)*ido; - na = !na; - if (na) { - cinput = ch; - coutput = c; - } else { - cinput = c; - coutput = ch; - } - switch (ip) { - case 4: - ix2 = iw + ido; - ix3 = ix2 + ido; - radf4(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3]); - break; - case 2: - radf2(ido, l1, cinput, coutput, &wa[iw]); - break; - case 3: - ix2 = iw + ido; - radf3(ido, l1, cinput, coutput, &wa[iw], &wa[ix2]); - break; - case 5: - ix2 = iw + ido; - ix3 = ix2 + ido; - ix4 = ix3 + ido; - radf5(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]); - break; - default: - if (ido == 1) - na = !na; - if (na == 0) { - radfg(ido, ip, l1, idl1, c, ch, &wa[iw]); - na = 1; - } else { - radfg(ido, ip, l1, idl1, ch, c, &wa[iw]); - na = 0; - } - } - l2 = l1; - } - if (na == 1) return; - for (i = 0; i < n; i++) c[i] = ch[i]; - } /* rfftf1 */ - - -static void rfftb1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2]) - { - int i; - int k1, l1, l2, na, nf, ip, iw, ix2, ix3, ix4, ido, idl1; - Treal *cinput, *coutput; - nf = ifac[1]; - na = 0; - l1 = 1; - iw = 0; - for (k1=1; k1<=nf; k1++) { - ip = ifac[k1 + 1]; - l2 = ip*l1; - ido = n / l2; - idl1 = ido*l1; - if (na) { - cinput = ch; - coutput = c; - } else { - cinput = c; - coutput = ch; - } - switch (ip) { - case 4: - ix2 = iw + ido; - ix3 = ix2 + ido; - radb4(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3]); - na = !na; - break; - case 2: - radb2(ido, l1, cinput, coutput, &wa[iw]); - na = !na; - break; - case 3: - ix2 = iw + ido; - radb3(ido, l1, cinput, coutput, &wa[iw], &wa[ix2]); - na = !na; - break; - case 5: - ix2 = iw + ido; - ix3 
= ix2 + ido; - ix4 = ix3 + ido; - radb5(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]); - na = !na; - break; - default: - radbg(ido, ip, l1, idl1, cinput, coutput, &wa[iw]); - if (ido == 1) na = !na; - } - l1 = l2; - iw += (ip - 1)*ido; - } - if (na == 0) return; - for (i=0; i<n; i++) c[i] = ch[i]; - } /* rfftb1 */ - - -NPY_VISIBILITY_HIDDEN void npy_rfftf(int n, Treal r[], Treal wsave[]) - { - if (n == 1) return; - rfftf1(n, r, wsave, wsave+n, (int*)(wsave+2*n)); - } /* npy_rfftf */ - - -NPY_VISIBILITY_HIDDEN void npy_rfftb(int n, Treal r[], Treal wsave[]) - { - if (n == 1) return; - rfftb1(n, r, wsave, wsave+n, (int*)(wsave+2*n)); - } /* npy_rfftb */ - - -static void rffti1(int n, Treal wa[], int ifac[MAXFAC+2]) - { - int fi, i, j; - int k1, l1, l2; - int ld, ii, nf, ip, is; - int ido, ipm, nfm1; - static const int ntryh[NSPECIAL] = { - 4,2,3,5 }; /* Do not change the order of these. */ - factorize(n,ifac,ntryh); - nf = ifac[1]; - is = 0; - nfm1 = nf - 1; - l1 = 1; - if (nfm1 == 0) return; - for (k1 = 1; k1 <= nfm1; k1++) { - ip = ifac[k1 + 1]; - ld = 0; - l2 = l1*ip; - ido = n / l2; - ipm = ip - 1; - for (j = 1; j <= ipm; ++j) { - ld += l1; - i = is; - fi = 0; - for (ii = 3; ii <= ido; ii += 2) { - i += 2; - fi += 1; - sincos2pi(fi*ld, n, wa+i-1, wa+i-2); - } - is += ido; - } - l1 = l2; - } - } /* rffti1 */ - - -NPY_VISIBILITY_HIDDEN void npy_rffti(int n, Treal wsave[]) - { - if (n == 1) return; - rffti1(n, wsave+n, (int*)(wsave+2*n)); - } /* npy_rffti */ - -#ifdef __cplusplus -} -#endif diff --git a/numpy/fft/fftpack.h b/numpy/fft/fftpack.h deleted file mode 100644 index 5e8f4631c..000000000 --- a/numpy/fft/fftpack.h +++ /dev/null @@ -1,28 +0,0 @@ -/* - * This file is part of tela the Tensor Language. - * Copyright (c) 1994-1995 Pekka Janhunen - */ - -#ifdef __cplusplus -extern "C" { -#endif - -#define DOUBLE - -#ifdef DOUBLE -#define Treal double -#else -#define Treal float -#endif - -extern NPY_VISIBILITY_HIDDEN void npy_cfftf(int N, Treal data[], const Treal wrk[]); -extern NPY_VISIBILITY_HIDDEN void npy_cfftb(int N, Treal data[], const Treal wrk[]); -extern NPY_VISIBILITY_HIDDEN void npy_cffti(int N, Treal wrk[]); - -extern NPY_VISIBILITY_HIDDEN void npy_rfftf(int N, Treal data[], const Treal wrk[]); -extern NPY_VISIBILITY_HIDDEN void npy_rfftb(int N, Treal data[], const Treal wrk[]); -extern NPY_VISIBILITY_HIDDEN void npy_rffti(int N, Treal wrk[]); - -#ifdef __cplusplus -} -#endif diff --git a/numpy/fft/fftpack_litemodule.c b/numpy/fft/fftpack_litemodule.c deleted file mode 100644 index bd6cfc120..000000000 --- a/numpy/fft/fftpack_litemodule.c +++ /dev/null @@ -1,366 +0,0 @@ -#define NPY_NO_DEPRECATED_API NPY_API_VERSION - -#include "Python.h" -#include "numpy/arrayobject.h" -#include "fftpack.h" - -static PyObject *ErrorObject; - -static const char fftpack_cfftf__doc__[] = ""; - -static PyObject * -fftpack_cfftf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *data; - PyArray_Descr *descr; - double *wsave, *dptr; - npy_intp nsave; - int npts, nrepeats, i; - - if(!PyArg_ParseTuple(args, "OO:cfftf", &op1, &op2)) { - return NULL; - } - data = (PyArrayObject *)PyArray_CopyFromObject(op1, - NPY_CDOUBLE, 1, 0); - if (data == NULL) { - return NULL; - } - descr = PyArray_DescrFromType(NPY_DOUBLE); - if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) { - goto fail; - } - if (data == NULL) { - goto fail; - } - - npts = PyArray_DIM(data, PyArray_NDIM(data) - 1); - if (nsave != npts*4 + 15) { - 
PyErr_SetString(ErrorObject, "invalid work array for fft size"); - goto fail; - } - - nrepeats = PyArray_SIZE(data)/npts; - dptr = (double *)PyArray_DATA(data); - Py_BEGIN_ALLOW_THREADS; - NPY_SIGINT_ON; - for (i = 0; i < nrepeats; i++) { - npy_cfftf(npts, dptr, wsave); - dptr += npts*2; - } - NPY_SIGINT_OFF; - Py_END_ALLOW_THREADS; - PyArray_Free(op2, (char *)wsave); - return (PyObject *)data; - -fail: - PyArray_Free(op2, (char *)wsave); - Py_DECREF(data); - return NULL; -} - -static const char fftpack_cfftb__doc__[] = ""; - -static PyObject * -fftpack_cfftb(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *data; - PyArray_Descr *descr; - double *wsave, *dptr; - npy_intp nsave; - int npts, nrepeats, i; - - if(!PyArg_ParseTuple(args, "OO:cfftb", &op1, &op2)) { - return NULL; - } - data = (PyArrayObject *)PyArray_CopyFromObject(op1, - NPY_CDOUBLE, 1, 0); - if (data == NULL) { - return NULL; - } - descr = PyArray_DescrFromType(NPY_DOUBLE); - if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) { - goto fail; - } - if (data == NULL) { - goto fail; - } - - npts = PyArray_DIM(data, PyArray_NDIM(data) - 1); - if (nsave != npts*4 + 15) { - PyErr_SetString(ErrorObject, "invalid work array for fft size"); - goto fail; - } - - nrepeats = PyArray_SIZE(data)/npts; - dptr = (double *)PyArray_DATA(data); - Py_BEGIN_ALLOW_THREADS; - NPY_SIGINT_ON; - for (i = 0; i < nrepeats; i++) { - npy_cfftb(npts, dptr, wsave); - dptr += npts*2; - } - NPY_SIGINT_OFF; - Py_END_ALLOW_THREADS; - PyArray_Free(op2, (char *)wsave); - return (PyObject *)data; - -fail: - PyArray_Free(op2, (char *)wsave); - Py_DECREF(data); - return NULL; -} - -static const char fftpack_cffti__doc__[] = ""; - -static PyObject * -fftpack_cffti(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyArrayObject *op; - npy_intp dim; - long n; - - if (!PyArg_ParseTuple(args, "l:cffti", &n)) { - return NULL; - } - /*Magic size needed by npy_cffti*/ - dim = 4*n + 15; - /*Create a 1 dimensional array of dimensions of type double*/ - op = (PyArrayObject *)PyArray_SimpleNew(1, &dim, NPY_DOUBLE); - if (op == NULL) { - return NULL; - } - - Py_BEGIN_ALLOW_THREADS; - NPY_SIGINT_ON; - npy_cffti(n, (double *)PyArray_DATA((PyArrayObject*)op)); - NPY_SIGINT_OFF; - Py_END_ALLOW_THREADS; - - return (PyObject *)op; -} - -static const char fftpack_rfftf__doc__[] = ""; - -static PyObject * -fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *data, *ret; - PyArray_Descr *descr; - double *wsave = NULL, *dptr, *rptr; - npy_intp nsave; - int npts, nrepeats, i, rstep; - - if(!PyArg_ParseTuple(args, "OO:rfftf", &op1, &op2)) { - return NULL; - } - data = (PyArrayObject *)PyArray_ContiguousFromObject(op1, - NPY_DOUBLE, 1, 0); - if (data == NULL) { - return NULL; - } - /* FIXME, direct access changing contents of data->dimensions */ - npts = PyArray_DIM(data, PyArray_NDIM(data) - 1); - PyArray_DIMS(data)[PyArray_NDIM(data) - 1] = npts/2 + 1; - ret = (PyArrayObject *)PyArray_Zeros(PyArray_NDIM(data), - PyArray_DIMS(data), PyArray_DescrFromType(NPY_CDOUBLE), 0); - if (ret == NULL) { - goto fail; - } - PyArray_DIMS(data)[PyArray_NDIM(data) - 1] = npts; - rstep = PyArray_DIM(ret, PyArray_NDIM(ret) - 1)*2; - - descr = PyArray_DescrFromType(NPY_DOUBLE); - if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) { - goto fail; - } - if (data == NULL || ret == NULL) { - goto fail; - } - if (nsave != npts*2+15) { - PyErr_SetString(ErrorObject, "invalid work array for fft size"); 
- goto fail; - } - - nrepeats = PyArray_SIZE(data)/npts; - rptr = (double *)PyArray_DATA(ret); - dptr = (double *)PyArray_DATA(data); - - Py_BEGIN_ALLOW_THREADS; - NPY_SIGINT_ON; - for (i = 0; i < nrepeats; i++) { - memcpy((char *)(rptr+1), dptr, npts*sizeof(double)); - npy_rfftf(npts, rptr+1, wsave); - rptr[0] = rptr[1]; - rptr[1] = 0.0; - rptr += rstep; - dptr += npts; - } - NPY_SIGINT_OFF; - Py_END_ALLOW_THREADS; - PyArray_Free(op2, (char *)wsave); - Py_DECREF(data); - return (PyObject *)ret; - -fail: - PyArray_Free(op2, (char *)wsave); - Py_XDECREF(data); - Py_XDECREF(ret); - return NULL; -} - -static const char fftpack_rfftb__doc__[] = ""; - -static PyObject * -fftpack_rfftb(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyObject *op1, *op2; - PyArrayObject *data, *ret; - PyArray_Descr *descr; - double *wsave, *dptr, *rptr; - npy_intp nsave; - int npts, nrepeats, i; - - if(!PyArg_ParseTuple(args, "OO:rfftb", &op1, &op2)) { - return NULL; - } - data = (PyArrayObject *)PyArray_ContiguousFromObject(op1, - NPY_CDOUBLE, 1, 0); - if (data == NULL) { - return NULL; - } - npts = PyArray_DIM(data, PyArray_NDIM(data) - 1); - ret = (PyArrayObject *)PyArray_Zeros(PyArray_NDIM(data), PyArray_DIMS(data), - PyArray_DescrFromType(NPY_DOUBLE), 0); - - descr = PyArray_DescrFromType(NPY_DOUBLE); - if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) { - goto fail; - } - if (data == NULL || ret == NULL) { - goto fail; - } - if (nsave != npts*2 + 15) { - PyErr_SetString(ErrorObject, "invalid work array for fft size"); - goto fail; - } - - nrepeats = PyArray_SIZE(ret)/npts; - rptr = (double *)PyArray_DATA(ret); - dptr = (double *)PyArray_DATA(data); - - Py_BEGIN_ALLOW_THREADS; - NPY_SIGINT_ON; - for (i = 0; i < nrepeats; i++) { - memcpy((char *)(rptr + 1), (dptr + 2), (npts - 1)*sizeof(double)); - rptr[0] = dptr[0]; - npy_rfftb(npts, rptr, wsave); - rptr += npts; - dptr += npts*2; - } - NPY_SIGINT_OFF; - Py_END_ALLOW_THREADS; - PyArray_Free(op2, (char *)wsave); - Py_DECREF(data); - return (PyObject *)ret; - -fail: - PyArray_Free(op2, (char *)wsave); - Py_XDECREF(data); - Py_XDECREF(ret); - return NULL; -} - -static const char fftpack_rffti__doc__[] = ""; - -static PyObject * -fftpack_rffti(PyObject *NPY_UNUSED(self), PyObject *args) -{ - PyArrayObject *op; - npy_intp dim; - long n; - - if (!PyArg_ParseTuple(args, "l:rffti", &n)) { - return NULL; - } - /*Magic size needed by npy_rffti*/ - dim = 2*n + 15; - /*Create a 1 dimensional array of dimensions of type double*/ - op = (PyArrayObject *)PyArray_SimpleNew(1, &dim, NPY_DOUBLE); - if (op == NULL) { - return NULL; - } - Py_BEGIN_ALLOW_THREADS; - NPY_SIGINT_ON; - npy_rffti(n, (double *)PyArray_DATA((PyArrayObject*)op)); - NPY_SIGINT_OFF; - Py_END_ALLOW_THREADS; - - return (PyObject *)op; -} - - -/* List of methods defined in the module */ - -static struct PyMethodDef fftpack_methods[] = { - {"cfftf", fftpack_cfftf, 1, fftpack_cfftf__doc__}, - {"cfftb", fftpack_cfftb, 1, fftpack_cfftb__doc__}, - {"cffti", fftpack_cffti, 1, fftpack_cffti__doc__}, - {"rfftf", fftpack_rfftf, 1, fftpack_rfftf__doc__}, - {"rfftb", fftpack_rfftb, 1, fftpack_rfftb__doc__}, - {"rffti", fftpack_rffti, 1, fftpack_rffti__doc__}, - {NULL, NULL, 0, NULL} /* sentinel */ -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef moduledef = { - PyModuleDef_HEAD_INIT, - "fftpack_lite", - NULL, - -1, - fftpack_methods, - NULL, - NULL, - NULL, - NULL -}; -#endif - -/* Initialization function for the module */ -#if PY_MAJOR_VERSION >= 3 -#define RETVAL(x) x -PyMODINIT_FUNC 
PyInit_fftpack_lite(void) -#else -#define RETVAL(x) -PyMODINIT_FUNC -initfftpack_lite(void) -#endif -{ - PyObject *m,*d; -#if PY_MAJOR_VERSION >= 3 - m = PyModule_Create(&moduledef); -#else - static const char fftpack_module_documentation[] = ""; - - m = Py_InitModule4("fftpack_lite", fftpack_methods, - fftpack_module_documentation, - (PyObject*)NULL,PYTHON_API_VERSION); -#endif - if (m == NULL) { - return RETVAL(NULL); - } - - /* Import the array object */ - import_array(); - - /* Add some symbolic constants to the module */ - d = PyModule_GetDict(m); - ErrorObject = PyErr_NewException("fftpack.error", NULL, NULL); - PyDict_SetItemString(d, "error", ErrorObject); - - /* XXXX Add constants here */ - - return RETVAL(m); -} diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py index 864768df5..a920a4ac0 100644 --- a/numpy/fft/helper.py +++ b/numpy/fft/helper.py @@ -4,11 +4,6 @@ Discrete Fourier Transforms - helper.py """ from __future__ import division, absolute_import, print_function -import collections -try: - import threading -except ImportError: - import dummy_threading as threading from numpy.compat import integer_types from numpy.core import integer, empty, arange, asarray, roll from numpy.core.overrides import array_function_dispatch, set_module @@ -52,7 +47,7 @@ def fftshift(x, axes=None): -------- >>> freqs = np.fft.fftfreq(10, 0.1) >>> freqs - array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.]) + array([ 0., 1., 2., ..., -3., -2., -1.]) >>> np.fft.fftshift(freqs) array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.]) @@ -162,7 +157,7 @@ def fftfreq(n, d=1.0): >>> timestep = 0.1 >>> freq = np.fft.fftfreq(n, d=timestep) >>> freq - array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25]) + array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25]) """ if not isinstance(n, integer_types): @@ -215,7 +210,7 @@ def rfftfreq(n, d=1.0): >>> sample_rate = 100 >>> freq = np.fft.fftfreq(n, d=1./sample_rate) >>> freq - array([ 0., 10., 20., 30., 40., -50., -40., -30., -20., -10.]) + array([ 0., 10., 20., ..., -30., -20., -10.]) >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) >>> freq array([ 0., 10., 20., 30., 40., 50.]) @@ -227,99 +222,3 @@ def rfftfreq(n, d=1.0): N = n//2 + 1 results = arange(0, N, dtype=int) return results * val - - -class _FFTCache(object): - """ - Cache for the FFT twiddle factors as an LRU (least recently used) cache. - - Parameters - ---------- - max_size_in_mb : int - Maximum memory usage of the cache before items are being evicted. - max_item_count : int - Maximum item count of the cache before items are being evicted. - - Notes - ----- - Items will be evicted if either limit has been reached upon getting and - setting. The maximum memory usages is not strictly the given - ``max_size_in_mb`` but rather - ``max(max_size_in_mb, 1.5 * size_of_largest_item)``. Thus the cache will - never be completely cleared - at least one item will remain and a single - large item can cause the cache to retain several smaller items even if the - given maximum cache size has been exceeded. - """ - def __init__(self, max_size_in_mb, max_item_count): - self._max_size_in_bytes = max_size_in_mb * 1024 ** 2 - self._max_item_count = max_item_count - self._dict = collections.OrderedDict() - self._lock = threading.Lock() - - def put_twiddle_factors(self, n, factors): - """ - Store twiddle factors for an FFT of length n in the cache. - - Putting multiple twiddle factors for a certain n will store it multiple - times. - - Parameters - ---------- - n : int - Data length for the FFT. 
- factors : ndarray - The actual twiddle values. - """ - with self._lock: - # Pop + later add to move it to the end for LRU behavior. - # Internally everything is stored in a dictionary whose values are - # lists. - try: - value = self._dict.pop(n) - except KeyError: - value = [] - value.append(factors) - self._dict[n] = value - self._prune_cache() - - def pop_twiddle_factors(self, n): - """ - Pop twiddle factors for an FFT of length n from the cache. - - Will return None if the requested twiddle factors are not available in - the cache. - - Parameters - ---------- - n : int - Data length for the FFT. - - Returns - ------- - out : ndarray or None - The retrieved twiddle factors if available, else None. - """ - with self._lock: - if n not in self._dict or not self._dict[n]: - return None - # Pop + later add to move it to the end for LRU behavior. - all_values = self._dict.pop(n) - value = all_values.pop() - # Only put pack if there are still some arrays left in the list. - if all_values: - self._dict[n] = all_values - return value - - def _prune_cache(self): - # Always keep at least one item. - while len(self._dict) > 1 and ( - len(self._dict) > self._max_item_count or self._check_size()): - self._dict.popitem(last=False) - - def _check_size(self): - item_sizes = [sum(_j.nbytes for _j in _i) - for _i in self._dict.values() if _i] - if not item_sizes: - return False - max_size = max(self._max_size_in_bytes, 1.5 * max(item_sizes)) - return sum(item_sizes) > max_size diff --git a/numpy/fft/pocketfft.c b/numpy/fft/pocketfft.c new file mode 100644 index 000000000..10a741b6f --- /dev/null +++ b/numpy/fft/pocketfft.c @@ -0,0 +1,2398 @@ +/* + * This file is part of pocketfft. + * Licensed under a 3-clause BSD style license - see LICENSE.md + */ + +/* + * Main implementation file. + * + * Copyright (C) 2004-2018 Max-Planck-Society + * \author Martin Reinecke + */ + +#include <math.h> +#include <string.h> +#include <stdlib.h> + +#include "npy_config.h" +#define restrict NPY_RESTRICT + +#define RALLOC(type,num) \ + ((type *)malloc((num)*sizeof(type))) +#define DEALLOC(ptr) \ + do { free(ptr); (ptr)=NULL; } while(0) + +#define SWAP(a,b,type) \ + do { type tmp_=(a); (a)=(b); (b)=tmp_; } while(0) + +#ifdef __GNUC__ +#define NOINLINE __attribute__((noinline)) +#define WARN_UNUSED_RESULT __attribute__ ((warn_unused_result)) +#else +#define NOINLINE +#define WARN_UNUSED_RESULT +#endif + +struct cfft_plan_i; +typedef struct cfft_plan_i * cfft_plan; +struct rfft_plan_i; +typedef struct rfft_plan_i * rfft_plan; + +// adapted from https://stackoverflow.com/questions/42792939/ +// CAUTION: this function only works for arguments in the range [-0.25; 0.25]! 
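+/*
+ * Reading of the routine below (inferred from its coefficients, so treat it
+ * as a sketch rather than upstream documentation): for |a| <= 0.25 it
+ * evaluates two fma-based polynomial approximations and stores
+ *     res[0] ~= cos(pi*a) - 1      res[1] ~= sin(pi*a).
+ * Returning cos-1 rather than cos keeps full relative accuracy near a == 0,
+ * which calc_first_octant() below appears to rely on when it later adds the
+ * 1. back in while assembling the twiddle-factor table.
+ */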
+static void my_sincosm1pi (double a, double *restrict res) + { + double s = a * a; + /* Approximate cos(pi*x)-1 for x in [-0.25,0.25] */ + double r = -1.0369917389758117e-4; + r = fma (r, s, 1.9294935641298806e-3); + r = fma (r, s, -2.5806887942825395e-2); + r = fma (r, s, 2.3533063028328211e-1); + r = fma (r, s, -1.3352627688538006e+0); + r = fma (r, s, 4.0587121264167623e+0); + r = fma (r, s, -4.9348022005446790e+0); + double c = r*s; + /* Approximate sin(pi*x) for x in [-0.25,0.25] */ + r = 4.6151442520157035e-4; + r = fma (r, s, -7.3700183130883555e-3); + r = fma (r, s, 8.2145868949323936e-2); + r = fma (r, s, -5.9926452893214921e-1); + r = fma (r, s, 2.5501640398732688e+0); + r = fma (r, s, -5.1677127800499516e+0); + s = s * a; + r = r * s; + s = fma (a, 3.1415926535897931e+0, r); + res[0] = c; + res[1] = s; + } + +NOINLINE static void calc_first_octant(size_t den, double * restrict res) + { + size_t n = (den+4)>>3; + if (n==0) return; + res[0]=1.; res[1]=0.; + if (n==1) return; + size_t l1=(size_t)sqrt(n); + for (size_t i=1; i<l1; ++i) + my_sincosm1pi((2.*i)/den,&res[2*i]); + size_t start=l1; + while(start<n) + { + double cs[2]; + my_sincosm1pi((2.*start)/den,cs); + res[2*start] = cs[0]+1.; + res[2*start+1] = cs[1]; + size_t end = l1; + if (start+end>n) end = n-start; + for (size_t i=1; i<end; ++i) + { + double csx[2]={res[2*i], res[2*i+1]}; + res[2*(start+i)] = ((cs[0]*csx[0] - cs[1]*csx[1] + cs[0]) + csx[0]) + 1.; + res[2*(start+i)+1] = (cs[0]*csx[1] + cs[1]*csx[0]) + cs[1] + csx[1]; + } + start += l1; + } + for (size_t i=1; i<l1; ++i) + res[2*i] += 1.; + } + +NOINLINE static void calc_first_quadrant(size_t n, double * restrict res) + { + double * restrict p = res+n; + calc_first_octant(n<<1, p); + size_t ndone=(n+2)>>2; + size_t i=0, idx1=0, idx2=2*ndone-2; + for (; i+1<ndone; i+=2, idx1+=2, idx2-=2) + { + res[idx1] = p[2*i]; + res[idx1+1] = p[2*i+1]; + res[idx2] = p[2*i+3]; + res[idx2+1] = p[2*i+2]; + } + if (i!=ndone) + { + res[idx1 ] = p[2*i]; + res[idx1+1] = p[2*i+1]; + } + } + +NOINLINE static void calc_first_half(size_t n, double * restrict res) + { + int ndone=(n+1)>>1; + double * p = res+n-1; + calc_first_octant(n<<2, p); + int i4=0, in=n, i=0; + for (; i4<=in-i4; ++i, i4+=4) // octant 0 + { + res[2*i] = p[2*i4]; res[2*i+1] = p[2*i4+1]; + } + for (; i4-in <= 0; ++i, i4+=4) // octant 1 + { + int xm = in-i4; + res[2*i] = p[2*xm+1]; res[2*i+1] = p[2*xm]; + } + for (; i4<=3*in-i4; ++i, i4+=4) // octant 2 + { + int xm = i4-in; + res[2*i] = -p[2*xm+1]; res[2*i+1] = p[2*xm]; + } + for (; i<ndone; ++i, i4+=4) // octant 3 + { + int xm = 2*in-i4; + res[2*i] = -p[2*xm]; res[2*i+1] = p[2*xm+1]; + } + } + +NOINLINE static void fill_first_quadrant(size_t n, double * restrict res) + { + const double hsqt2 = 0.707106781186547524400844362104849; + size_t quart = n>>2; + if ((n&7)==0) + res[quart] = res[quart+1] = hsqt2; + for (size_t i=2, j=2*quart-2; i<quart; i+=2, j-=2) + { + res[j ] = res[i+1]; + res[j+1] = res[i ]; + } + } + +NOINLINE static void fill_first_half(size_t n, double * restrict res) + { + size_t half = n>>1; + if ((n&3)==0) + for (size_t i=0; i<half; i+=2) + { + res[i+half] = -res[i+1]; + res[i+half+1] = res[i ]; + } + else + for (size_t i=2, j=2*half-2; i<half; i+=2, j-=2) + { + res[j ] = -res[i ]; + res[j+1] = res[i+1]; + } + } + +NOINLINE static void fill_second_half(size_t n, double * restrict res) + { + if ((n&1)==0) + for (size_t i=0; i<n; ++i) + res[i+n] = -res[i]; + else + for (size_t i=2, j=2*n-2; i<n; i+=2, j-=2) + { + res[j ] = res[i ]; + res[j+1] = -res[i+1]; + 
} + } + +NOINLINE static void sincos_2pibyn_half(size_t n, double * restrict res) + { + if ((n&3)==0) + { + calc_first_octant(n, res); + fill_first_quadrant(n, res); + fill_first_half(n, res); + } + else if ((n&1)==0) + { + calc_first_quadrant(n, res); + fill_first_half(n, res); + } + else + calc_first_half(n, res); + } + +NOINLINE static void sincos_2pibyn(size_t n, double * restrict res) + { + sincos_2pibyn_half(n, res); + fill_second_half(n, res); + } + +NOINLINE static size_t largest_prime_factor (size_t n) + { + size_t res=1; + size_t tmp; + while (((tmp=(n>>1))<<1)==n) + { res=2; n=tmp; } + + size_t limit=(size_t)sqrt(n+0.01); + for (size_t x=3; x<=limit; x+=2) + while (((tmp=(n/x))*x)==n) + { + res=x; + n=tmp; + limit=(size_t)sqrt(n+0.01); + } + if (n>1) res=n; + + return res; + } + +NOINLINE static double cost_guess (size_t n) + { + const double lfp=1.1; // penalty for non-hardcoded larger factors + size_t ni=n; + double result=0.; + size_t tmp; + while (((tmp=(n>>1))<<1)==n) + { result+=2; n=tmp; } + + size_t limit=(size_t)sqrt(n+0.01); + for (size_t x=3; x<=limit; x+=2) + while ((tmp=(n/x))*x==n) + { + result+= (x<=5) ? x : lfp*x; // penalize larger prime factors + n=tmp; + limit=(size_t)sqrt(n+0.01); + } + if (n>1) result+=(n<=5) ? n : lfp*n; + + return result*ni; + } + +/* returns the smallest composite of 2, 3, 5, 7 and 11 which is >= n */ +NOINLINE static size_t good_size(size_t n) + { + if (n<=6) return n; + + size_t bestfac=2*n; + for (size_t f2=1; f2<bestfac; f2*=2) + for (size_t f23=f2; f23<bestfac; f23*=3) + for (size_t f235=f23; f235<bestfac; f235*=5) + for (size_t f2357=f235; f2357<bestfac; f2357*=7) + for (size_t f235711=f2357; f235711<bestfac; f235711*=11) + if (f235711>=n) bestfac=f235711; + return bestfac; + } + +typedef struct cmplx { + double r,i; +} cmplx; + +#define NFCT 25 +typedef struct cfftp_fctdata + { + size_t fct; + cmplx *tw, *tws; + } cfftp_fctdata; + +typedef struct cfftp_plan_i + { + size_t length, nfct; + cmplx *mem; + cfftp_fctdata fct[NFCT]; + } cfftp_plan_i; +typedef struct cfftp_plan_i * cfftp_plan; + +#define PMC(a,b,c,d) { a.r=c.r+d.r; a.i=c.i+d.i; b.r=c.r-d.r; b.i=c.i-d.i; } +#define ADDC(a,b,c) { a.r=b.r+c.r; a.i=b.i+c.i; } +#define SCALEC(a,b) { a.r*=b; a.i*=b; } +#define ROT90(a) { double tmp_=a.r; a.r=-a.i; a.i=tmp_; } +#define ROTM90(a) { double tmp_=-a.r; a.r=a.i; a.i=tmp_; } +#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))] +#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))] +#define WA(x,i) wa[(i)-1+(x)*(ido-1)] +/* a = b*c */ +#define A_EQ_B_MUL_C(a,b,c) { a.r=b.r*c.r-b.i*c.i; a.i=b.r*c.i+b.i*c.r; } +/* a = conj(b)*c*/ +#define A_EQ_CB_MUL_C(a,b,c) { a.r=b.r*c.r+b.i*c.i; a.i=b.r*c.i-b.i*c.r; } + +#define PMSIGNC(a,b,c,d) { a.r=c.r+sign*d.r; a.i=c.i+sign*d.i; b.r=c.r-sign*d.r; b.i=c.i-sign*d.i; } +/* a = b*c */ +#define MULPMSIGNC(a,b,c) { a.r=b.r*c.r-sign*b.i*c.i; a.i=b.r*c.i+sign*b.i*c.r; } +/* a *= b */ +#define MULPMSIGNCEQ(a,b) { double xtmp=a.r; a.r=b.r*a.r-sign*b.i*a.i; a.i=b.r*a.i+sign*b.i*xtmp; } + +NOINLINE static void pass2b (size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa) + { + const size_t cdim=2; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k)) + else + for (size_t k=0; k<l1; ++k) + { + PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k)) + for (size_t i=1; i<ido; ++i) + { + cmplx t; + PMC (CH(i,k,0),t,CC(i,0,k),CC(i,1,k)) + A_EQ_B_MUL_C (CH(i,k,1),WA(0,i),t) + } + } + } + +NOINLINE static void pass2f (size_t ido, size_t l1, const cmplx * 
restrict cc, + cmplx * restrict ch, const cmplx * restrict wa) + { + const size_t cdim=2; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k)) + else + for (size_t k=0; k<l1; ++k) + { + PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k)) + for (size_t i=1; i<ido; ++i) + { + cmplx t; + PMC (CH(i,k,0),t,CC(i,0,k),CC(i,1,k)) + A_EQ_CB_MUL_C (CH(i,k,1),WA(0,i),t) + } + } + } + +#define PREP3(idx) \ + cmplx t0 = CC(idx,0,k), t1, t2; \ + PMC (t1,t2,CC(idx,1,k),CC(idx,2,k)) \ + CH(idx,k,0).r=t0.r+t1.r; \ + CH(idx,k,0).i=t0.i+t1.i; +#define PARTSTEP3a(u1,u2,twr,twi) \ + { \ + cmplx ca,cb; \ + ca.r=t0.r+twr*t1.r; \ + ca.i=t0.i+twr*t1.i; \ + cb.i=twi*t2.r; \ + cb.r=-(twi*t2.i); \ + PMC(CH(0,k,u1),CH(0,k,u2),ca,cb) \ + } + +#define PARTSTEP3b(u1,u2,twr,twi) \ + { \ + cmplx ca,cb,da,db; \ + ca.r=t0.r+twr*t1.r; \ + ca.i=t0.i+twr*t1.i; \ + cb.i=twi*t2.r; \ + cb.r=-(twi*t2.i); \ + PMC(da,db,ca,cb) \ + A_EQ_B_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \ + A_EQ_B_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \ + } +NOINLINE static void pass3b (size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa) + { + const size_t cdim=3; + const double tw1r=-0.5, tw1i= 0.86602540378443864676; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + { + PREP3(0) + PARTSTEP3a(1,2,tw1r,tw1i) + } + else + for (size_t k=0; k<l1; ++k) + { + { + PREP3(0) + PARTSTEP3a(1,2,tw1r,tw1i) + } + for (size_t i=1; i<ido; ++i) + { + PREP3(i) + PARTSTEP3b(1,2,tw1r,tw1i) + } + } + } +#define PARTSTEP3f(u1,u2,twr,twi) \ + { \ + cmplx ca,cb,da,db; \ + ca.r=t0.r+twr*t1.r; \ + ca.i=t0.i+twr*t1.i; \ + cb.i=twi*t2.r; \ + cb.r=-(twi*t2.i); \ + PMC(da,db,ca,cb) \ + A_EQ_CB_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \ + A_EQ_CB_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \ + } +NOINLINE static void pass3f (size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa) + { + const size_t cdim=3; + const double tw1r=-0.5, tw1i= -0.86602540378443864676; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + { + PREP3(0) + PARTSTEP3a(1,2,tw1r,tw1i) + } + else + for (size_t k=0; k<l1; ++k) + { + { + PREP3(0) + PARTSTEP3a(1,2,tw1r,tw1i) + } + for (size_t i=1; i<ido; ++i) + { + PREP3(i) + PARTSTEP3f(1,2,tw1r,tw1i) + } + } + } + +NOINLINE static void pass4b (size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa) + { + const size_t cdim=4; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + { + cmplx t1, t2, t3, t4; + PMC(t2,t1,CC(0,0,k),CC(0,2,k)) + PMC(t3,t4,CC(0,1,k),CC(0,3,k)) + ROT90(t4) + PMC(CH(0,k,0),CH(0,k,2),t2,t3) + PMC(CH(0,k,1),CH(0,k,3),t1,t4) + } + else + for (size_t k=0; k<l1; ++k) + { + { + cmplx t1, t2, t3, t4; + PMC(t2,t1,CC(0,0,k),CC(0,2,k)) + PMC(t3,t4,CC(0,1,k),CC(0,3,k)) + ROT90(t4) + PMC(CH(0,k,0),CH(0,k,2),t2,t3) + PMC(CH(0,k,1),CH(0,k,3),t1,t4) + } + for (size_t i=1; i<ido; ++i) + { + cmplx c2, c3, c4, t1, t2, t3, t4; + cmplx cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k); + PMC(t2,t1,cc0,cc2) + PMC(t3,t4,cc1,cc3) + ROT90(t4) + cmplx wa0=WA(0,i), wa1=WA(1,i),wa2=WA(2,i); + PMC(CH(i,k,0),c3,t2,t3) + PMC(c2,c4,t1,t4) + A_EQ_B_MUL_C (CH(i,k,1),wa0,c2) + A_EQ_B_MUL_C (CH(i,k,2),wa1,c3) + A_EQ_B_MUL_C (CH(i,k,3),wa2,c4) + } + } + } +NOINLINE static void pass4f (size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa) + { + const size_t cdim=4; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + { + cmplx t1, t2, t3, t4; + PMC(t2,t1,CC(0,0,k),CC(0,2,k)) + PMC(t3,t4,CC(0,1,k),CC(0,3,k)) + 
ROTM90(t4) + PMC(CH(0,k,0),CH(0,k,2),t2,t3) + PMC(CH(0,k,1),CH(0,k,3),t1,t4) + } + else + for (size_t k=0; k<l1; ++k) + { + { + cmplx t1, t2, t3, t4; + PMC(t2,t1,CC(0,0,k),CC(0,2,k)) + PMC(t3,t4,CC(0,1,k),CC(0,3,k)) + ROTM90(t4) + PMC(CH(0,k,0),CH(0,k,2),t2,t3) + PMC (CH(0,k,1),CH(0,k,3),t1,t4) + } + for (size_t i=1; i<ido; ++i) + { + cmplx c2, c3, c4, t1, t2, t3, t4; + cmplx cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k); + PMC(t2,t1,cc0,cc2) + PMC(t3,t4,cc1,cc3) + ROTM90(t4) + cmplx wa0=WA(0,i), wa1=WA(1,i),wa2=WA(2,i); + PMC(CH(i,k,0),c3,t2,t3) + PMC(c2,c4,t1,t4) + A_EQ_CB_MUL_C (CH(i,k,1),wa0,c2) + A_EQ_CB_MUL_C (CH(i,k,2),wa1,c3) + A_EQ_CB_MUL_C (CH(i,k,3),wa2,c4) + } + } + } + +#define PREP5(idx) \ + cmplx t0 = CC(idx,0,k), t1, t2, t3, t4; \ + PMC (t1,t4,CC(idx,1,k),CC(idx,4,k)) \ + PMC (t2,t3,CC(idx,2,k),CC(idx,3,k)) \ + CH(idx,k,0).r=t0.r+t1.r+t2.r; \ + CH(idx,k,0).i=t0.i+t1.i+t2.i; + +#define PARTSTEP5a(u1,u2,twar,twbr,twai,twbi) \ + { \ + cmplx ca,cb; \ + ca.r=t0.r+twar*t1.r+twbr*t2.r; \ + ca.i=t0.i+twar*t1.i+twbr*t2.i; \ + cb.i=twai*t4.r twbi*t3.r; \ + cb.r=-(twai*t4.i twbi*t3.i); \ + PMC(CH(0,k,u1),CH(0,k,u2),ca,cb) \ + } + +#define PARTSTEP5b(u1,u2,twar,twbr,twai,twbi) \ + { \ + cmplx ca,cb,da,db; \ + ca.r=t0.r+twar*t1.r+twbr*t2.r; \ + ca.i=t0.i+twar*t1.i+twbr*t2.i; \ + cb.i=twai*t4.r twbi*t3.r; \ + cb.r=-(twai*t4.i twbi*t3.i); \ + PMC(da,db,ca,cb) \ + A_EQ_B_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \ + A_EQ_B_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \ + } +NOINLINE static void pass5b (size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa) + { + const size_t cdim=5; + const double tw1r= 0.3090169943749474241, + tw1i= 0.95105651629515357212, + tw2r= -0.8090169943749474241, + tw2i= 0.58778525229247312917; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + { + PREP5(0) + PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i) + PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i) + } + else + for (size_t k=0; k<l1; ++k) + { + { + PREP5(0) + PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i) + PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i) + } + for (size_t i=1; i<ido; ++i) + { + PREP5(i) + PARTSTEP5b(1,4,tw1r,tw2r,+tw1i,+tw2i) + PARTSTEP5b(2,3,tw2r,tw1r,+tw2i,-tw1i) + } + } + } +#define PARTSTEP5f(u1,u2,twar,twbr,twai,twbi) \ + { \ + cmplx ca,cb,da,db; \ + ca.r=t0.r+twar*t1.r+twbr*t2.r; \ + ca.i=t0.i+twar*t1.i+twbr*t2.i; \ + cb.i=twai*t4.r twbi*t3.r; \ + cb.r=-(twai*t4.i twbi*t3.i); \ + PMC(da,db,ca,cb) \ + A_EQ_CB_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \ + A_EQ_CB_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \ + } +NOINLINE static void pass5f (size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa) + { + const size_t cdim=5; + const double tw1r= 0.3090169943749474241, + tw1i= -0.95105651629515357212, + tw2r= -0.8090169943749474241, + tw2i= -0.58778525229247312917; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + { + PREP5(0) + PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i) + PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i) + } + else + for (size_t k=0; k<l1; ++k) + { + { + PREP5(0) + PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i) + PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i) + } + for (size_t i=1; i<ido; ++i) + { + PREP5(i) + PARTSTEP5f(1,4,tw1r,tw2r,+tw1i,+tw2i) + PARTSTEP5f(2,3,tw2r,tw1r,+tw2i,-tw1i) + } + } + } + +#define PREP7(idx) \ + cmplx t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7; \ + PMC (t2,t7,CC(idx,1,k),CC(idx,6,k)) \ + PMC (t3,t6,CC(idx,2,k),CC(idx,5,k)) \ + PMC (t4,t5,CC(idx,3,k),CC(idx,4,k)) \ + CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r; \ + CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i; + +#define 
PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,out1,out2) \ + { \ + cmplx ca,cb; \ + ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r; \ + ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i; \ + cb.i=y1*t7.r y2*t6.r y3*t5.r; \ + cb.r=-(y1*t7.i y2*t6.i y3*t5.i); \ + PMC(out1,out2,ca,cb) \ + } +#define PARTSTEP7a(u1,u2,x1,x2,x3,y1,y2,y3) \ + PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,CH(0,k,u1),CH(0,k,u2)) +#define PARTSTEP7(u1,u2,x1,x2,x3,y1,y2,y3) \ + { \ + cmplx da,db; \ + PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,da,db) \ + MULPMSIGNC (CH(i,k,u1),WA(u1-1,i),da) \ + MULPMSIGNC (CH(i,k,u2),WA(u2-1,i),db) \ + } + +NOINLINE static void pass7(size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa, const int sign) + { + const size_t cdim=7; + const double tw1r= 0.623489801858733530525, + tw1i= sign * 0.7818314824680298087084, + tw2r= -0.222520933956314404289, + tw2i= sign * 0.9749279121818236070181, + tw3r= -0.9009688679024191262361, + tw3i= sign * 0.4338837391175581204758; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + { + PREP7(0) + PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i) + PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i) + PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i) + } + else + for (size_t k=0; k<l1; ++k) + { + { + PREP7(0) + PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i) + PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i) + PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i) + } + for (size_t i=1; i<ido; ++i) + { + PREP7(i) + PARTSTEP7(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i) + PARTSTEP7(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i) + PARTSTEP7(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i) + } + } + } + +#define PREP11(idx) \ + cmplx t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7, t8, t9, t10, t11; \ + PMC (t2,t11,CC(idx,1,k),CC(idx,10,k)) \ + PMC (t3,t10,CC(idx,2,k),CC(idx, 9,k)) \ + PMC (t4,t9 ,CC(idx,3,k),CC(idx, 8,k)) \ + PMC (t5,t8 ,CC(idx,4,k),CC(idx, 7,k)) \ + PMC (t6,t7 ,CC(idx,5,k),CC(idx, 6,k)) \ + CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r+t5.r+t6.r; \ + CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i+t5.i+t6.i; + +#define PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,out1,out2) \ + { \ + cmplx ca,cb; \ + ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r+x4*t5.r+x5*t6.r; \ + ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i+x4*t5.i+x5*t6.i; \ + cb.i=y1*t11.r y2*t10.r y3*t9.r y4*t8.r y5*t7.r; \ + cb.r=-(y1*t11.i y2*t10.i y3*t9.i y4*t8.i y5*t7.i ); \ + PMC(out1,out2,ca,cb) \ + } +#define PARTSTEP11a(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \ + PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,CH(0,k,u1),CH(0,k,u2)) +#define PARTSTEP11(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \ + { \ + cmplx da,db; \ + PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,da,db) \ + MULPMSIGNC (CH(i,k,u1),WA(u1-1,i),da) \ + MULPMSIGNC (CH(i,k,u2),WA(u2-1,i),db) \ + } + +NOINLINE static void pass11 (size_t ido, size_t l1, const cmplx * restrict cc, + cmplx * restrict ch, const cmplx * restrict wa, const int sign) + { + const size_t cdim=11; + const double tw1r = 0.8412535328311811688618, + tw1i = sign * 0.5406408174555975821076, + tw2r = 0.4154150130018864255293, + tw2i = sign * 0.9096319953545183714117, + tw3r = -0.1423148382732851404438, + tw3i = sign * 0.9898214418809327323761, + tw4r = -0.6548607339452850640569, + tw4i = sign * 0.755749574354258283774, + tw5r = -0.9594929736144973898904, + tw5i = sign * 0.2817325568414296977114; + + if (ido==1) + for (size_t k=0; k<l1; ++k) + { + PREP11(0) + PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i) + PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i) + PARTSTEP11a(3, 
8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i) + PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i) + PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i) + } + else + for (size_t k=0; k<l1; ++k) + { + { + PREP11(0) + PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i) + PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i) + PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i) + PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i) + PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i) + } + for (size_t i=1; i<ido; ++i) + { + PREP11(i) + PARTSTEP11(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i) + PARTSTEP11(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i) + PARTSTEP11(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i) + PARTSTEP11(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i) + PARTSTEP11(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i) + } + } + } + +#define CX(a,b,c) cc[(a)+ido*((b)+l1*(c))] +#define CX2(a,b) cc[(a)+idl1*(b)] +#define CH2(a,b) ch[(a)+idl1*(b)] + +NOINLINE static int passg (size_t ido, size_t ip, size_t l1, + cmplx * restrict cc, cmplx * restrict ch, const cmplx * restrict wa, + const cmplx * restrict csarr, const int sign) + { + const size_t cdim=ip; + size_t ipph = (ip+1)/2; + size_t idl1 = ido*l1; + + cmplx * restrict wal=RALLOC(cmplx,ip); + if (!wal) return -1; + wal[0]=(cmplx){1.,0.}; + for (size_t i=1; i<ip; ++i) + wal[i]=(cmplx){csarr[i].r,sign*csarr[i].i}; + + for (size_t k=0; k<l1; ++k) + for (size_t i=0; i<ido; ++i) + CH(i,k,0) = CC(i,0,k); + for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) + for (size_t k=0; k<l1; ++k) + for (size_t i=0; i<ido; ++i) + PMC(CH(i,k,j),CH(i,k,jc),CC(i,j,k),CC(i,jc,k)) + for (size_t k=0; k<l1; ++k) + for (size_t i=0; i<ido; ++i) + { + cmplx tmp = CH(i,k,0); + for (size_t j=1; j<ipph; ++j) + ADDC(tmp,tmp,CH(i,k,j)) + CX(i,k,0) = tmp; + } + for (size_t l=1, lc=ip-1; l<ipph; ++l, --lc) + { + // j=0 + for (size_t ik=0; ik<idl1; ++ik) + { + CX2(ik,l).r = CH2(ik,0).r+wal[l].r*CH2(ik,1).r+wal[2*l].r*CH2(ik,2).r; + CX2(ik,l).i = CH2(ik,0).i+wal[l].r*CH2(ik,1).i+wal[2*l].r*CH2(ik,2).i; + CX2(ik,lc).r=-wal[l].i*CH2(ik,ip-1).i-wal[2*l].i*CH2(ik,ip-2).i; + CX2(ik,lc).i=wal[l].i*CH2(ik,ip-1).r+wal[2*l].i*CH2(ik,ip-2).r; + } + + size_t iwal=2*l; + size_t j=3, jc=ip-3; + for (; j<ipph-1; j+=2, jc-=2) + { + iwal+=l; if (iwal>ip) iwal-=ip; + cmplx xwal=wal[iwal]; + iwal+=l; if (iwal>ip) iwal-=ip; + cmplx xwal2=wal[iwal]; + for (size_t ik=0; ik<idl1; ++ik) + { + CX2(ik,l).r += CH2(ik,j).r*xwal.r+CH2(ik,j+1).r*xwal2.r; + CX2(ik,l).i += CH2(ik,j).i*xwal.r+CH2(ik,j+1).i*xwal2.r; + CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i+CH2(ik,jc-1).i*xwal2.i; + CX2(ik,lc).i += CH2(ik,jc).r*xwal.i+CH2(ik,jc-1).r*xwal2.i; + } + } + for (; j<ipph; ++j, --jc) + { + iwal+=l; if (iwal>ip) iwal-=ip; + cmplx xwal=wal[iwal]; + for (size_t ik=0; ik<idl1; ++ik) + { + CX2(ik,l).r += CH2(ik,j).r*xwal.r; + CX2(ik,l).i += CH2(ik,j).i*xwal.r; + CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i; + CX2(ik,lc).i += CH2(ik,jc).r*xwal.i; + } + } + } + DEALLOC(wal); + + // shuffling and twiddling + if (ido==1) + for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) + for (size_t ik=0; ik<idl1; ++ik) + { + cmplx t1=CX2(ik,j), t2=CX2(ik,jc); + PMC(CX2(ik,j),CX2(ik,jc),t1,t2) + } + else + { + for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) + for (size_t k=0; k<l1; ++k) + { + cmplx t1=CX(0,k,j), t2=CX(0,k,jc); + 
PMC(CX(0,k,j),CX(0,k,jc),t1,t2) + for (size_t i=1; i<ido; ++i) + { + cmplx x1, x2; + PMC(x1,x2,CX(i,k,j),CX(i,k,jc)) + size_t idij=(j-1)*(ido-1)+i-1; + MULPMSIGNC (CX(i,k,j),wa[idij],x1) + idij=(jc-1)*(ido-1)+i-1; + MULPMSIGNC (CX(i,k,jc),wa[idij],x2) + } + } + } + return 0; + } + +#undef CH2 +#undef CX2 +#undef CX + +NOINLINE WARN_UNUSED_RESULT static int pass_all(cfftp_plan plan, cmplx c[], double fct, + const int sign) + { + if (plan->length==1) return 0; + size_t len=plan->length; + size_t l1=1, nf=plan->nfct; + cmplx *ch = RALLOC(cmplx, len); + if (!ch) return -1; + cmplx *p1=c, *p2=ch; + + for(size_t k1=0; k1<nf; k1++) + { + size_t ip=plan->fct[k1].fct; + size_t l2=ip*l1; + size_t ido = len/l2; + if (ip==4) + sign>0 ? pass4b (ido, l1, p1, p2, plan->fct[k1].tw) + : pass4f (ido, l1, p1, p2, plan->fct[k1].tw); + else if(ip==2) + sign>0 ? pass2b (ido, l1, p1, p2, plan->fct[k1].tw) + : pass2f (ido, l1, p1, p2, plan->fct[k1].tw); + else if(ip==3) + sign>0 ? pass3b (ido, l1, p1, p2, plan->fct[k1].tw) + : pass3f (ido, l1, p1, p2, plan->fct[k1].tw); + else if(ip==5) + sign>0 ? pass5b (ido, l1, p1, p2, plan->fct[k1].tw) + : pass5f (ido, l1, p1, p2, plan->fct[k1].tw); + else if(ip==7) pass7 (ido, l1, p1, p2, plan->fct[k1].tw, sign); + else if(ip==11) pass11(ido, l1, p1, p2, plan->fct[k1].tw, sign); + else + { + if (passg(ido, ip, l1, p1, p2, plan->fct[k1].tw, plan->fct[k1].tws, sign)) + { DEALLOC(ch); return -1; } + SWAP(p1,p2,cmplx *); + } + SWAP(p1,p2,cmplx *); + l1=l2; + } + if (p1!=c) + { + if (fct!=1.) + for (size_t i=0; i<len; ++i) + { + c[i].r = ch[i].r*fct; + c[i].i = ch[i].i*fct; + } + else + memcpy (c,p1,len*sizeof(cmplx)); + } + else + if (fct!=1.) + for (size_t i=0; i<len; ++i) + { + c[i].r *= fct; + c[i].i *= fct; + } + DEALLOC(ch); + return 0; + } + +#undef PMSIGNC +#undef A_EQ_B_MUL_C +#undef A_EQ_CB_MUL_C +#undef MULPMSIGNC +#undef MULPMSIGNCEQ + +#undef WA +#undef CC +#undef CH +#undef ROT90 +#undef SCALEC +#undef ADDC +#undef PMC + +NOINLINE WARN_UNUSED_RESULT +static int cfftp_forward(cfftp_plan plan, double c[], double fct) + { return pass_all(plan,(cmplx *)c, fct, -1); } + +NOINLINE WARN_UNUSED_RESULT +static int cfftp_backward(cfftp_plan plan, double c[], double fct) + { return pass_all(plan,(cmplx *)c, fct, 1); } + +NOINLINE WARN_UNUSED_RESULT +static int cfftp_factorize (cfftp_plan plan) + { + size_t length=plan->length; + size_t nfct=0; + while ((length%4)==0) + { if (nfct>=NFCT) return -1; plan->fct[nfct++].fct=4; length>>=2; } + if ((length%2)==0) + { + length>>=1; + // factor 2 should be at the front of the factor list + if (nfct>=NFCT) return -1; + plan->fct[nfct++].fct=2; + SWAP(plan->fct[0].fct, plan->fct[nfct-1].fct,size_t); + } + size_t maxl=(size_t)(sqrt((double)length))+1; + for (size_t divisor=3; (length>1)&&(divisor<maxl); divisor+=2) + if ((length%divisor)==0) + { + while ((length%divisor)==0) + { + if (nfct>=NFCT) return -1; + plan->fct[nfct++].fct=divisor; + length/=divisor; + } + maxl=(size_t)(sqrt((double)length))+1; + } + if (length>1) plan->fct[nfct++].fct=length; + plan->nfct=nfct; + return 0; + } + +NOINLINE static size_t cfftp_twsize (cfftp_plan plan) + { + size_t twsize=0, l1=1; + for (size_t k=0; k<plan->nfct; ++k) + { + size_t ip=plan->fct[k].fct, ido= plan->length/(l1*ip); + twsize+=(ip-1)*(ido-1); + if (ip>11) + twsize+=ip; + l1*=ip; + } + return twsize; + } + +NOINLINE WARN_UNUSED_RESULT static int cfftp_comp_twiddle (cfftp_plan plan) + { + size_t length=plan->length; + double *twid = RALLOC(double, 2*length); + if (!twid) return -1; + 
sincos_2pibyn(length, twid); + size_t l1=1; + size_t memofs=0; + for (size_t k=0; k<plan->nfct; ++k) + { + size_t ip=plan->fct[k].fct, ido= length/(l1*ip); + plan->fct[k].tw=plan->mem+memofs; + memofs+=(ip-1)*(ido-1); + for (size_t j=1; j<ip; ++j) + for (size_t i=1; i<ido; ++i) + { + plan->fct[k].tw[(j-1)*(ido-1)+i-1].r = twid[2*j*l1*i]; + plan->fct[k].tw[(j-1)*(ido-1)+i-1].i = twid[2*j*l1*i+1]; + } + if (ip>11) + { + plan->fct[k].tws=plan->mem+memofs; + memofs+=ip; + for (size_t j=0; j<ip; ++j) + { + plan->fct[k].tws[j].r = twid[2*j*l1*ido]; + plan->fct[k].tws[j].i = twid[2*j*l1*ido+1]; + } + } + l1*=ip; + } + DEALLOC(twid); + return 0; + } + +static cfftp_plan make_cfftp_plan (size_t length) + { + if (length==0) return NULL; + cfftp_plan plan = RALLOC(cfftp_plan_i,1); + if (!plan) return NULL; + plan->length=length; + plan->nfct=0; + for (size_t i=0; i<NFCT; ++i) + plan->fct[i]=(cfftp_fctdata){0,0,0}; + plan->mem=0; + if (length==1) return plan; + if (cfftp_factorize(plan)!=0) { DEALLOC(plan); return NULL; } + size_t tws=cfftp_twsize(plan); + plan->mem=RALLOC(cmplx,tws); + if (!plan->mem) { DEALLOC(plan); return NULL; } + if (cfftp_comp_twiddle(plan)!=0) + { DEALLOC(plan->mem); DEALLOC(plan); return NULL; } + return plan; + } + +static void destroy_cfftp_plan (cfftp_plan plan) + { + DEALLOC(plan->mem); + DEALLOC(plan); + } + +typedef struct rfftp_fctdata + { + size_t fct; + double *tw, *tws; + } rfftp_fctdata; + +typedef struct rfftp_plan_i + { + size_t length, nfct; + double *mem; + rfftp_fctdata fct[NFCT]; + } rfftp_plan_i; +typedef struct rfftp_plan_i * rfftp_plan; + +#define WA(x,i) wa[(i)+(x)*(ido-1)] +#define PM(a,b,c,d) { a=c+d; b=c-d; } +/* (a+ib) = conj(c+id) * (e+if) */ +#define MULPM(a,b,c,d,e,f) { a=c*e+d*f; b=c*f-d*e; } + +#define CC(a,b,c) cc[(a)+ido*((b)+l1*(c))] +#define CH(a,b,c) ch[(a)+ido*((b)+cdim*(c))] + +NOINLINE static void radf2 (size_t ido, size_t l1, const double * restrict cc, + double * restrict ch, const double * restrict wa) + { + const size_t cdim=2; + + for (size_t k=0; k<l1; k++) + PM (CH(0,0,k),CH(ido-1,1,k),CC(0,k,0),CC(0,k,1)) + if ((ido&1)==0) + for (size_t k=0; k<l1; k++) + { + CH( 0,1,k) = -CC(ido-1,k,1); + CH(ido-1,0,k) = CC(ido-1,k,0); + } + if (ido<=2) return; + for (size_t k=0; k<l1; k++) + for (size_t i=2; i<ido; i+=2) + { + size_t ic=ido-i; + double tr2, ti2; + MULPM (tr2,ti2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1)) + PM (CH(i-1,0,k),CH(ic-1,1,k),CC(i-1,k,0),tr2) + PM (CH(i ,0,k),CH(ic ,1,k),ti2,CC(i ,k,0)) + } + } + +NOINLINE static void radf3(size_t ido, size_t l1, const double * restrict cc, + double * restrict ch, const double * restrict wa) + { + const size_t cdim=3; + static const double taur=-0.5, taui=0.86602540378443864676; + + for (size_t k=0; k<l1; k++) + { + double cr2=CC(0,k,1)+CC(0,k,2); + CH(0,0,k) = CC(0,k,0)+cr2; + CH(0,2,k) = taui*(CC(0,k,2)-CC(0,k,1)); + CH(ido-1,1,k) = CC(0,k,0)+taur*cr2; + } + if (ido==1) return; + for (size_t k=0; k<l1; k++) + for (size_t i=2; i<ido; i+=2) + { + size_t ic=ido-i; + double di2, di3, dr2, dr3; + MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1)) // d2=conj(WA0)*CC1 + MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2)) // d3=conj(WA1)*CC2 + double cr2=dr2+dr3; // c add + double ci2=di2+di3; + CH(i-1,0,k) = CC(i-1,k,0)+cr2; // c add + CH(i ,0,k) = CC(i ,k,0)+ci2; + double tr2 = CC(i-1,k,0)+taur*cr2; // c add + double ti2 = CC(i ,k,0)+taur*ci2; + double tr3 = taui*(di2-di3); // t3 = taui*i*(d3-d2)? 
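+        // The '?' above seems answerable: with t3 = taui*i*(d3-d2),
+        //   Re(t3) = -taui*(di3-di2) = taui*(di2-di3)   (= tr3 above)
+        //   Im(t3) =  taui*(dr3-dr2)                    (= ti3 below)
+        // i.e. tr3/ti3 are just the real/imaginary parts of that product.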
+ double ti3 = taui*(dr3-dr2); + PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr3) // PM(i) = t2+t3 + PM(CH(i ,2,k),CH(ic ,1,k),ti3,ti2) // PM(ic) = conj(t2-t3) + } + } + +NOINLINE static void radf4(size_t ido, size_t l1, const double * restrict cc, + double * restrict ch, const double * restrict wa) + { + const size_t cdim=4; + static const double hsqt2=0.70710678118654752440; + + for (size_t k=0; k<l1; k++) + { + double tr1,tr2; + PM (tr1,CH(0,2,k),CC(0,k,3),CC(0,k,1)) + PM (tr2,CH(ido-1,1,k),CC(0,k,0),CC(0,k,2)) + PM (CH(0,0,k),CH(ido-1,3,k),tr2,tr1) + } + if ((ido&1)==0) + for (size_t k=0; k<l1; k++) + { + double ti1=-hsqt2*(CC(ido-1,k,1)+CC(ido-1,k,3)); + double tr1= hsqt2*(CC(ido-1,k,1)-CC(ido-1,k,3)); + PM (CH(ido-1,0,k),CH(ido-1,2,k),CC(ido-1,k,0),tr1) + PM (CH( 0,3,k),CH( 0,1,k),ti1,CC(ido-1,k,2)) + } + if (ido<=2) return; + for (size_t k=0; k<l1; k++) + for (size_t i=2; i<ido; i+=2) + { + size_t ic=ido-i; + double ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; + MULPM(cr2,ci2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1)) + MULPM(cr3,ci3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2)) + MULPM(cr4,ci4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3)) + PM(tr1,tr4,cr4,cr2) + PM(ti1,ti4,ci2,ci4) + PM(tr2,tr3,CC(i-1,k,0),cr3) + PM(ti2,ti3,CC(i ,k,0),ci3) + PM(CH(i-1,0,k),CH(ic-1,3,k),tr2,tr1) + PM(CH(i ,0,k),CH(ic ,3,k),ti1,ti2) + PM(CH(i-1,2,k),CH(ic-1,1,k),tr3,ti4) + PM(CH(i ,2,k),CH(ic ,1,k),tr4,ti3) + } + } + +NOINLINE static void radf5(size_t ido, size_t l1, const double * restrict cc, + double * restrict ch, const double * restrict wa) + { + const size_t cdim=5; + static const double tr11= 0.3090169943749474241, ti11=0.95105651629515357212, + tr12=-0.8090169943749474241, ti12=0.58778525229247312917; + + for (size_t k=0; k<l1; k++) + { + double cr2, cr3, ci4, ci5; + PM (cr2,ci5,CC(0,k,4),CC(0,k,1)) + PM (cr3,ci4,CC(0,k,3),CC(0,k,2)) + CH(0,0,k)=CC(0,k,0)+cr2+cr3; + CH(ido-1,1,k)=CC(0,k,0)+tr11*cr2+tr12*cr3; + CH(0,2,k)=ti11*ci5+ti12*ci4; + CH(ido-1,3,k)=CC(0,k,0)+tr12*cr2+tr11*cr3; + CH(0,4,k)=ti12*ci5-ti11*ci4; + } + if (ido==1) return; + for (size_t k=0; k<l1;++k) + for (size_t i=2; i<ido; i+=2) + { + double ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, + dr4, dr5, cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5; + size_t ic=ido-i; + MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1)) + MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2)) + MULPM (dr4,di4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3)) + MULPM (dr5,di5,WA(3,i-2),WA(3,i-1),CC(i-1,k,4),CC(i,k,4)) + PM(cr2,ci5,dr5,dr2) + PM(ci2,cr5,di2,di5) + PM(cr3,ci4,dr4,dr3) + PM(ci3,cr4,di3,di4) + CH(i-1,0,k)=CC(i-1,k,0)+cr2+cr3; + CH(i ,0,k)=CC(i ,k,0)+ci2+ci3; + tr2=CC(i-1,k,0)+tr11*cr2+tr12*cr3; + ti2=CC(i ,k,0)+tr11*ci2+tr12*ci3; + tr3=CC(i-1,k,0)+tr12*cr2+tr11*cr3; + ti3=CC(i ,k,0)+tr12*ci2+tr11*ci3; + MULPM(tr5,tr4,cr5,cr4,ti11,ti12) + MULPM(ti5,ti4,ci5,ci4,ti11,ti12) + PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr5) + PM(CH(i ,2,k),CH(ic ,1,k),ti5,ti2) + PM(CH(i-1,4,k),CH(ic-1,3,k),tr3,tr4) + PM(CH(i ,4,k),CH(ic ,3,k),ti4,ti3) + } + } + +#undef CC +#undef CH +#define C1(a,b,c) cc[(a)+ido*((b)+l1*(c))] +#define C2(a,b) cc[(a)+idl1*(b)] +#define CH2(a,b) ch[(a)+idl1*(b)] +#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))] +#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))] +NOINLINE static void radfg(size_t ido, size_t ip, size_t l1, + double * restrict cc, double * restrict ch, const double * restrict wa, + const double * restrict csarr) + { + const size_t cdim=ip; + size_t ipph=(ip+1)/2; + size_t idl1 = ido*l1; + + if (ido>1) + 
{ + for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 114 + { + size_t is=(j-1)*(ido-1), + is2=(jc-1)*(ido-1); + for (size_t k=0; k<l1; ++k) // 113 + { + size_t idij=is; + size_t idij2=is2; + for (size_t i=1; i<=ido-2; i+=2) // 112 + { + double t1=C1(i,k,j ), t2=C1(i+1,k,j ), + t3=C1(i,k,jc), t4=C1(i+1,k,jc); + double x1=wa[idij]*t1 + wa[idij+1]*t2, + x2=wa[idij]*t2 - wa[idij+1]*t1, + x3=wa[idij2]*t3 + wa[idij2+1]*t4, + x4=wa[idij2]*t4 - wa[idij2+1]*t3; + C1(i ,k,j ) = x1+x3; + C1(i ,k,jc) = x2-x4; + C1(i+1,k,j ) = x2+x4; + C1(i+1,k,jc) = x3-x1; + idij+=2; + idij2+=2; + } + } + } + } + + for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 123 + for (size_t k=0; k<l1; ++k) // 122 + { + double t1=C1(0,k,j), t2=C1(0,k,jc); + C1(0,k,j ) = t1+t2; + C1(0,k,jc) = t2-t1; + } + +//everything in C +//memset(ch,0,ip*l1*ido*sizeof(double)); + + for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc) // 127 + { + for (size_t ik=0; ik<idl1; ++ik) // 124 + { + CH2(ik,l ) = C2(ik,0)+csarr[2*l]*C2(ik,1)+csarr[4*l]*C2(ik,2); + CH2(ik,lc) = csarr[2*l+1]*C2(ik,ip-1)+csarr[4*l+1]*C2(ik,ip-2); + } + size_t iang = 2*l; + size_t j=3, jc=ip-3; + for (; j<ipph-3; j+=4,jc-=4) // 126 + { + iang+=l; if (iang>=ip) iang-=ip; + double ar1=csarr[2*iang], ai1=csarr[2*iang+1]; + iang+=l; if (iang>=ip) iang-=ip; + double ar2=csarr[2*iang], ai2=csarr[2*iang+1]; + iang+=l; if (iang>=ip) iang-=ip; + double ar3=csarr[2*iang], ai3=csarr[2*iang+1]; + iang+=l; if (iang>=ip) iang-=ip; + double ar4=csarr[2*iang], ai4=csarr[2*iang+1]; + for (size_t ik=0; ik<idl1; ++ik) // 125 + { + CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1) + +ar3*C2(ik,j +2)+ar4*C2(ik,j +3); + CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1) + +ai3*C2(ik,jc-2)+ai4*C2(ik,jc-3); + } + } + for (; j<ipph-1; j+=2,jc-=2) // 126 + { + iang+=l; if (iang>=ip) iang-=ip; + double ar1=csarr[2*iang], ai1=csarr[2*iang+1]; + iang+=l; if (iang>=ip) iang-=ip; + double ar2=csarr[2*iang], ai2=csarr[2*iang+1]; + for (size_t ik=0; ik<idl1; ++ik) // 125 + { + CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1); + CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1); + } + } + for (; j<ipph; ++j,--jc) // 126 + { + iang+=l; if (iang>=ip) iang-=ip; + double ar=csarr[2*iang], ai=csarr[2*iang+1]; + for (size_t ik=0; ik<idl1; ++ik) // 125 + { + CH2(ik,l ) += ar*C2(ik,j ); + CH2(ik,lc) += ai*C2(ik,jc); + } + } + } + for (size_t ik=0; ik<idl1; ++ik) // 101 + CH2(ik,0) = C2(ik,0); + for (size_t j=1; j<ipph; ++j) // 129 + for (size_t ik=0; ik<idl1; ++ik) // 128 + CH2(ik,0) += C2(ik,j); + +// everything in CH at this point! 
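+// (Sketch of the remaining phase, inferred from the loops below: CH now holds
+// the accumulated harmonics, and they are repacked into FFTPACK's half-complex
+// output layout in cc -- the j=0 block is copied as-is, the i=0 terms of each
+// harmonic pair (j, ip-j) land in columns 2j-1 / 2j, and the general terms are
+// formed as sums/differences of CH(.,.,j) and CH(.,.,jc).  The numeric tags
+// such as "// 130" appear to be the statement labels of the original Fortran
+// radfg.)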
+//memset(cc,0,ip*l1*ido*sizeof(double)); + + for (size_t k=0; k<l1; ++k) // 131 + for (size_t i=0; i<ido; ++i) // 130 + CC(i,0,k) = CH(i,k,0); + + for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 137 + { + size_t j2=2*j-1; + for (size_t k=0; k<l1; ++k) // 136 + { + CC(ido-1,j2,k) = CH(0,k,j); + CC(0,j2+1,k) = CH(0,k,jc); + } + } + + if (ido==1) return; + + for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 140 + { + size_t j2=2*j-1; + for(size_t k=0; k<l1; ++k) // 139 + for(size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2) // 138 + { + CC(i ,j2+1,k) = CH(i ,k,j )+CH(i ,k,jc); + CC(ic ,j2 ,k) = CH(i ,k,j )-CH(i ,k,jc); + CC(i+1 ,j2+1,k) = CH(i+1,k,j )+CH(i+1,k,jc); + CC(ic+1,j2 ,k) = CH(i+1,k,jc)-CH(i+1,k,j ); + } + } + } +#undef C1 +#undef C2 +#undef CH2 + +#undef CH +#undef CC +#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))] +#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))] + +NOINLINE static void radb2(size_t ido, size_t l1, const double * restrict cc, + double * restrict ch, const double * restrict wa) + { + const size_t cdim=2; + + for (size_t k=0; k<l1; k++) + PM (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(ido-1,1,k)) + if ((ido&1)==0) + for (size_t k=0; k<l1; k++) + { + CH(ido-1,k,0) = 2.*CC(ido-1,0,k); + CH(ido-1,k,1) =-2.*CC(0 ,1,k); + } + if (ido<=2) return; + for (size_t k=0; k<l1;++k) + for (size_t i=2; i<ido; i+=2) + { + size_t ic=ido-i; + double ti2, tr2; + PM (CH(i-1,k,0),tr2,CC(i-1,0,k),CC(ic-1,1,k)) + PM (ti2,CH(i ,k,0),CC(i ,0,k),CC(ic ,1,k)) + MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ti2,tr2) + } + } + +NOINLINE static void radb3(size_t ido, size_t l1, const double * restrict cc, + double * restrict ch, const double * restrict wa) + { + const size_t cdim=3; + static const double taur=-0.5, taui=0.86602540378443864676; + + for (size_t k=0; k<l1; k++) + { + double tr2=2.*CC(ido-1,1,k); + double cr2=CC(0,0,k)+taur*tr2; + CH(0,k,0)=CC(0,0,k)+tr2; + double ci3=2.*taui*CC(0,2,k); + PM (CH(0,k,2),CH(0,k,1),cr2,ci3); + } + if (ido==1) return; + for (size_t k=0; k<l1; k++) + for (size_t i=2; i<ido; i+=2) + { + size_t ic=ido-i; + double tr2=CC(i-1,2,k)+CC(ic-1,1,k); // t2=CC(I) + conj(CC(ic)) + double ti2=CC(i ,2,k)-CC(ic ,1,k); + double cr2=CC(i-1,0,k)+taur*tr2; // c2=CC +taur*t2 + double ci2=CC(i ,0,k)+taur*ti2; + CH(i-1,k,0)=CC(i-1,0,k)+tr2; // CH=CC+t2 + CH(i ,k,0)=CC(i ,0,k)+ti2; + double cr3=taui*(CC(i-1,2,k)-CC(ic-1,1,k));// c3=taui*(CC(i)-conj(CC(ic))) + double ci3=taui*(CC(i ,2,k)+CC(ic ,1,k)); + double di2, di3, dr2, dr3; + PM(dr3,dr2,cr2,ci3) // d2= (cr2-ci3, ci2+cr3) = c2+i*c3 + PM(di2,di3,ci2,cr3) // d3= (cr2+ci3, ci2-cr3) = c2-i*c3 + MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2) // ch = WA*d2 + MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3) + } + } + +NOINLINE static void radb4(size_t ido, size_t l1, const double * restrict cc, + double * restrict ch, const double * restrict wa) + { + const size_t cdim=4; + static const double sqrt2=1.41421356237309504880; + + for (size_t k=0; k<l1; k++) + { + double tr1, tr2; + PM (tr2,tr1,CC(0,0,k),CC(ido-1,3,k)) + double tr3=2.*CC(ido-1,1,k); + double tr4=2.*CC(0,2,k); + PM (CH(0,k,0),CH(0,k,2),tr2,tr3) + PM (CH(0,k,3),CH(0,k,1),tr1,tr4) + } + if ((ido&1)==0) + for (size_t k=0; k<l1; k++) + { + double tr1,tr2,ti1,ti2; + PM (ti1,ti2,CC(0 ,3,k),CC(0 ,1,k)) + PM (tr2,tr1,CC(ido-1,0,k),CC(ido-1,2,k)) + CH(ido-1,k,0)=tr2+tr2; + CH(ido-1,k,1)=sqrt2*(tr1-ti1); + CH(ido-1,k,2)=ti2+ti2; + CH(ido-1,k,3)=-sqrt2*(tr1+ti1); + } + if (ido<=2) return; + for (size_t k=0; k<l1;++k) + for (size_t i=2; i<ido; i+=2) + { + double ci2, ci3, ci4, cr2, 
cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4; + size_t ic=ido-i; + PM (tr2,tr1,CC(i-1,0,k),CC(ic-1,3,k)) + PM (ti1,ti2,CC(i ,0,k),CC(ic ,3,k)) + PM (tr4,ti3,CC(i ,2,k),CC(ic ,1,k)) + PM (tr3,ti4,CC(i-1,2,k),CC(ic-1,1,k)) + PM (CH(i-1,k,0),cr3,tr2,tr3) + PM (CH(i ,k,0),ci3,ti2,ti3) + PM (cr4,cr2,tr1,tr4) + PM (ci2,ci4,ti1,ti4) + MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ci2,cr2) + MULPM (CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),ci3,cr3) + MULPM (CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),ci4,cr4) + } + } + +NOINLINE static void radb5(size_t ido, size_t l1, const double * restrict cc, + double * restrict ch, const double * restrict wa) + { + const size_t cdim=5; + static const double tr11= 0.3090169943749474241, ti11=0.95105651629515357212, + tr12=-0.8090169943749474241, ti12=0.58778525229247312917; + + for (size_t k=0; k<l1; k++) + { + double ti5=CC(0,2,k)+CC(0,2,k); + double ti4=CC(0,4,k)+CC(0,4,k); + double tr2=CC(ido-1,1,k)+CC(ido-1,1,k); + double tr3=CC(ido-1,3,k)+CC(ido-1,3,k); + CH(0,k,0)=CC(0,0,k)+tr2+tr3; + double cr2=CC(0,0,k)+tr11*tr2+tr12*tr3; + double cr3=CC(0,0,k)+tr12*tr2+tr11*tr3; + double ci4, ci5; + MULPM(ci5,ci4,ti5,ti4,ti11,ti12) + PM(CH(0,k,4),CH(0,k,1),cr2,ci5) + PM(CH(0,k,3),CH(0,k,2),cr3,ci4) + } + if (ido==1) return; + for (size_t k=0; k<l1;++k) + for (size_t i=2; i<ido; i+=2) + { + size_t ic=ido-i; + double tr2, tr3, tr4, tr5, ti2, ti3, ti4, ti5; + PM(tr2,tr5,CC(i-1,2,k),CC(ic-1,1,k)) + PM(ti5,ti2,CC(i ,2,k),CC(ic ,1,k)) + PM(tr3,tr4,CC(i-1,4,k),CC(ic-1,3,k)) + PM(ti4,ti3,CC(i ,4,k),CC(ic ,3,k)) + CH(i-1,k,0)=CC(i-1,0,k)+tr2+tr3; + CH(i ,k,0)=CC(i ,0,k)+ti2+ti3; + double cr2=CC(i-1,0,k)+tr11*tr2+tr12*tr3; + double ci2=CC(i ,0,k)+tr11*ti2+tr12*ti3; + double cr3=CC(i-1,0,k)+tr12*tr2+tr11*tr3; + double ci3=CC(i ,0,k)+tr12*ti2+tr11*ti3; + double ci4, ci5, cr5, cr4; + MULPM(cr5,cr4,tr5,tr4,ti11,ti12) + MULPM(ci5,ci4,ti5,ti4,ti11,ti12) + double dr2, dr3, dr4, dr5, di2, di3, di4, di5; + PM(dr4,dr3,cr3,ci4) + PM(di3,di4,ci3,cr4) + PM(dr5,dr2,cr2,ci5) + PM(di2,di5,ci2,cr5) + MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2) + MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3) + MULPM(CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),di4,dr4) + MULPM(CH(i,k,4),CH(i-1,k,4),WA(3,i-2),WA(3,i-1),di5,dr5) + } + } + +#undef CC +#undef CH +#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))] +#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))] +#define C1(a,b,c) cc[(a)+ido*((b)+l1*(c))] +#define C2(a,b) cc[(a)+idl1*(b)] +#define CH2(a,b) ch[(a)+idl1*(b)] + +NOINLINE static void radbg(size_t ido, size_t ip, size_t l1, + double * restrict cc, double * restrict ch, const double * restrict wa, + const double * restrict csarr) + { + const size_t cdim=ip; + size_t ipph=(ip+1)/ 2; + size_t idl1 = ido*l1; + + for (size_t k=0; k<l1; ++k) // 102 + for (size_t i=0; i<ido; ++i) // 101 + CH(i,k,0) = CC(i,0,k); + for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) // 108 + { + size_t j2=2*j-1; + for (size_t k=0; k<l1; ++k) + { + CH(0,k,j ) = 2*CC(ido-1,j2,k); + CH(0,k,jc) = 2*CC(0,j2+1,k); + } + } + + if (ido!=1) + { + for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 111 + { + size_t j2=2*j-1; + for (size_t k=0; k<l1; ++k) + for (size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2) // 109 + { + CH(i ,k,j ) = CC(i ,j2+1,k)+CC(ic ,j2,k); + CH(i ,k,jc) = CC(i ,j2+1,k)-CC(ic ,j2,k); + CH(i+1,k,j ) = CC(i+1,j2+1,k)-CC(ic+1,j2,k); + CH(i+1,k,jc) = CC(i+1,j2+1,k)+CC(ic+1,j2,k); + } + } + } + for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc) + { + for (size_t ik=0; ik<idl1; ++ik) + { + C2(ik,l ) = 
CH2(ik,0)+csarr[2*l]*CH2(ik,1)+csarr[4*l]*CH2(ik,2); + C2(ik,lc) = csarr[2*l+1]*CH2(ik,ip-1)+csarr[4*l+1]*CH2(ik,ip-2); + } + size_t iang=2*l; + size_t j=3,jc=ip-3; + for(; j<ipph-3; j+=4,jc-=4) + { + iang+=l; if(iang>ip) iang-=ip; + double ar1=csarr[2*iang], ai1=csarr[2*iang+1]; + iang+=l; if(iang>ip) iang-=ip; + double ar2=csarr[2*iang], ai2=csarr[2*iang+1]; + iang+=l; if(iang>ip) iang-=ip; + double ar3=csarr[2*iang], ai3=csarr[2*iang+1]; + iang+=l; if(iang>ip) iang-=ip; + double ar4=csarr[2*iang], ai4=csarr[2*iang+1]; + for (size_t ik=0; ik<idl1; ++ik) + { + C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1) + +ar3*CH2(ik,j +2)+ar4*CH2(ik,j +3); + C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1) + +ai3*CH2(ik,jc-2)+ai4*CH2(ik,jc-3); + } + } + for(; j<ipph-1; j+=2,jc-=2) + { + iang+=l; if(iang>ip) iang-=ip; + double ar1=csarr[2*iang], ai1=csarr[2*iang+1]; + iang+=l; if(iang>ip) iang-=ip; + double ar2=csarr[2*iang], ai2=csarr[2*iang+1]; + for (size_t ik=0; ik<idl1; ++ik) + { + C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1); + C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1); + } + } + for(; j<ipph; ++j,--jc) + { + iang+=l; if(iang>ip) iang-=ip; + double war=csarr[2*iang], wai=csarr[2*iang+1]; + for (size_t ik=0; ik<idl1; ++ik) + { + C2(ik,l ) += war*CH2(ik,j ); + C2(ik,lc) += wai*CH2(ik,jc); + } + } + } + for (size_t j=1; j<ipph; ++j) + for (size_t ik=0; ik<idl1; ++ik) + CH2(ik,0) += CH2(ik,j); + for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 124 + for (size_t k=0; k<l1; ++k) + { + CH(0,k,j ) = C1(0,k,j)-C1(0,k,jc); + CH(0,k,jc) = C1(0,k,j)+C1(0,k,jc); + } + + if (ido==1) return; + + for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) // 127 + for (size_t k=0; k<l1; ++k) + for (size_t i=1; i<=ido-2; i+=2) + { + CH(i ,k,j ) = C1(i ,k,j)-C1(i+1,k,jc); + CH(i ,k,jc) = C1(i ,k,j)+C1(i+1,k,jc); + CH(i+1,k,j ) = C1(i+1,k,j)+C1(i ,k,jc); + CH(i+1,k,jc) = C1(i+1,k,j)-C1(i ,k,jc); + } + +// All in CH + + for (size_t j=1; j<ip; ++j) + { + size_t is = (j-1)*(ido-1); + for (size_t k=0; k<l1; ++k) + { + size_t idij = is; + for (size_t i=1; i<=ido-2; i+=2) + { + double t1=CH(i,k,j), t2=CH(i+1,k,j); + CH(i ,k,j) = wa[idij]*t1-wa[idij+1]*t2; + CH(i+1,k,j) = wa[idij]*t2+wa[idij+1]*t1; + idij+=2; + } + } + } + } +#undef C1 +#undef C2 +#undef CH2 + +#undef CC +#undef CH +#undef PM +#undef MULPM +#undef WA + +static void copy_and_norm(double *c, double *p1, size_t n, double fct) + { + if (p1!=c) + { + if (fct!=1.) + for (size_t i=0; i<n; ++i) + c[i] = fct*p1[i]; + else + memcpy (c,p1,n*sizeof(double)); + } + else + if (fct!=1.) 
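+      /* p1 == c on this branch: the transform result already sits in the
+         caller's buffer, so only the normalisation factor is applied in place */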
+ for (size_t i=0; i<n; ++i) + c[i] *= fct; + } + +WARN_UNUSED_RESULT +static int rfftp_forward(rfftp_plan plan, double c[], double fct) + { + if (plan->length==1) return 0; + size_t n=plan->length; + size_t l1=n, nf=plan->nfct; + double *ch = RALLOC(double, n); + if (!ch) return -1; + double *p1=c, *p2=ch; + + for(size_t k1=0; k1<nf;++k1) + { + size_t k=nf-k1-1; + size_t ip=plan->fct[k].fct; + size_t ido=n / l1; + l1 /= ip; + if(ip==4) + radf4(ido, l1, p1, p2, plan->fct[k].tw); + else if(ip==2) + radf2(ido, l1, p1, p2, plan->fct[k].tw); + else if(ip==3) + radf3(ido, l1, p1, p2, plan->fct[k].tw); + else if(ip==5) + radf5(ido, l1, p1, p2, plan->fct[k].tw); + else + { + radfg(ido, ip, l1, p1, p2, plan->fct[k].tw, plan->fct[k].tws); + SWAP (p1,p2,double *); + } + SWAP (p1,p2,double *); + } + copy_and_norm(c,p1,n,fct); + DEALLOC(ch); + return 0; + } + +WARN_UNUSED_RESULT +static int rfftp_backward(rfftp_plan plan, double c[], double fct) + { + if (plan->length==1) return 0; + size_t n=plan->length; + size_t l1=1, nf=plan->nfct; + double *ch = RALLOC(double, n); + if (!ch) return -1; + double *p1=c, *p2=ch; + + for(size_t k=0; k<nf; k++) + { + size_t ip = plan->fct[k].fct, + ido= n/(ip*l1); + if(ip==4) + radb4(ido, l1, p1, p2, plan->fct[k].tw); + else if(ip==2) + radb2(ido, l1, p1, p2, plan->fct[k].tw); + else if(ip==3) + radb3(ido, l1, p1, p2, plan->fct[k].tw); + else if(ip==5) + radb5(ido, l1, p1, p2, plan->fct[k].tw); + else + radbg(ido, ip, l1, p1, p2, plan->fct[k].tw, plan->fct[k].tws); + SWAP (p1,p2,double *); + l1*=ip; + } + copy_and_norm(c,p1,n,fct); + DEALLOC(ch); + return 0; + } + +WARN_UNUSED_RESULT +static int rfftp_factorize (rfftp_plan plan) + { + size_t length=plan->length; + size_t nfct=0; + while ((length%4)==0) + { if (nfct>=NFCT) return -1; plan->fct[nfct++].fct=4; length>>=2; } + if ((length%2)==0) + { + length>>=1; + // factor 2 should be at the front of the factor list + if (nfct>=NFCT) return -1; + plan->fct[nfct++].fct=2; + SWAP(plan->fct[0].fct, plan->fct[nfct-1].fct,size_t); + } + size_t maxl=(size_t)(sqrt((double)length))+1; + for (size_t divisor=3; (length>1)&&(divisor<maxl); divisor+=2) + if ((length%divisor)==0) + { + while ((length%divisor)==0) + { + if (nfct>=NFCT) return -1; + plan->fct[nfct++].fct=divisor; + length/=divisor; + } + maxl=(size_t)(sqrt((double)length))+1; + } + if (length>1) plan->fct[nfct++].fct=length; + plan->nfct=nfct; + return 0; + } + +static size_t rfftp_twsize(rfftp_plan plan) + { + size_t twsize=0, l1=1; + for (size_t k=0; k<plan->nfct; ++k) + { + size_t ip=plan->fct[k].fct, ido= plan->length/(l1*ip); + twsize+=(ip-1)*(ido-1); + if (ip>5) twsize+=2*ip; + l1*=ip; + } + return twsize; + return 0; + } + +WARN_UNUSED_RESULT NOINLINE static int rfftp_comp_twiddle (rfftp_plan plan) + { + size_t length=plan->length; + double *twid = RALLOC(double, 2*length); + if (!twid) return -1; + sincos_2pibyn_half(length, twid); + size_t l1=1; + double *ptr=plan->mem; + for (size_t k=0; k<plan->nfct; ++k) + { + size_t ip=plan->fct[k].fct, ido=length/(l1*ip); + if (k<plan->nfct-1) // last factor doesn't need twiddles + { + plan->fct[k].tw=ptr; ptr+=(ip-1)*(ido-1); + for (size_t j=1; j<ip; ++j) + for (size_t i=1; i<=(ido-1)/2; ++i) + { + plan->fct[k].tw[(j-1)*(ido-1)+2*i-2] = twid[2*j*l1*i]; + plan->fct[k].tw[(j-1)*(ido-1)+2*i-1] = twid[2*j*l1*i+1]; + } + } + if (ip>5) // special factors required by *g functions + { + plan->fct[k].tws=ptr; ptr+=2*ip; + plan->fct[k].tws[0] = 1.; + plan->fct[k].tws[1] = 0.; + for (size_t i=1; i<=(ip>>1); ++i) + { + 
plan->fct[k].tws[2*i ] = twid[2*i*(length/ip)]; + plan->fct[k].tws[2*i+1] = twid[2*i*(length/ip)+1]; + plan->fct[k].tws[2*(ip-i) ] = twid[2*i*(length/ip)]; + plan->fct[k].tws[2*(ip-i)+1] = -twid[2*i*(length/ip)+1]; + } + } + l1*=ip; + } + DEALLOC(twid); + return 0; + } + +NOINLINE static rfftp_plan make_rfftp_plan (size_t length) + { + if (length==0) return NULL; + rfftp_plan plan = RALLOC(rfftp_plan_i,1); + if (!plan) return NULL; + plan->length=length; + plan->nfct=0; + plan->mem=NULL; + for (size_t i=0; i<NFCT; ++i) + plan->fct[i]=(rfftp_fctdata){0,0,0}; + if (length==1) return plan; + if (rfftp_factorize(plan)!=0) { DEALLOC(plan); return NULL; } + size_t tws=rfftp_twsize(plan); + plan->mem=RALLOC(double,tws); + if (!plan->mem) { DEALLOC(plan); return NULL; } + if (rfftp_comp_twiddle(plan)!=0) + { DEALLOC(plan->mem); DEALLOC(plan); return NULL; } + return plan; + } + +NOINLINE static void destroy_rfftp_plan (rfftp_plan plan) + { + DEALLOC(plan->mem); + DEALLOC(plan); + } + +typedef struct fftblue_plan_i + { + size_t n, n2; + cfftp_plan plan; + double *mem; + double *bk, *bkf; + } fftblue_plan_i; +typedef struct fftblue_plan_i * fftblue_plan; + +NOINLINE static fftblue_plan make_fftblue_plan (size_t length) + { + fftblue_plan plan = RALLOC(fftblue_plan_i,1); + if (!plan) return NULL; + plan->n = length; + plan->n2 = good_size(plan->n*2-1); + plan->mem = RALLOC(double, 2*plan->n+2*plan->n2); + if (!plan->mem) { DEALLOC(plan); return NULL; } + plan->bk = plan->mem; + plan->bkf = plan->bk+2*plan->n; + +/* initialize b_k */ + double *tmp = RALLOC(double,4*plan->n); + if (!tmp) { DEALLOC(plan->mem); DEALLOC(plan); return NULL; } + sincos_2pibyn(2*plan->n,tmp); + plan->bk[0] = 1; + plan->bk[1] = 0; + + size_t coeff=0; + for (size_t m=1; m<plan->n; ++m) + { + coeff+=2*m-1; + if (coeff>=2*plan->n) coeff-=2*plan->n; + plan->bk[2*m ] = tmp[2*coeff ]; + plan->bk[2*m+1] = tmp[2*coeff+1]; + } + + /* initialize the zero-padded, Fourier transformed b_k. Add normalisation. 
*/ + double xn2 = 1./plan->n2; + plan->bkf[0] = plan->bk[0]*xn2; + plan->bkf[1] = plan->bk[1]*xn2; + for (size_t m=2; m<2*plan->n; m+=2) + { + plan->bkf[m] = plan->bkf[2*plan->n2-m] = plan->bk[m] *xn2; + plan->bkf[m+1] = plan->bkf[2*plan->n2-m+1] = plan->bk[m+1] *xn2; + } + for (size_t m=2*plan->n;m<=(2*plan->n2-2*plan->n+1);++m) + plan->bkf[m]=0.; + plan->plan=make_cfftp_plan(plan->n2); + if (!plan->plan) + { DEALLOC(tmp); DEALLOC(plan->mem); DEALLOC(plan); return NULL; } + if (cfftp_forward(plan->plan,plan->bkf,1.)!=0) + { DEALLOC(tmp); DEALLOC(plan->mem); DEALLOC(plan); return NULL; } + DEALLOC(tmp); + + return plan; + } + +NOINLINE static void destroy_fftblue_plan (fftblue_plan plan) + { + DEALLOC(plan->mem); + destroy_cfftp_plan(plan->plan); + DEALLOC(plan); + } + +NOINLINE WARN_UNUSED_RESULT +static int fftblue_fft(fftblue_plan plan, double c[], int isign, double fct) + { + size_t n=plan->n; + size_t n2=plan->n2; + double *bk = plan->bk; + double *bkf = plan->bkf; + double *akf = RALLOC(double, 2*n2); + if (!akf) return -1; + +/* initialize a_k and FFT it */ + if (isign>0) + for (size_t m=0; m<2*n; m+=2) + { + akf[m] = c[m]*bk[m] - c[m+1]*bk[m+1]; + akf[m+1] = c[m]*bk[m+1] + c[m+1]*bk[m]; + } + else + for (size_t m=0; m<2*n; m+=2) + { + akf[m] = c[m]*bk[m] + c[m+1]*bk[m+1]; + akf[m+1] =-c[m]*bk[m+1] + c[m+1]*bk[m]; + } + for (size_t m=2*n; m<2*n2; ++m) + akf[m]=0; + + if (cfftp_forward (plan->plan,akf,fct)!=0) + { DEALLOC(akf); return -1; } + +/* do the convolution */ + if (isign>0) + for (size_t m=0; m<2*n2; m+=2) + { + double im = -akf[m]*bkf[m+1] + akf[m+1]*bkf[m]; + akf[m ] = akf[m]*bkf[m] + akf[m+1]*bkf[m+1]; + akf[m+1] = im; + } + else + for (size_t m=0; m<2*n2; m+=2) + { + double im = akf[m]*bkf[m+1] + akf[m+1]*bkf[m]; + akf[m ] = akf[m]*bkf[m] - akf[m+1]*bkf[m+1]; + akf[m+1] = im; + } + +/* inverse FFT */ + if (cfftp_backward (plan->plan,akf,1.)!=0) + { DEALLOC(akf); return -1; } + +/* multiply by b_k */ + if (isign>0) + for (size_t m=0; m<2*n; m+=2) + { + c[m] = bk[m] *akf[m] - bk[m+1]*akf[m+1]; + c[m+1] = bk[m+1]*akf[m] + bk[m] *akf[m+1]; + } + else + for (size_t m=0; m<2*n; m+=2) + { + c[m] = bk[m] *akf[m] + bk[m+1]*akf[m+1]; + c[m+1] =-bk[m+1]*akf[m] + bk[m] *akf[m+1]; + } + DEALLOC(akf); + return 0; + } + +WARN_UNUSED_RESULT +static int cfftblue_backward(fftblue_plan plan, double c[], double fct) + { return fftblue_fft(plan,c,1,fct); } + +WARN_UNUSED_RESULT +static int cfftblue_forward(fftblue_plan plan, double c[], double fct) + { return fftblue_fft(plan,c,-1,fct); } + +WARN_UNUSED_RESULT +static int rfftblue_backward(fftblue_plan plan, double c[], double fct) + { + size_t n=plan->n; + double *tmp = RALLOC(double,2*n); + if (!tmp) return -1; + tmp[0]=c[0]; + tmp[1]=0.; + memcpy (tmp+2,c+1, (n-1)*sizeof(double)); + if ((n&1)==0) tmp[n+1]=0.; + for (size_t m=2; m<n; m+=2) + { + tmp[2*n-m]=tmp[m]; + tmp[2*n-m+1]=-tmp[m+1]; + } + if (fftblue_fft(plan,tmp,1,fct)!=0) + { DEALLOC(tmp); return -1; } + for (size_t m=0; m<n; ++m) + c[m] = tmp[2*m]; + DEALLOC(tmp); + return 0; + } + +WARN_UNUSED_RESULT +static int rfftblue_forward(fftblue_plan plan, double c[], double fct) + { + size_t n=plan->n; + double *tmp = RALLOC(double,2*n); + if (!tmp) return -1; + for (size_t m=0; m<n; ++m) + { + tmp[2*m] = c[m]; + tmp[2*m+1] = 0.; + } + if (fftblue_fft(plan,tmp,-1,fct)!=0) + { DEALLOC(tmp); return -1; } + c[0] = tmp[0]; + memcpy (c+1, tmp+2, (n-1)*sizeof(double)); + DEALLOC(tmp); + return 0; + } + +typedef struct cfft_plan_i + { + cfftp_plan packplan; + fftblue_plan blueplan; + } 
cfft_plan_i; + +static cfft_plan make_cfft_plan (size_t length) + { + if (length==0) return NULL; + cfft_plan plan = RALLOC(cfft_plan_i,1); + if (!plan) return NULL; + plan->blueplan=0; + plan->packplan=0; + if ((length<50) || (largest_prime_factor(length)<=sqrt(length))) + { + plan->packplan=make_cfftp_plan(length); + if (!plan->packplan) { DEALLOC(plan); return NULL; } + return plan; + } + double comp1 = cost_guess(length); + double comp2 = 2*cost_guess(good_size(2*length-1)); + comp2*=1.5; /* fudge factor that appears to give good overall performance */ + if (comp2<comp1) // use Bluestein + { + plan->blueplan=make_fftblue_plan(length); + if (!plan->blueplan) { DEALLOC(plan); return NULL; } + } + else + { + plan->packplan=make_cfftp_plan(length); + if (!plan->packplan) { DEALLOC(plan); return NULL; } + } + return plan; + } + +static void destroy_cfft_plan (cfft_plan plan) + { + if (plan->blueplan) + destroy_fftblue_plan(plan->blueplan); + if (plan->packplan) + destroy_cfftp_plan(plan->packplan); + DEALLOC(plan); + } + +WARN_UNUSED_RESULT static int cfft_backward(cfft_plan plan, double c[], double fct) + { + if (plan->packplan) + return cfftp_backward(plan->packplan,c,fct); + // if (plan->blueplan) + return cfftblue_backward(plan->blueplan,c,fct); + } + +WARN_UNUSED_RESULT static int cfft_forward(cfft_plan plan, double c[], double fct) + { + if (plan->packplan) + return cfftp_forward(plan->packplan,c,fct); + // if (plan->blueplan) + return cfftblue_forward(plan->blueplan,c,fct); + } + +typedef struct rfft_plan_i + { + rfftp_plan packplan; + fftblue_plan blueplan; + } rfft_plan_i; + +static rfft_plan make_rfft_plan (size_t length) + { + if (length==0) return NULL; + rfft_plan plan = RALLOC(rfft_plan_i,1); + if (!plan) return NULL; + plan->blueplan=0; + plan->packplan=0; + if ((length<50) || (largest_prime_factor(length)<=sqrt(length))) + { + plan->packplan=make_rfftp_plan(length); + if (!plan->packplan) { DEALLOC(plan); return NULL; } + return plan; + } + double comp1 = 0.5*cost_guess(length); + double comp2 = 2*cost_guess(good_size(2*length-1)); + comp2*=1.5; /* fudge factor that appears to give good overall performance */ + if (comp2<comp1) // use Bluestein + { + plan->blueplan=make_fftblue_plan(length); + if (!plan->blueplan) { DEALLOC(plan); return NULL; } + } + else + { + plan->packplan=make_rfftp_plan(length); + if (!plan->packplan) { DEALLOC(plan); return NULL; } + } + return plan; + } + +static void destroy_rfft_plan (rfft_plan plan) + { + if (plan->blueplan) + destroy_fftblue_plan(plan->blueplan); + if (plan->packplan) + destroy_rfftp_plan(plan->packplan); + DEALLOC(plan); + } + +WARN_UNUSED_RESULT static int rfft_backward(rfft_plan plan, double c[], double fct) + { + if (plan->packplan) + return rfftp_backward(plan->packplan,c,fct); + else // if (plan->blueplan) + return rfftblue_backward(plan->blueplan,c,fct); + } + +WARN_UNUSED_RESULT static int rfft_forward(rfft_plan plan, double c[], double fct) + { + if (plan->packplan) + return rfftp_forward(plan->packplan,c,fct); + else // if (plan->blueplan) + return rfftblue_forward(plan->blueplan,c,fct); + } + +#define NPY_NO_DEPRECATED_API NPY_API_VERSION + +#include "Python.h" +#include "numpy/arrayobject.h" + +static PyObject * +execute_complex(PyObject *a1, int is_forward, double fct) +{ + PyArrayObject *data = (PyArrayObject *)PyArray_CopyFromObject(a1, NPY_CDOUBLE, 1, 0); + if (!data) return NULL; + + int npts = PyArray_DIM(data, PyArray_NDIM(data) - 1); + cfft_plan plan=NULL; + + int nrepeats = PyArray_SIZE(data)/npts; + double 
*dptr = (double *)PyArray_DATA(data); + int fail=0; + Py_BEGIN_ALLOW_THREADS; + NPY_SIGINT_ON; + plan = make_cfft_plan(npts); + if (!plan) fail=1; + if (!fail) + for (int i = 0; i < nrepeats; i++) { + int res = is_forward ? + cfft_forward(plan, dptr, fct) : cfft_backward(plan, dptr, fct); + if (res!=0) { fail=1; break; } + dptr += npts*2; + } + if (plan) destroy_cfft_plan(plan); + NPY_SIGINT_OFF; + Py_END_ALLOW_THREADS; + if (fail) { + Py_XDECREF(data); + return PyErr_NoMemory(); + } + return (PyObject *)data; +} + +static PyObject * +execute_real_forward(PyObject *a1, double fct) +{ + rfft_plan plan=NULL; + int fail = 0; + PyArrayObject *data = (PyArrayObject *)PyArray_ContiguousFromObject(a1, + NPY_DOUBLE, 1, 0); + if (!data) return NULL; + + int ndim = PyArray_NDIM(data); + const npy_intp *odim = PyArray_DIMS(data); + int npts = odim[ndim - 1]; + npy_intp *tdim=(npy_intp *)malloc(ndim*sizeof(npy_intp)); + if (!tdim) + { Py_XDECREF(data); return NULL; } + for (int d=0; d<ndim-1; ++d) + tdim[d] = odim[d]; + tdim[ndim-1] = npts/2 + 1; + PyArrayObject *ret = (PyArrayObject *)PyArray_Empty(ndim, + tdim, PyArray_DescrFromType(NPY_CDOUBLE), 0); + free(tdim); + if (!ret) fail=1; + if (!fail) { + int rstep = PyArray_DIM(ret, PyArray_NDIM(ret) - 1)*2; + + int nrepeats = PyArray_SIZE(data)/npts; + double *rptr = (double *)PyArray_DATA(ret), + *dptr = (double *)PyArray_DATA(data); + + Py_BEGIN_ALLOW_THREADS; + NPY_SIGINT_ON; + plan = make_rfft_plan(npts); + if (!plan) fail=1; + if (!fail) + for (int i = 0; i < nrepeats; i++) { + rptr[rstep-1] = 0.0; + memcpy((char *)(rptr+1), dptr, npts*sizeof(double)); + if (rfft_forward(plan, rptr+1, fct)!=0) {fail=1; break;} + rptr[0] = rptr[1]; + rptr[1] = 0.0; + rptr += rstep; + dptr += npts; + } + if (plan) destroy_rfft_plan(plan); + NPY_SIGINT_OFF; + Py_END_ALLOW_THREADS; + } + if (fail) { + Py_XDECREF(data); + Py_XDECREF(ret); + return PyErr_NoMemory(); + } + Py_DECREF(data); + return (PyObject *)ret; +} +static PyObject * +execute_real_backward(PyObject *a1, double fct) +{ + rfft_plan plan=NULL; + PyArrayObject *data = (PyArrayObject *)PyArray_ContiguousFromObject(a1, + NPY_CDOUBLE, 1, 0); + if (!data) return NULL; + int npts = PyArray_DIM(data, PyArray_NDIM(data) - 1); + PyArrayObject *ret = (PyArrayObject *)PyArray_Empty(PyArray_NDIM(data), + PyArray_DIMS(data), PyArray_DescrFromType(NPY_DOUBLE), 0); + int fail = 0; + if (!ret) fail=1; + if (!fail) { + int nrepeats = PyArray_SIZE(ret)/npts; + double *rptr = (double *)PyArray_DATA(ret), + *dptr = (double *)PyArray_DATA(data); + + Py_BEGIN_ALLOW_THREADS; + NPY_SIGINT_ON; + plan = make_rfft_plan(npts); + if (!plan) fail=1; + if (!fail) { + for (int i = 0; i < nrepeats; i++) { + memcpy((char *)(rptr + 1), (dptr + 2), (npts - 1)*sizeof(double)); + rptr[0] = dptr[0]; + if (rfft_backward(plan, rptr, fct)!=0) {fail=1; break;} + rptr += npts; + dptr += npts*2; + } + } + if (plan) destroy_rfft_plan(plan); + NPY_SIGINT_OFF; + Py_END_ALLOW_THREADS; + } + if (fail) { + Py_XDECREF(data); + Py_XDECREF(ret); + return PyErr_NoMemory(); + } + Py_DECREF(data); + return (PyObject *)ret; +} + +static PyObject * +execute_real(PyObject *a1, int is_forward, double fct) +{ + return is_forward ? 
execute_real_forward(a1, fct) + : execute_real_backward(a1, fct); +} + +static const char execute__doc__[] = ""; + +static PyObject * +execute(PyObject *NPY_UNUSED(self), PyObject *args) +{ + PyObject *a1; + int is_real, is_forward; + double fct; + + if(!PyArg_ParseTuple(args, "Oiid:execute", &a1, &is_real, &is_forward, &fct)) { + return NULL; + } + + return is_real ? execute_real(a1, is_forward, fct) + : execute_complex(a1, is_forward, fct); +} + +/* List of methods defined in the module */ + +static struct PyMethodDef methods[] = { + {"execute", execute, 1, execute__doc__}, + {NULL, NULL, 0, NULL} /* sentinel */ +}; + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "pocketfft_internal", + NULL, + -1, + methods, + NULL, + NULL, + NULL, + NULL +}; +#endif + +/* Initialization function for the module */ +#if PY_MAJOR_VERSION >= 3 +#define RETVAL(x) x +PyMODINIT_FUNC PyInit_pocketfft_internal(void) +#else +#define RETVAL(x) +PyMODINIT_FUNC +initpocketfft_internal(void) +#endif +{ + PyObject *m; +#if PY_MAJOR_VERSION >= 3 + m = PyModule_Create(&moduledef); +#else + static const char module_documentation[] = ""; + + m = Py_InitModule4("pocketfft_internal", methods, + module_documentation, + (PyObject*)NULL,PYTHON_API_VERSION); +#endif + if (m == NULL) { + return RETVAL(NULL); + } + + /* Import the array object */ + import_array(); + + /* XXXX Add constants here */ + + return RETVAL(m); +} diff --git a/numpy/fft/fftpack.py b/numpy/fft/pocketfft.py index de675936f..794d13937 100644 --- a/numpy/fft/fftpack.py +++ b/numpy/fft/pocketfft.py @@ -26,9 +26,6 @@ n = n-dimensional transform (Note: 2D routines are just nD routines with different default behavior.) -The underlying code for these functions is an f2c-translated and modified -version of the FFTPACK routines. - """ from __future__ import division, absolute_import, print_function @@ -37,26 +34,18 @@ __all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn', import functools -from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate, - take, sqrt) +from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt +from . import pocketfft_internal as pfi from numpy.core.multiarray import normalize_axis_index from numpy.core import overrides -from . import fftpack_lite as fftpack -from .helper import _FFTCache - -_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32) -_real_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32) array_function_dispatch = functools.partial( overrides.array_function_dispatch, module='numpy.fft') -def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, - work_function=fftpack.cfftf, fft_cache=_fft_cache): - a = asarray(a) +def _raw_fft(a, n, axis, is_real, is_forward, fct): axis = normalize_axis_index(axis, a.ndim) - if n is None: n = a.shape[axis] @@ -64,15 +53,6 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, raise ValueError("Invalid number of FFT data points (%d) specified." % n) - # We have to ensure that only a single thread can access a wsave array - # at any given time. Thus we remove it from the cache and insert it - # again after it has been used. Multiple threads might create multiple - # copies of the wsave array. This is intentional and a limitation of - # the current C code. 
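The rewritten `_raw_fft` below no longer juggles a twiddle-factor cache; it forwards the array together with a real/complex flag, a direction flag and a scale factor to the single C entry point `pocketfft_internal.execute` defined above, and the C side builds and frees its plan on every call. A minimal sketch of that equivalence, using the private helper purely for illustration:

    >>> import numpy as np
    >>> from numpy.fft import pocketfft_internal as pfi   # private module added here
    >>> x = np.exp(2j * np.pi * np.arange(8) / 8)
    >>> # is_real=False, is_forward=True, fct=1.0 reproduces the default fft
    >>> np.allclose(pfi.execute(x, False, True, 1.0), np.fft.fft(x))
    True
    >>> # the inverse direction with fct=1/n reproduces the default ifft
    >>> np.allclose(pfi.execute(x, False, False, 1.0 / 8), np.fft.ifft(x))
    True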
- wsave = fft_cache.pop_twiddle_factors(n) - if wsave is None: - wsave = init_function(n) - if a.shape[axis] != n: s = list(a.shape) if s[axis] > n: @@ -87,25 +67,22 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti, z[tuple(index)] = a a = z - if axis != a.ndim - 1: + if axis == a.ndim-1: + r = pfi.execute(a, is_real, is_forward, fct) + else: a = swapaxes(a, axis, -1) - r = work_function(a, wsave) - if axis != a.ndim - 1: + r = pfi.execute(a, is_real, is_forward, fct) r = swapaxes(r, axis, -1) - - # As soon as we put wsave back into the cache, another thread could pick it - # up and start using it, so we must not do this until after we're - # completely done using it ourselves. - fft_cache.put_twiddle_factors(n, wsave) - return r def _unitary(norm): - if norm not in (None, "ortho"): - raise ValueError("Invalid norm value %s, should be None or \"ortho\"." - % norm) - return norm is not None + if norm is None: + return False + if norm=="ortho": + return True + raise ValueError("Invalid norm value %s, should be None or \"ortho\"." + % norm) def _fft_dispatcher(a, n=None, axis=None, norm=None): @@ -177,19 +154,17 @@ def fft(a, n=None, axis=-1, norm=None): Examples -------- >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8)) - array([ -3.44505240e-16 +1.14383329e-17j, - 8.00000000e+00 -5.71092652e-15j, - 2.33482938e-16 +1.22460635e-16j, - 1.64863782e-15 +1.77635684e-15j, - 9.95839695e-17 +2.33482938e-16j, - 0.00000000e+00 +1.66837030e-15j, - 1.14383329e-17 +1.22460635e-16j, - -1.64863782e-15 +1.77635684e-15j]) + array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j, + 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j, + -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j, + 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j]) In this example, real input has an FFT which is Hermitian, i.e., symmetric in the real part and anti-symmetric in the imaginary part, as described in the `numpy.fft` documentation: + >>> import matplotlib + >>> matplotlib.use('Agg') >>> import matplotlib.pyplot as plt >>> t = np.arange(256) >>> sp = np.fft.fft(np.sin(t)) @@ -200,12 +175,13 @@ def fft(a, n=None, axis=-1, norm=None): """ - a = asarray(a).astype(complex, copy=False) + a = asarray(a) if n is None: n = a.shape[axis] - output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache) - if _unitary(norm): - output *= 1 / sqrt(n) + fct = 1 + if norm is not None and _unitary(norm): + fct = 1 / sqrt(n) + output = _raw_fft(a, n, axis, False, True, fct) return output @@ -278,29 +254,32 @@ def ifft(a, n=None, axis=-1, norm=None): Examples -------- >>> np.fft.ifft([0, 4, 0, 0]) - array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) + array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary Create and plot a band-limited signal with random phases: + >>> import matplotlib + >>> matplotlib.use('agg') >>> import matplotlib.pyplot as plt >>> t = np.arange(400) >>> n = np.zeros((400,), dtype=complex) >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,))) >>> s = np.fft.ifft(n) >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--') - ... + [<matplotlib.lines.Line2D object at ...>, <matplotlib.lines.Line2D object at ...>] >>> plt.legend(('real', 'imaginary')) - ... + <matplotlib.legend.Legend object at ...> >>> plt.show() """ - # The copy may be required for multithreading. 
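Below, the normalization is folded into that same scale factor: `fct` is 1 for `fft`, `1/n` for `ifft`, and `1/sqrt(n)` for either direction under `norm="ortho"` (any other norm value still raises `ValueError`). A quick numeric check of those conventions:

    >>> import numpy as np
    >>> x = np.random.rand(16) + 1j * np.random.rand(16)
    >>> np.allclose(np.fft.ifft(np.fft.fft(x)), x)      # forward fct=1, inverse fct=1/n
    True
    >>> np.allclose(np.fft.fft(x, norm="ortho"),
    ...             np.fft.fft(x) / np.sqrt(x.size))    # ortho scales by 1/sqrt(n)
    True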
- a = array(a, copy=True, dtype=complex) + a = asarray(a) if n is None: n = a.shape[axis] - unitary = _unitary(norm) - output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache) - return output * (1 / (sqrt(n) if unitary else n)) + fct = 1/n + if norm is not None and _unitary(norm): + fct = 1/sqrt(n) + output = _raw_fft(a, n, axis, False, False, fct) + return output @@ -374,23 +353,22 @@ def rfft(a, n=None, axis=-1, norm=None): Examples -------- >>> np.fft.fft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) + array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary >>> np.fft.rfft([0, 1, 0, 0]) - array([ 1.+0.j, 0.-1.j, -1.+0.j]) + array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary Notice how the final element of the `fft` output is the complex conjugate of the second element, for real input. For `rfft`, this symmetry is exploited to compute only the non-negative frequency terms. """ - # The copy may be required for multithreading. - a = array(a, copy=True, dtype=float) - output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf, - _real_fft_cache) - if _unitary(norm): + a = asarray(a) + fct = 1 + if norm is not None and _unitary(norm): if n is None: n = a.shape[axis] - output *= 1 / sqrt(n) + fct = 1/sqrt(n) + output = _raw_fft(a, n, axis, True, True, fct) return output @@ -465,9 +443,9 @@ def irfft(a, n=None, axis=-1, norm=None): Examples -------- >>> np.fft.ifft([1, -1j, -1, 1j]) - array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) + array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary >>> np.fft.irfft([1, -1j, -1]) - array([ 0., 1., 0., 0.]) + array([0., 1., 0., 0.]) Notice how the last term in the input to the ordinary `ifft` is the complex conjugate of the second term, and the output has zero imaginary @@ -475,14 +453,14 @@ def irfft(a, n=None, axis=-1, norm=None): specified, and the output array is purely real. """ - # The copy may be required for multithreading. - a = array(a, copy=True, dtype=complex) + a = asarray(a) if n is None: n = (a.shape[axis] - 1) * 2 - unitary = _unitary(norm) - output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb, - _real_fft_cache) - return output * (1 / (sqrt(n) if unitary else n)) + fct = 1/n + if norm is not None and _unitary(norm): + fct = 1/sqrt(n) + output = _raw_fft(a, n, axis, True, False, fct) + return output @array_function_dispatch(_fft_dispatcher) @@ -543,16 +521,16 @@ def hfft(a, n=None, axis=-1, norm=None): -------- >>> signal = np.array([1, 2, 3, 4, 3, 2]) >>> np.fft.fft(signal) - array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) + array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary >>> np.fft.hfft(signal[:4]) # Input first half of signal - array([ 15., -4., 0., -1., 0., -4.]) + array([15., -4., 0., -1., 0., -4.]) >>> np.fft.hfft(signal, 6) # Input entire signal and truncate - array([ 15., -4., 0., -1., 0., -4.]) + array([15., -4., 0., -1., 0., -4.]) >>> signal = np.array([[1, 1.j], [-1.j, 2]]) >>> np.conj(signal.T) - signal # check Hermitian symmetry - array([[ 0.-0.j, 0.+0.j], + array([[ 0.-0.j, -0.+0.j], # may vary [ 0.+0.j, 0.-0.j]]) >>> freq_spectrum = np.fft.hfft(signal) >>> freq_spectrum @@ -560,8 +538,7 @@ def hfft(a, n=None, axis=-1, norm=None): [ 2., -2.]]) """ - # The copy may be required for multithreading. 
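For the real-valued pair changed above it helps to keep the shapes in mind: `rfft` keeps only the `n//2 + 1` non-negative frequency terms, and `irfft` defaults to an output length of `2*(m - 1)`, so odd original lengths need `n` passed back explicitly. A short sanity check:

    >>> import numpy as np
    >>> x = np.random.rand(9)                    # odd length
    >>> np.fft.rfft(x).shape                     # n//2 + 1 frequency terms
    (5,)
    >>> np.allclose(np.fft.irfft(np.fft.rfft(x), n=9), x)
    True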
- a = array(a, copy=True, dtype=complex) + a = asarray(a) if n is None: n = (a.shape[axis] - 1) * 2 unitary = _unitary(norm) @@ -616,13 +593,12 @@ def ihfft(a, n=None, axis=-1, norm=None): -------- >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4]) >>> np.fft.ifft(spectrum) - array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j]) + array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary >>> np.fft.ihfft(spectrum) - array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) + array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary """ - # The copy may be required for multithreading. - a = array(a, copy=True, dtype=float) + a = asarray(a) if n is None: n = a.shape[axis] unitary = _unitary(norm) @@ -732,17 +708,17 @@ def fftn(a, s=None, axes=None, norm=None): -------- >>> a = np.mgrid[:3, :3, :3][0] >>> np.fft.fftn(a, axes=(1, 2)) - array([[[ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[ 9.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]], - [[ 18.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j]]]) + array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[ 9.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]], + [[18.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j], + [ 0.+0.j, 0.+0.j, 0.+0.j]]]) >>> np.fft.fftn(a, (2, 2), axes=(0, 1)) - array([[[ 2.+0.j, 2.+0.j, 2.+0.j], + array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary [ 0.+0.j, 0.+0.j, 0.+0.j]], [[-2.+0.j, -2.+0.j, -2.+0.j], [ 0.+0.j, 0.+0.j, 0.+0.j]]]) @@ -838,10 +814,10 @@ def ifftn(a, s=None, axes=None, norm=None): -------- >>> a = np.eye(4) >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,)) - array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]]) Create and plot an image with band-limited frequency content: @@ -934,16 +910,16 @@ def fft2(a, s=None, axes=(-2, -1), norm=None): -------- >>> a = np.mgrid[:5, :5][0] >>> np.fft.fft2(a) - array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ], - [-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ], - [-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ], - [-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ], - [-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j , - 0.0 +0.j , 0.0 +0.j ]]) + array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary + 0. +0.j , 0. +0.j ], + [-12.5+17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 +4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5 -4.0614962j , 0. +0.j , 0. +0.j , + 0. +0.j , 0. +0.j ], + [-12.5-17.20477401j, 0. +0.j , 0. +0.j , + 0. +0.j , 0. 
+0.j ]]) """ @@ -1028,10 +1004,10 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None): -------- >>> a = 4 * np.eye(4) >>> np.fft.ifft2(a) - array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], - [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], - [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) + array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j], + [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j], + [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]]) """ @@ -1110,20 +1086,19 @@ def rfftn(a, s=None, axes=None, norm=None): -------- >>> a = np.ones((2, 2, 2)) >>> np.fft.rfftn(a) - array([[[ 8.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]], - [[ 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]]]) + array([[[8.+0.j, 0.+0.j], # may vary + [0.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) >>> np.fft.rfftn(a, axes=(2, 0)) - array([[[ 4.+0.j, 0.+0.j], - [ 4.+0.j, 0.+0.j]], - [[ 0.+0.j, 0.+0.j], - [ 0.+0.j, 0.+0.j]]]) + array([[[4.+0.j, 0.+0.j], # may vary + [4.+0.j, 0.+0.j]], + [[0.+0.j, 0.+0.j], + [0.+0.j, 0.+0.j]]]) """ - # The copy may be required for multithreading. - a = array(a, copy=True, dtype=float) + a = asarray(a) s, axes = _cook_nd_args(a, s, axes) a = rfft(a, s[-1], axes[-1], norm) for ii in range(len(axes)-1): @@ -1247,16 +1222,15 @@ def irfftn(a, s=None, axes=None, norm=None): >>> a = np.zeros((3, 2, 2)) >>> a[0, 0, 0] = 3 * 2 * 2 >>> np.fft.irfftn(a) - array([[[ 1., 1.], - [ 1., 1.]], - [[ 1., 1.], - [ 1., 1.]], - [[ 1., 1.], - [ 1., 1.]]]) + array([[[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]], + [[1., 1.], + [1., 1.]]]) """ - # The copy may be required for multithreading. - a = array(a, copy=True, dtype=complex) + a = asarray(a) s, axes = _cook_nd_args(a, s, axes, invreal=1) for ii in range(len(axes)-1): a = ifft(a, s[ii], axes[ii], norm) diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py index cd99a82d7..6c3548b65 100644 --- a/numpy/fft/setup.py +++ b/numpy/fft/setup.py @@ -7,9 +7,9 @@ def configuration(parent_package='',top_path=None): config.add_data_dir('tests') - # Configure fftpack_lite - config.add_extension('fftpack_lite', - sources=['fftpack_litemodule.c', 'fftpack.c'] + # Configure pocketfft_internal + config.add_extension('pocketfft_internal', + sources=['pocketfft.c'] ) return config diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py index 8d315fa02..6613c8002 100644 --- a/numpy/fft/tests/test_helper.py +++ b/numpy/fft/tests/test_helper.py @@ -7,7 +7,6 @@ from __future__ import division, absolute_import, print_function import numpy as np from numpy.testing import assert_array_almost_equal, assert_equal from numpy import fft, pi -from numpy.fft.helper import _FFTCache class TestFFTShift(object): @@ -168,81 +167,3 @@ class TestIRFFTN(object): # Should not raise error fft.irfftn(a, axes=axes) - - -class TestFFTCache(object): - - def test_basic_behaviour(self): - c = _FFTCache(max_size_in_mb=1, max_item_count=4) - - # Put - c.put_twiddle_factors(1, np.ones(2, dtype=np.float32)) - c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32)) - - # Get - assert_array_almost_equal(c.pop_twiddle_factors(1), - np.ones(2, dtype=np.float32)) - assert_array_almost_equal(c.pop_twiddle_factors(2), - np.zeros(2, dtype=np.float32)) - - # Nothing should be left. - assert_equal(len(c._dict), 0) - - # Now put everything in twice so it can be retrieved once and each will - # still have one item left. 
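Stepping back to the `rfftn` rewrite a few hunks up: the new body makes the composition explicit, one real transform over the last axis followed by complex transforms over the remaining axes. That decomposition can be verified directly (a sketch for illustration, not part of the change itself):

    >>> import numpy as np
    >>> a = np.random.rand(4, 6)
    >>> step = np.fft.rfft(a, axis=-1)           # real transform over the last axis
    >>> np.allclose(np.fft.fft(step, axis=0), np.fft.rfftn(a))
    True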
- for _ in range(2): - c.put_twiddle_factors(1, np.ones(2, dtype=np.float32)) - c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32)) - assert_array_almost_equal(c.pop_twiddle_factors(1), - np.ones(2, dtype=np.float32)) - assert_array_almost_equal(c.pop_twiddle_factors(2), - np.zeros(2, dtype=np.float32)) - assert_equal(len(c._dict), 2) - - def test_automatic_pruning(self): - # That's around 2600 single precision samples. - c = _FFTCache(max_size_in_mb=0.01, max_item_count=4) - - c.put_twiddle_factors(1, np.ones(200, dtype=np.float32)) - c.put_twiddle_factors(2, np.ones(200, dtype=np.float32)) - assert_equal(list(c._dict.keys()), [1, 2]) - - # This is larger than the limit but should still be kept. - c.put_twiddle_factors(3, np.ones(3000, dtype=np.float32)) - assert_equal(list(c._dict.keys()), [1, 2, 3]) - # Add one more. - c.put_twiddle_factors(4, np.ones(3000, dtype=np.float32)) - # The other three should no longer exist. - assert_equal(list(c._dict.keys()), [4]) - - # Now test the max item count pruning. - c = _FFTCache(max_size_in_mb=0.01, max_item_count=2) - c.put_twiddle_factors(2, np.empty(2)) - c.put_twiddle_factors(1, np.empty(2)) - # Can still be accessed. - assert_equal(list(c._dict.keys()), [2, 1]) - - c.put_twiddle_factors(3, np.empty(2)) - # 1 and 3 can still be accessed - c[2] has been touched least recently - # and is thus evicted. - assert_equal(list(c._dict.keys()), [1, 3]) - - # One last test. We will add a single large item that is slightly - # bigger then the cache size. Some small items can still be added. - c = _FFTCache(max_size_in_mb=0.01, max_item_count=5) - c.put_twiddle_factors(1, np.ones(3000, dtype=np.float32)) - c.put_twiddle_factors(2, np.ones(2, dtype=np.float32)) - c.put_twiddle_factors(3, np.ones(2, dtype=np.float32)) - c.put_twiddle_factors(4, np.ones(2, dtype=np.float32)) - assert_equal(list(c._dict.keys()), [1, 2, 3, 4]) - - # One more big item. This time it is 6 smaller ones but they are - # counted as one big item. - for _ in range(6): - c.put_twiddle_factors(5, np.ones(500, dtype=np.float32)) - # '1' no longer in the cache. Rest still in the cache. - assert_equal(list(c._dict.keys()), [2, 3, 4, 5]) - - # Another big item - should now be the only item in the cache. - c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32)) - assert_equal(list(c._dict.keys()), [6]) diff --git a/numpy/fft/tests/test_fftpack.py b/numpy/fft/tests/test_pocketfft.py index 8d6cd8407..0552f6afd 100644 --- a/numpy/fft/tests/test_fftpack.py +++ b/numpy/fft/tests/test_pocketfft.py @@ -28,6 +28,16 @@ class TestFFTShift(object): class TestFFT1D(object): + def test_identity(self): + maxlen = 512 + x = random(maxlen) + 1j*random(maxlen) + xr = random(maxlen) + for i in range(1,maxlen): + assert_array_almost_equal(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i], + decimal=12) + assert_array_almost_equal(np.fft.irfft(np.fft.rfft(xr[0:i]),i), + xr[0:i], decimal=12) + def test_fft(self): x = random(30) + 1j*random(30) assert_array_almost_equal(fft1(x), np.fft.fft(x)) diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 30237b76f..3a0e67f60 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -20,17 +20,18 @@ gzip, bz2 and xz are supported. Example:: >>> # Create a DataSource, use os.curdir (default) for local storage. - >>> ds = datasource.DataSource() + >>> from numpy import DataSource + >>> ds = DataSource() >>> >>> # Open a remote file. 
>>> # DataSource downloads the file, stores it locally in: >>> # './www.google.com/index.html' >>> # opens the file and returns a file object. - >>> fp = ds.open('http://www.google.com/index.html') + >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP >>> >>> # Use the file as you normally would - >>> fp.read() - >>> fp.close() + >>> fp.read() # doctest: +SKIP + >>> fp.close() # doctest: +SKIP """ from __future__ import division, absolute_import, print_function @@ -156,6 +157,7 @@ class _FileOpeners(object): Examples -------- + >>> import gzip >>> np.lib._datasource._file_openers.keys() [None, '.bz2', '.gz', '.xz', '.lzma'] >>> np.lib._datasource._file_openers['.gz'] is gzip.open @@ -290,7 +292,7 @@ class DataSource(object): URLs require a scheme string (``http://``) to be used, without it they will fail:: - >>> repos = DataSource() + >>> repos = np.DataSource() >>> repos.exists('www.google.com/index.html') False >>> repos.exists('http://www.google.com/index.html') @@ -302,17 +304,17 @@ class DataSource(object): -------- :: - >>> ds = DataSource('/home/guido') - >>> urlname = 'http://www.google.com/index.html' - >>> gfile = ds.open('http://www.google.com/index.html') # remote file + >>> ds = np.DataSource('/home/guido') + >>> urlname = 'http://www.google.com/' + >>> gfile = ds.open('http://www.google.com/') >>> ds.abspath(urlname) - '/home/guido/www.google.com/site/index.html' + '/home/guido/www.google.com/index.html' - >>> ds = DataSource(None) # use with temporary file + >>> ds = np.DataSource(None) # use with temporary file >>> ds.open('/home/guido/foobar.txt') <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430> >>> ds.abspath('/home/guido/foobar.txt') - '/tmp/tmpy4pgsP/home/guido/foobar.txt' + '/tmp/.../home/guido/foobar.txt' """ diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 8a042f190..0ebd39b8c 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -146,11 +146,17 @@ def flatten_dtype(ndtype, flatten_base=False): >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float), ... ('block', int, (2, 3))]) >>> np.lib._iotools.flatten_dtype(dt) - [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')] + [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')] >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True) - [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'), - dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'), - dtype('int32')] + [dtype('S4'), + dtype('float64'), + dtype('float64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64'), + dtype('int64')] """ names = ndtype.names @@ -309,13 +315,13 @@ class NameValidator(object): -------- >>> validator = np.lib._iotools.NameValidator() >>> validator(['file', 'field2', 'with space', 'CaSe']) - ['file_', 'field2', 'with_space', 'CaSe'] + ('file_', 'field2', 'with_space', 'CaSe') >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'], - deletechars='q', - case_sensitive='False') + ... deletechars='q', + ... 
case_sensitive=False) >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe']) - ['excl_', 'field2', 'no_', 'with_space', 'case'] + ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE') """ # @@ -599,7 +605,7 @@ class StringConverter(object): -------- >>> import dateutil.parser >>> import datetime - >>> dateparser = datetustil.parser.parse + >>> dateparser = dateutil.parser.parse >>> defaultdate = datetime.date(2000, 1, 1) >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate) """ diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py index c3563a7fa..8aa999fc9 100644 --- a/numpy/lib/_version.py +++ b/numpy/lib/_version.py @@ -47,9 +47,12 @@ class NumpyVersion(): >>> from numpy.lib import NumpyVersion >>> if NumpyVersion(np.__version__) < '1.7.0': ... print('skip') - skip + >>> # skip >>> NumpyVersion('1.7') # raises ValueError, add ".0" + Traceback (most recent call last): + ... + ValueError: Not a valid numpy version string """ diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py index 4f6371058..b236cc449 100644 --- a/numpy/lib/arraypad.py +++ b/numpy/lib/arraypad.py @@ -1100,10 +1100,10 @@ def pad(array, pad_width, mode, **kwargs): -------- >>> a = [1, 2, 3, 4, 5] >>> np.pad(a, (2,3), 'constant', constant_values=(4, 6)) - array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6]) + array([4, 4, 1, ..., 6, 6, 6]) >>> np.pad(a, (2, 3), 'edge') - array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5]) + array([1, 1, 1, ..., 5, 5, 5]) >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4)) array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4]) diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py index fd64ecbd6..558150e48 100644 --- a/numpy/lib/arraysetops.py +++ b/numpy/lib/arraysetops.py @@ -82,7 +82,7 @@ def ediff1d(ary, to_end=None, to_begin=None): array([ 1, 2, 3, -7]) >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99])) - array([-99, 1, 2, 3, -7, 88, 99]) + array([-99, 1, 2, ..., -7, 88, 99]) The returned array is always 1D. 
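Since the `ediff1d` examples above now use the abbreviated `...` output form, one uncut case makes the "always 1D" note concrete:

    >>> import numpy as np
    >>> np.ediff1d(np.array([[1, 2, 4], [1, 6, 24]]))   # 2-D input is flattened first
    array([ 1,  2, -3,  5, 18])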
@@ -241,13 +241,11 @@ def unique(ar, return_index=False, return_inverse=False, >>> a = np.array(['a', 'b', 'b', 'c', 'a']) >>> u, indices = np.unique(a, return_index=True) >>> u - array(['a', 'b', 'c'], - dtype='|S1') + array(['a', 'b', 'c'], dtype='<U1') >>> indices array([0, 1, 3]) >>> a[indices] - array(['a', 'b', 'c'], - dtype='|S1') + array(['a', 'b', 'c'], dtype='<U1') Reconstruct the input array from the unique values: @@ -256,9 +254,9 @@ def unique(ar, return_index=False, return_inverse=False, >>> u array([1, 2, 3, 4, 6]) >>> indices - array([0, 1, 4, 3, 1, 2, 1]) + array([0, 1, 4, ..., 1, 2, 1]) >>> u[indices] - array([1, 2, 6, 4, 2, 3, 2]) + array([1, 2, 6, ..., 2, 3, 2]) """ ar = np.asanyarray(ar) @@ -661,8 +659,8 @@ def isin(element, test_elements, assume_unique=False, invert=False): >>> test_elements = [1, 2, 4, 8] >>> mask = np.isin(element, test_elements) >>> mask - array([[ False, True], - [ True, False]]) + array([[False, True], + [ True, False]]) >>> element[mask] array([2, 4]) @@ -676,7 +674,7 @@ def isin(element, test_elements, assume_unique=False, invert=False): >>> mask = np.isin(element, test_elements, invert=True) >>> mask array([[ True, False], - [ False, True]]) + [False, True]]) >>> element[mask] array([0, 6]) @@ -685,14 +683,14 @@ def isin(element, test_elements, assume_unique=False, invert=False): >>> test_set = {1, 2, 4, 8} >>> np.isin(element, test_set) - array([[ False, False], - [ False, False]]) + array([[False, False], + [False, False]]) Casting the set to a list gives the expected result: >>> np.isin(element, list(test_set)) - array([[ False, True], - [ True, False]]) + array([[False, True], + [ True, False]]) """ element = np.asarray(element) return in1d(element, test_elements, assume_unique=assume_unique, diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py index f2d4fe9fd..c16668582 100644 --- a/numpy/lib/arrayterator.py +++ b/numpy/lib/arrayterator.py @@ -80,9 +80,8 @@ class Arrayterator(object): >>> for subarr in a_itor: ... if not subarr.all(): - ... print(subarr, subarr.shape) - ... - [[[[0 1]]]] (1, 1, 1, 2) + ... print(subarr, subarr.shape) # doctest: +SKIP + >>> # [[[[0 1]]]] (1, 1, 1, 2) """ @@ -160,7 +159,7 @@ class Arrayterator(object): ... if not subarr: ... print(subarr, type(subarr)) ... - 0 <type 'numpy.int32'> + 0 <class 'numpy.int64'> """ for block in self: diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py index e1e297492..216687475 100644 --- a/numpy/lib/financial.py +++ b/numpy/lib/financial.py @@ -127,7 +127,7 @@ def fv(rate, nper, pmt, pv, when='end'): >>> a = np.array((0.05, 0.06, 0.07))/12 >>> np.fv(a, 10*12, -100, -100) - array([ 15692.92889434, 16569.87435405, 17509.44688102]) + array([ 15692.92889434, 16569.87435405, 17509.44688102]) # may vary """ when = _convert_when(when) @@ -275,7 +275,7 @@ def nper(rate, pmt, pv, fv=0, when='end'): If you only had $150/month to pay towards the loan, how long would it take to pay-off a loan of $8,000 at 7% annual interest? - >>> print(round(np.nper(0.07/12, -150, 8000), 5)) + >>> print(np.round(np.nper(0.07/12, -150, 8000), 5)) 64.07335 So, over 64 months would be required to pay off the loan. @@ -286,10 +286,10 @@ def nper(rate, pmt, pv, fv=0, when='end'): >>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12, ... -150 : -99 : 50 , ... 
8000 : 9001 : 1000])) - array([[[ 64.07334877, 74.06368256], - [ 108.07548412, 127.99022654]], - [[ 66.12443902, 76.87897353], - [ 114.70165583, 137.90124779]]]) + array([[[ 64.07334877, 74.06368256], + [108.07548412, 127.99022654]], + [[ 66.12443902, 76.87897353], + [114.70165583, 137.90124779]]]) """ when = _convert_when(when) @@ -539,7 +539,7 @@ def pv(rate, nper, pmt, fv=0, when='end'): >>> a = np.array((0.05, 0.04, 0.03))/12 >>> np.pv(a, 10*12, -100, 15692.93) - array([ -100.00067132, -649.26771385, -1273.78633713]) + array([ -100.00067132, -649.26771385, -1273.78633713]) # may vary So, to end up with the same $15692.93 under the same $100 per month "savings plan," for annual interest rates of 4% and 3%, one would @@ -704,15 +704,15 @@ def irr(values): Examples -------- - >>> round(irr([-100, 39, 59, 55, 20]), 5) + >>> round(np.irr([-100, 39, 59, 55, 20]), 5) 0.28095 - >>> round(irr([-100, 0, 0, 74]), 5) + >>> round(np.irr([-100, 0, 0, 74]), 5) -0.0955 - >>> round(irr([-100, 100, 0, -7]), 5) + >>> round(np.irr([-100, 100, 0, -7]), 5) -0.0833 - >>> round(irr([-100, 100, 0, 7]), 5) + >>> round(np.irr([-100, 100, 0, 7]), 5) 0.06206 - >>> round(irr([-5, 10.5, 1, -8, 1]), 5) + >>> round(np.irr([-5, 10.5, 1, -8, 1]), 5) 0.0886 (Compare with the Example given for numpy.lib.financial.npv) @@ -777,7 +777,7 @@ def npv(rate, values): Examples -------- >>> np.npv(0.281,[-100, 39, 59, 55, 20]) - -0.0084785916384548798 + -0.0084785916384548798 # may vary (Compare with the Example given for numpy.lib.financial.irr) diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py index 5f87c8b2c..274f957db 100644 --- a/numpy/lib/function_base.py +++ b/numpy/lib/function_base.py @@ -218,12 +218,12 @@ def flip(m, axis=None): [2, 3]], [[4, 5], [6, 7]]]) - >>> flip(A, 0) + >>> np.flip(A, 0) array([[[4, 5], [6, 7]], [[0, 1], [2, 3]]]) - >>> flip(A, 1) + >>> np.flip(A, 1) array([[[2, 3], [0, 1]], [[6, 7], @@ -239,7 +239,7 @@ def flip(m, axis=None): [[1, 0], [3, 2]]]) >>> A = np.random.randn(3,4,5) - >>> np.all(flip(A,2) == A[:,:,::-1,...]) + >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) True """ if not hasattr(m, 'ndim'): @@ -359,7 +359,7 @@ def average(a, axis=None, weights=None, returned=False): Examples -------- - >>> data = range(1,5) + >>> data = list(range(1,5)) >>> data [1, 2, 3, 4] >>> np.average(data) @@ -373,11 +373,10 @@ def average(a, axis=None, weights=None, returned=False): [2, 3], [4, 5]]) >>> np.average(data, axis=1, weights=[1./4, 3./4]) - array([ 0.75, 2.75, 4.75]) + array([0.75, 2.75, 4.75]) >>> np.average(data, weights=[1./4, 3./4]) - Traceback (most recent call last): - ... + ... TypeError: Axis must be specified when shapes of a and weights differ. >>> a = np.ones(5, dtype=np.float128) @@ -586,7 +585,7 @@ def piecewise(x, condlist, funclist, *args, **kw): ``x >= 0``. >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) - array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) + array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) Apply the same function to a scalar value. @@ -671,7 +670,7 @@ def select(condlist, choicelist, default=0): >>> condlist = [x<3, x>5] >>> choicelist = [x, x**2] >>> np.select(condlist, choicelist) - array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81]) + array([ 0, 1, 2, ..., 49, 64, 81]) """ # Check the size of condlist and choicelist are the same, or abort. @@ -854,9 +853,9 @@ def gradient(f, *varargs, **kwargs): -------- >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) >>> np.gradient(f) - array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. 
]) >>> np.gradient(f, 2) - array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) + array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) Spacing can be also specified with an array that represents the coordinates of the values F along the dimensions. @@ -864,13 +863,13 @@ def gradient(f, *varargs, **kwargs): >>> x = np.arange(f.size) >>> np.gradient(f, x) - array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ]) + array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) Or a non uniform one: >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) >>> np.gradient(f, x) - array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5]) + array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) For two dimensional arrays, the return will be two arrays ordered by axis. In this example the first array stands for the gradient in @@ -878,8 +877,8 @@ def gradient(f, *varargs, **kwargs): >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) [array([[ 2., 2., -1.], - [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ], - [ 1. , 1. , 1. ]])] + [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], + [1. , 1. , 1. ]])] In this example the spacing is also specified: uniform for axis=0 and non uniform for axis=1 @@ -888,17 +887,17 @@ def gradient(f, *varargs, **kwargs): >>> y = [1., 1.5, 3.5] >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) [array([[ 1. , 1. , -0.5], - [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ], - [ 2. , 1.7, 0.5]])] + [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], + [2. , 1.7, 0.5]])] It is possible to specify how boundaries are treated using `edge_order` >>> x = np.array([0, 1, 2, 3, 4]) >>> f = x**2 >>> np.gradient(f, edge_order=1) - array([ 1., 2., 4., 6., 7.]) + array([1., 2., 4., 6., 7.]) >>> np.gradient(f, edge_order=2) - array([-0., 2., 4., 6., 8.]) + array([0., 2., 4., 6., 8.]) The `axis` keyword can be used to specify a subset of axes of which the gradient is calculated @@ -1200,7 +1199,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): >>> np.diff(u8_arr) array([255], dtype=uint8) >>> u8_arr[1,...] - u8_arr[0,...] - array(255, np.uint8) + 255 If this is not desirable, then the array should be cast to a larger integer type first: @@ -1340,7 +1339,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): >>> np.interp(2.5, xp, fp) 1.0 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) - array([ 3. , 3. , 2.5 , 0.56, 0. ]) + array([3. , 3. , 2.5 , 0.56, 0. ]) >>> UNDEF = -99.0 >>> np.interp(3.14, xp, fp, right=UNDEF) -99.0 @@ -1364,7 +1363,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): >>> xp = [190, -190, 350, -350] >>> fp = [5, 10, 3, 4] >>> np.interp(x, xp, fp, period=360) - array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75]) + array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) Complex interpolation: @@ -1372,7 +1371,7 @@ def interp(x, xp, fp, left=None, right=None, period=None): >>> xp = [2,3,5] >>> fp = [1.0j, 0, 2+3j] >>> np.interp(x, xp, fp) - array([ 0.+1.j , 1.+1.5j]) + array([0.+1.j , 1.+1.5j]) """ @@ -1445,7 +1444,7 @@ def angle(z, deg=False): Examples -------- >>> np.angle([1.0, 1.0j, 1+1j]) # in radians - array([ 0. , 1.57079633, 0.78539816]) + array([ 0. , 1.57079633, 0.78539816]) # may vary >>> np.angle(1+1j, deg=True) # in degrees 45.0 @@ -1505,9 +1504,9 @@ def unwrap(p, discont=pi, axis=-1): >>> phase = np.linspace(0, np.pi, num=5) >>> phase[3:] += np.pi >>> phase - array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) + array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary >>> np.unwrap(phase) - array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) + array([ 0. 
, 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary """ p = asarray(p) @@ -1547,10 +1546,10 @@ def sort_complex(a): Examples -------- >>> np.sort_complex([5, 3, 6, 2, 1]) - array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) + array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) - array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) + array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) """ b = array(a, copy=True) @@ -1596,7 +1595,7 @@ def trim_zeros(filt, trim='fb'): array([1, 2, 3, 0, 2, 1]) >>> np.trim_zeros(a, 'b') - array([0, 0, 0, 1, 2, 3, 0, 2, 1]) + array([0, 0, 0, ..., 0, 2, 1]) The input data type is preserved, list/tuple in means list/tuple out. @@ -1958,11 +1957,11 @@ class vectorize(object): >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) - <type 'numpy.int32'> + <class 'numpy.int64'> >>> vfunc = np.vectorize(myfunc, otypes=[float]) >>> out = vfunc([1, 2, 3, 4], 2) >>> type(out[0]) - <type 'numpy.float64'> + <class 'numpy.float64'> The `excluded` argument can be used to prevent vectorizing over certain arguments. This can be useful for array-like arguments of a fixed length @@ -1990,18 +1989,18 @@ class vectorize(object): >>> import scipy.stats >>> pearsonr = np.vectorize(scipy.stats.pearsonr, - ... signature='(n),(n)->(),()') - >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) + ... signature='(n),(n)->(),()') + >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) (array([ 1., -1.]), array([ 0., 0.])) Or for a vectorized convolution: >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') >>> convolve(np.eye(4), [1, 2, 1]) - array([[ 1., 2., 1., 0., 0., 0.], - [ 0., 1., 2., 1., 0., 0.], - [ 0., 0., 1., 2., 1., 0.], - [ 0., 0., 0., 1., 2., 1.]]) + array([[1., 2., 1., 0., 0., 0.], + [0., 1., 2., 1., 0., 0.], + [0., 0., 1., 2., 1., 0.], + [0., 0., 0., 1., 2., 1.]]) See Also -------- @@ -2311,10 +2310,14 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The steps to compute the weighted covariance are as follows:: + >>> m = np.arange(10, dtype=np.float64) + >>> f = np.arange(10) * 2 + >>> a = np.arange(10) ** 2. 
+ >>> ddof = 9 # N - 1 >>> w = f * a >>> v1 = np.sum(w) >>> v2 = np.sum(w * a) - >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1 + >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) Note that when ``a == 1``, the normalization factor @@ -2346,14 +2349,14 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, >>> x = [-2.1, -1, 4.3] >>> y = [3, 1.1, 0.12] >>> X = np.stack((x, y), axis=0) - >>> print(np.cov(X)) - [[ 11.71 -4.286 ] - [ -4.286 2.14413333]] - >>> print(np.cov(x, y)) - [[ 11.71 -4.286 ] - [ -4.286 2.14413333]] - >>> print(np.cov(x)) - 11.71 + >>> np.cov(X) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x, y) + array([[11.71 , -4.286 ], # may vary + [-4.286 , 2.144133]]) + >>> np.cov(x) + array(11.71) """ # Check inputs @@ -2590,12 +2593,14 @@ def blackman(M): Examples -------- + >>> import matplotlib + >>> matplotlib.use('agg') + >>> import matplotlib.pyplot as plt >>> np.blackman(12) - array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01, - 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, - 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, - 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) - + array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary + 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, + 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, + 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) Plot the window and the frequency response: @@ -2604,15 +2609,15 @@ def blackman(M): >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Blackman window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Blackman window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -2621,13 +2626,12 @@ def blackman(M): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Blackman window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of Blackman window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> _ = plt.axis('tight') >>> plt.show() """ @@ -2699,8 +2703,9 @@ def bartlett(M): Examples -------- + >>> import matplotlib.pyplot as plt >>> np.bartlett(12) - array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, + array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 0.18181818, 0. 
]) @@ -2711,15 +2716,15 @@ def bartlett(M): >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Bartlett window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Bartlett window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -2728,13 +2733,12 @@ def bartlett(M): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Bartlett window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of Bartlett window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> - >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + Text(0.5, 0, 'Normalized frequency [cycles per sample]') + >>> _ = plt.axis('tight') >>> plt.show() """ @@ -2801,26 +2805,30 @@ def hanning(M): Examples -------- >>> np.hanning(12) - array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, - 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, - 0.07937323, 0. ]) + array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, + 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, + 0.07937323, 0. ]) Plot the window and its frequency response: + >>> import matplotlib + >>> import matplotlib.pyplot + >>> matplotlib.pyplot.switch_backend('agg') + >>> import matplotlib.pyplot as plt >>> from numpy.fft import fft, fftshift >>> window = np.hanning(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hann window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Hann window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -2829,13 +2837,13 @@ def hanning(M): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of the Hann window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of the Hann window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Normalized frequency [cycles per sample]') >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + ... 
>>> plt.show() """ @@ -2900,26 +2908,30 @@ def hamming(M): Examples -------- >>> np.hamming(12) - array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, + array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 0.15302337, 0.08 ]) Plot the window and the frequency response: + >>> import matplotlib + >>> import matplotlib.pyplot + >>> matplotlib.pyplot.switch_backend('agg') + >>> import matplotlib.pyplot as plt >>> from numpy.fft import fft, fftshift >>> window = np.hamming(51) >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Hamming window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Hamming window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -2928,13 +2940,13 @@ def hamming(M): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Hamming window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of Hamming window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Normalized frequency [cycles per sample]') >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + ... >>> plt.show() """ @@ -3083,9 +3095,9 @@ def i0(x): Examples -------- >>> np.i0([0.]) - array(1.0) + array(1.0) # may vary >>> np.i0([0., 1. 
+ 2j]) - array([ 1.00000000+0.j , 0.18785373+0.64616944j]) + array([ 1.00000000+0.j , 0.18785373+0.64616944j]) # may vary """ x = atleast_1d(x).copy() @@ -3180,11 +3192,14 @@ def kaiser(M, beta): Examples -------- + >>> import matplotlib + >>> matplotlib.use('agg') + >>> import matplotlib.pyplot as plt >>> np.kaiser(12, 14) - array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02, - 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, - 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, - 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) + array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary + 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, + 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, + 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) Plot the window and the frequency response: @@ -3194,15 +3209,15 @@ def kaiser(M, beta): >>> plt.plot(window) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Kaiser window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Kaiser window') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("Sample") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Sample') >>> plt.show() >>> plt.figure() - <matplotlib.figure.Figure object at 0x...> + <Figure size 640x480 with 0 Axes> >>> A = fft(window, 2048) / 25.5 >>> mag = np.abs(fftshift(A)) >>> freq = np.linspace(-0.5, 0.5, len(A)) @@ -3211,13 +3226,13 @@ def kaiser(M, beta): >>> plt.plot(freq, response) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Frequency response of Kaiser window") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Frequency response of Kaiser window') >>> plt.ylabel("Magnitude [dB]") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Magnitude [dB]') >>> plt.xlabel("Normalized frequency [cycles per sample]") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'Normalized frequency [cycles per sample]') >>> plt.axis('tight') - (-0.5, 0.5, -100.0, ...) + (-0.5, 0.5, -100.0, ...) 
# may vary >>> plt.show() """ @@ -3273,31 +3288,33 @@ def sinc(x): Examples -------- + >>> import matplotlib + >>> import matplotlib.pyplot as plt >>> x = np.linspace(-4, 4, 41) >>> np.sinc(x) - array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02, + array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, - 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, - 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, + 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, + 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, - 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, - 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, - 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, - 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, - -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, - -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, - 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, + 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, + 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, + 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, + 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, + -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, + -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, + 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, -4.92362781e-02, -3.89804309e-17]) >>> plt.plot(x, np.sinc(x)) [<matplotlib.lines.Line2D object at 0x...>] >>> plt.title("Sinc Function") - <matplotlib.text.Text object at 0x...> + Text(0.5, 1.0, 'Sinc Function') >>> plt.ylabel("Amplitude") - <matplotlib.text.Text object at 0x...> + Text(0, 0.5, 'Amplitude') >>> plt.xlabel("X") - <matplotlib.text.Text object at 0x...> + Text(0.5, 0, 'X') >>> plt.show() It works in 2-D as well: @@ -3469,18 +3486,18 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): >>> np.median(a) 3.5 >>> np.median(a, axis=0) - array([ 6.5, 4.5, 2.5]) + array([6.5, 4.5, 2.5]) >>> np.median(a, axis=1) - array([ 7., 2.]) + array([7., 2.]) >>> m = np.median(a, axis=0) >>> out = np.zeros_like(m) >>> np.median(a, axis=0, out=m) - array([ 6.5, 4.5, 2.5]) + array([6.5, 4.5, 2.5]) >>> m - array([ 6.5, 4.5, 2.5]) + array([6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.median(b, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.median(b, axis=None, overwrite_input=True) @@ -3647,23 +3664,23 @@ def percentile(a, q, axis=None, out=None, >>> np.percentile(a, 50) 3.5 >>> np.percentile(a, 50, axis=0) - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> np.percentile(a, 50, axis=1) - array([ 7., 2.]) + array([7., 2.]) >>> np.percentile(a, 50, axis=1, keepdims=True) - array([[ 7.], - [ 2.]]) + array([[7.], + [2.]]) >>> m = np.percentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.percentile(a, 50, axis=0, out=out) - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> m - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.percentile(b, 50, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a == b) The different types of interpolation can be visualized graphically: @@ -3789,21 +3806,21 @@ def quantile(a, q, axis=None, out=None, >>> np.quantile(a, 0.5) 3.5 >>> np.quantile(a, 0.5, axis=0) - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> np.quantile(a, 0.5, axis=1) - array([ 7., 2.]) + array([7., 2.]) >>> np.quantile(a, 0.5, axis=1, keepdims=True) - array([[ 7.], 
- [ 2.]]) + array([[7.], + [2.]]) >>> m = np.quantile(a, 0.5, axis=0) >>> out = np.zeros_like(m) >>> np.quantile(a, 0.5, axis=0, out=out) - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> m - array([[ 6.5, 4.5, 2.5]]) + array([6.5, 4.5, 2.5]) >>> b = a.copy() >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a == b) """ q = np.asanyarray(q) @@ -4032,9 +4049,9 @@ def trapz(y, x=None, dx=1.0, axis=-1): array([[0, 1, 2], [3, 4, 5]]) >>> np.trapz(a, axis=0) - array([ 1.5, 2.5, 3.5]) + array([1.5, 2.5, 3.5]) >>> np.trapz(a, axis=1) - array([ 2., 8.]) + array([2., 8.]) """ y = asanyarray(y) @@ -4152,17 +4169,17 @@ def meshgrid(*xi, **kwargs): >>> y = np.linspace(0, 1, ny) >>> xv, yv = np.meshgrid(x, y) >>> xv - array([[ 0. , 0.5, 1. ], - [ 0. , 0.5, 1. ]]) + array([[0. , 0.5, 1. ], + [0. , 0.5, 1. ]]) >>> yv - array([[ 0., 0., 0.], - [ 1., 1., 1.]]) + array([[0., 0., 0.], + [1., 1., 1.]]) >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays >>> xv - array([[ 0. , 0.5, 1. ]]) + array([[0. , 0.5, 1. ]]) >>> yv - array([[ 0.], - [ 1.]]) + array([[0.], + [1.]]) `meshgrid` is very useful to evaluate functions on a grid. @@ -4224,7 +4241,7 @@ def delete(arr, obj, axis=None): arr : array_like Input array. obj : slice, int or array of ints - Indicate which sub-arrays to remove. + Indicate indices of sub-arrays to remove along the specified axis. axis : int, optional The axis along which to delete the subarray defined by `obj`. If `axis` is None, `obj` is applied to the flattened array. @@ -4245,6 +4262,7 @@ def delete(arr, obj, axis=None): ----- Often it is preferable to use a boolean mask. For example: + >>> arr = np.arange(12) + 1 >>> mask = np.ones(len(arr), dtype=bool) >>> mask[[0,2,4]] = False >>> result = arr[mask,...] @@ -4476,7 +4494,7 @@ def insert(arr, obj, values, axis=None): [2, 2], [3, 3]]) >>> np.insert(a, 1, 5) - array([1, 5, 1, 2, 2, 3, 3]) + array([1, 5, 1, ..., 2, 3, 3]) >>> np.insert(a, 1, 5, axis=1) array([[1, 5, 1], [2, 5, 2], @@ -4496,13 +4514,13 @@ def insert(arr, obj, values, axis=None): >>> b array([1, 1, 2, 2, 3, 3]) >>> np.insert(b, [2, 2], [5, 6]) - array([1, 1, 5, 6, 2, 2, 3, 3]) + array([1, 1, 5, ..., 2, 3, 3]) >>> np.insert(b, slice(2, 4), [5, 6]) - array([1, 1, 5, 2, 6, 2, 3, 3]) + array([1, 1, 5, ..., 2, 3, 3]) >>> np.insert(b, [2, 2], [7.13, False]) # type casting - array([1, 1, 7, 0, 2, 2, 3, 3]) + array([1, 1, 7, ..., 2, 3, 3]) >>> x = np.arange(8).reshape(2, 4) >>> idx = (1, 3) @@ -4666,7 +4684,7 @@ def append(arr, values, axis=None): Examples -------- >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) - array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + array([1, 2, 3, ..., 7, 8, 9]) When `axis` is specified, `values` must have the correct shape. @@ -4676,8 +4694,8 @@ def append(arr, values, axis=None): [7, 8, 9]]) >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) Traceback (most recent call last): - ... - ValueError: arrays must have same number of dimensions + ... 
+ ValueError: all the input arrays must have same number of dimensions """ arr = asanyarray(arr) diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py index 482eabe14..7b229cc89 100644 --- a/numpy/lib/histograms.py +++ b/numpy/lib/histograms.py @@ -645,7 +645,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None): >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto') >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto') - >>> hist_0; hist1 + >>> hist_0; hist_1 array([1, 1, 1]) array([2, 1, 1, 2]) >>> bins_0; bins_1 @@ -748,14 +748,14 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3]) (array([0, 2, 1]), array([0, 1, 2, 3])) >>> np.histogram(np.arange(4), bins=np.arange(5), density=True) - (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) + (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4])) >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3]) (array([1, 4, 1]), array([0, 1, 2, 3])) >>> a = np.arange(5) >>> hist, bin_edges = np.histogram(a, density=True) >>> hist - array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) + array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5]) >>> hist.sum() 2.4999999999999996 >>> np.sum(hist * np.diff(bin_edges)) @@ -770,8 +770,9 @@ def histogram(a, bins=10, range=None, normed=None, weights=None, >>> rng = np.random.RandomState(10) # deterministic random data >>> a = np.hstack((rng.normal(size=1000), ... rng.normal(loc=5, scale=2, size=1000))) - >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram + >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram >>> plt.title("Histogram with 'auto' bins") + Text(0.5, 1.0, "Histogram with 'auto' bins") >>> plt.show() """ diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py index 56abe293a..64c491cfa 100644 --- a/numpy/lib/index_tricks.py +++ b/numpy/lib/index_tricks.py @@ -478,7 +478,7 @@ class RClass(AxisConcatenator): Examples -------- >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) + array([1, 2, 3, ..., 4, 5, 6]) >>> np.r_[-1:1:6j, [0]*3, 5, 6] array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. 
]) @@ -538,7 +538,7 @@ class CClass(AxisConcatenator): [2, 5], [3, 6]]) >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])] - array([[1, 2, 3, 0, 0, 4, 5, 6]]) + array([[1, 2, 3, ..., 4, 5, 6]]) """ @@ -813,7 +813,7 @@ def fill_diagonal(a, val, wrap=False): >>> # tall matrices no wrap >>> a = np.zeros((5, 3),int) - >>> fill_diagonal(a, 4) + >>> np.fill_diagonal(a, 4) >>> a array([[4, 0, 0], [0, 4, 0], @@ -823,7 +823,7 @@ def fill_diagonal(a, val, wrap=False): >>> # tall matrices wrap >>> a = np.zeros((5, 3),int) - >>> fill_diagonal(a, 4, wrap=True) + >>> np.fill_diagonal(a, 4, wrap=True) >>> a array([[4, 0, 0], [0, 4, 0], @@ -833,7 +833,7 @@ def fill_diagonal(a, val, wrap=False): >>> # wide matrices >>> a = np.zeros((3, 5),int) - >>> fill_diagonal(a, 4, wrap=True) + >>> np.fill_diagonal(a, 4, wrap=True) >>> a array([[4, 0, 0, 0, 0], [0, 4, 0, 0, 0], diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py index d73d84467..b3bf1880b 100644 --- a/numpy/lib/nanfunctions.py +++ b/numpy/lib/nanfunctions.py @@ -271,9 +271,9 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue): >>> np.nanmin(a) 1.0 >>> np.nanmin(a, axis=0) - array([ 1., 2.]) + array([1., 2.]) >>> np.nanmin(a, axis=1) - array([ 1., 3.]) + array([1., 3.]) When positive infinity and negative infinity are present: @@ -384,9 +384,9 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue): >>> np.nanmax(a) 3.0 >>> np.nanmax(a, axis=0) - array([ 3., 2.]) + array([3., 2.]) >>> np.nanmax(a, axis=1) - array([ 2., 3.]) + array([2., 3.]) When positive infinity and negative infinity are present: @@ -601,12 +601,15 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): >>> np.nansum(a) 3.0 >>> np.nansum(a, axis=0) - array([ 2., 1.]) + array([2., 1.]) >>> np.nansum([1, np.nan, np.inf]) inf >>> np.nansum([1, np.nan, np.NINF]) -inf - >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present + >>> from numpy.testing import suppress_warnings + >>> with suppress_warnings() as sup: + ... sup.filter(RuntimeWarning) + ... 
np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present nan """ @@ -677,7 +680,7 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): >>> np.nanprod(a) 6.0 >>> np.nanprod(a, axis=0) - array([ 3., 2.]) + array([3., 2.]) """ a, mask = _replace_nan(a, 1) @@ -738,16 +741,16 @@ def nancumsum(a, axis=None, dtype=None, out=None): >>> np.nancumsum([1]) array([1]) >>> np.nancumsum([1, np.nan]) - array([ 1., 1.]) + array([1., 1.]) >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nancumsum(a) - array([ 1., 3., 6., 6.]) + array([1., 3., 6., 6.]) >>> np.nancumsum(a, axis=0) - array([[ 1., 2.], - [ 4., 2.]]) + array([[1., 2.], + [4., 2.]]) >>> np.nancumsum(a, axis=1) - array([[ 1., 3.], - [ 3., 3.]]) + array([[1., 3.], + [3., 3.]]) """ a, mask = _replace_nan(a, 0) @@ -805,16 +808,16 @@ def nancumprod(a, axis=None, dtype=None, out=None): >>> np.nancumprod([1]) array([1]) >>> np.nancumprod([1, np.nan]) - array([ 1., 1.]) + array([1., 1.]) >>> a = np.array([[1, 2], [3, np.nan]]) >>> np.nancumprod(a) - array([ 1., 2., 6., 6.]) + array([1., 2., 6., 6.]) >>> np.nancumprod(a, axis=0) - array([[ 1., 2.], - [ 3., 2.]]) + array([[1., 2.], + [3., 2.]]) >>> np.nancumprod(a, axis=1) - array([[ 1., 2.], - [ 3., 3.]]) + array([[1., 2.], + [3., 3.]]) """ a, mask = _replace_nan(a, 1) @@ -895,9 +898,9 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue): >>> np.nanmean(a) 2.6666666666666665 >>> np.nanmean(a, axis=0) - array([ 2., 4.]) + array([2., 4.]) >>> np.nanmean(a, axis=1) - array([ 1., 3.5]) + array([1., 3.5]) # may vary """ arr, mask = _replace_nan(a, 0) @@ -1049,19 +1052,19 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu >>> a = np.array([[10.0, 7, 4], [3, 2, 1]]) >>> a[0, 1] = np.nan >>> a - array([[ 10., nan, 4.], - [ 3., 2., 1.]]) + array([[10., nan, 4.], + [ 3., 2., 1.]]) >>> np.median(a) nan >>> np.nanmedian(a) 3.0 >>> np.nanmedian(a, axis=0) - array([ 6.5, 2., 2.5]) + array([6.5, 2. , 2.5]) >>> np.median(a, axis=1) - array([ 7., 2.]) + array([nan, 2.]) >>> b = a.copy() >>> np.nanmedian(b, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a==b) >>> b = a.copy() >>> np.nanmedian(b, axis=None, overwrite_input=True) @@ -1177,27 +1180,27 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False, >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a - array([[ 10., nan, 4.], - [ 3., 2., 1.]]) + array([[10., nan, 4.], + [ 3., 2., 1.]]) >>> np.percentile(a, 50) nan >>> np.nanpercentile(a, 50) - 3.5 + 3.0 >>> np.nanpercentile(a, 50, axis=0) - array([ 6.5, 2., 2.5]) + array([6.5, 2. , 2.5]) >>> np.nanpercentile(a, 50, axis=1, keepdims=True) - array([[ 7.], - [ 2.]]) + array([[7.], + [2.]]) >>> m = np.nanpercentile(a, 50, axis=0) >>> out = np.zeros_like(m) >>> np.nanpercentile(a, 50, axis=0, out=out) - array([ 6.5, 2., 2.5]) + array([6.5, 2. , 2.5]) >>> m - array([ 6.5, 2. , 2.5]) + array([6.5, 2. , 2.5]) >>> b = a.copy() >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a==b) """ @@ -1291,26 +1294,26 @@ def nanquantile(a, q, axis=None, out=None, overwrite_input=False, >>> a = np.array([[10., 7., 4.], [3., 2., 1.]]) >>> a[0][1] = np.nan >>> a - array([[ 10., nan, 4.], - [ 3., 2., 1.]]) + array([[10., nan, 4.], + [ 3., 2., 1.]]) >>> np.quantile(a, 0.5) nan >>> np.nanquantile(a, 0.5) - 3.5 + 3.0 >>> np.nanquantile(a, 0.5, axis=0) - array([ 6.5, 2., 2.5]) + array([6.5, 2. 
, 2.5]) >>> np.nanquantile(a, 0.5, axis=1, keepdims=True) - array([[ 7.], - [ 2.]]) + array([[7.], + [2.]]) >>> m = np.nanquantile(a, 0.5, axis=0) >>> out = np.zeros_like(m) >>> np.nanquantile(a, 0.5, axis=0, out=out) - array([ 6.5, 2., 2.5]) + array([6.5, 2. , 2.5]) >>> m - array([ 6.5, 2. , 2.5]) + array([6.5, 2. , 2.5]) >>> b = a.copy() >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True) - array([ 7., 2.]) + array([7., 2.]) >>> assert not np.all(a==b) """ a = np.asanyarray(a) @@ -1465,12 +1468,12 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): Examples -------- >>> a = np.array([[1, np.nan], [3, 4]]) - >>> np.var(a) + >>> np.nanvar(a) 1.5555555555555554 >>> np.nanvar(a, axis=0) - array([ 1., 0.]) + array([1., 0.]) >>> np.nanvar(a, axis=1) - array([ 0., 0.25]) + array([0., 0.25]) # may vary """ arr, mask = _replace_nan(a, 0) @@ -1619,9 +1622,9 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue): >>> np.nanstd(a) 1.247219128924647 >>> np.nanstd(a, axis=0) - array([ 1., 0.]) + array([1., 0.]) >>> np.nanstd(a, axis=1) - array([ 0., 0.5]) + array([0., 0.5]) # may vary """ var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof, diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py index db6a8e5eb..704fea108 100644 --- a/numpy/lib/npyio.py +++ b/numpy/lib/npyio.py @@ -168,13 +168,13 @@ class NpzFile(Mapping): >>> x = np.arange(10) >>> y = np.sin(x) >>> np.savez(outfile, x=x, y=y) - >>> outfile.seek(0) + >>> _ = outfile.seek(0) >>> npz = np.load(outfile) >>> isinstance(npz, np.lib.io.NpzFile) True - >>> npz.files - ['y', 'x'] + >>> sorted(npz.files) + ['x', 'y'] >>> npz['x'] # getitem access array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> npz.f.x # attribute lookup @@ -502,7 +502,7 @@ def save(file, arr, allow_pickle=True, fix_imports=True): >>> x = np.arange(10) >>> np.save(outfile, x) - >>> outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file >>> np.load(outfile) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -597,10 +597,10 @@ def savez(file, *args, **kwds): Using `savez` with \\*args, the arrays are saved with default names. >>> np.savez(outfile, x, y) - >>> outfile.seek(0) # Only needed here to simulate closing & reopening file + >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file >>> npzfile = np.load(outfile) >>> npzfile.files - ['arr_1', 'arr_0'] + ['arr_0', 'arr_1'] >>> npzfile['arr_0'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -608,10 +608,10 @@ def savez(file, *args, **kwds): >>> outfile = TemporaryFile() >>> np.savez(outfile, x=x, y=y) - >>> outfile.seek(0) + >>> _ = outfile.seek(0) >>> npzfile = np.load(outfile) - >>> npzfile.files - ['y', 'x'] + >>> sorted(npzfile.files) + ['x', 'y'] >>> npzfile['x'] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) @@ -829,7 +829,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, `genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None. skiprows : int, optional - Skip the first `skiprows` lines; default: 0. + Skip the first `skiprows` lines, including comments; default: 0. usecols : int or sequence, optional Which columns to read, with 0 being the first. For example, ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns. 
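A minimal sketch of the `skiprows` wording above, using a made-up two-row table: the skipped count includes comment lines, so here ``skiprows=1`` consumes the leading comment and both data rows are still read, with ``usecols`` picking the first and third columns.

>>> from io import StringIO
>>> c = StringIO("# x y z\n1 2 3\n4 5 6")
>>> np.loadtxt(c, skiprows=1, usecols=(0, 2))
array([[1., 3.],
       [4., 6.]])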
@@ -891,21 +891,21 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None, >>> from io import StringIO # StringIO behaves like a file object >>> c = StringIO(u"0 1\\n2 3") >>> np.loadtxt(c) - array([[ 0., 1.], - [ 2., 3.]]) + array([[0., 1.], + [2., 3.]]) >>> d = StringIO(u"M 21 72\\nF 35 58") >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'), ... 'formats': ('S1', 'i4', 'f4')}) - array([('M', 21, 72.0), ('F', 35, 58.0)], - dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')]) + array([(b'M', 21, 72.), (b'F', 35, 58.)], + dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')]) >>> c = StringIO(u"1,0,2\\n3,0,4") >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True) >>> x - array([ 1., 3.]) + array([1., 3.]) >>> y - array([ 2., 4.]) + array([2., 4.]) """ # Type conversions for Py3 convenience @@ -1481,17 +1481,17 @@ def fromregex(file, regexp, dtype, encoding=None): Examples -------- >>> f = open('test.dat', 'w') - >>> f.write("1312 foo\\n1534 bar\\n444 qux") + >>> _ = f.write("1312 foo\\n1534 bar\\n444 qux") >>> f.close() >>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything] >>> output = np.fromregex('test.dat', regexp, ... [('num', np.int64), ('key', 'S3')]) >>> output - array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')], - dtype=[('num', '<i8'), ('key', '|S3')]) + array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], + dtype=[('num', '<i8'), ('key', 'S3')]) >>> output['num'] - array([1312, 1534, 444], dtype=int64) + array([1312, 1534, 444]) """ own_fh = False @@ -1674,26 +1674,26 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), ... ('mystring','S5')], delimiter=",") >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) + array((1, 1.3, b'abcde'), + dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) Using dtype = None - >>> s.seek(0) # needed for StringIO example only + >>> _ = s.seek(0) # needed for StringIO example only >>> data = np.genfromtxt(s, dtype=None, ... names = ['myint','myfloat','mystring'], delimiter=",") >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) + array((1, 1.3, b'abcde'), + dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) Specifying dtype and names - >>> s.seek(0) + >>> _ = s.seek(0) >>> data = np.genfromtxt(s, dtype="i8,f8,S5", ... names=['myint','myfloat','mystring'], delimiter=",") >>> data - array((1, 1.3, 'abcde'), - dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')]) + array((1, 1.3, b'abcde'), + dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) An example with fixed-width columns @@ -1701,8 +1701,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None, >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], ... 
delimiter=[1,3,5]) >>> data - array((1, 1.3, 'abcde'), - dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')]) + array((1, 1.3, b'abcde'), + dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')]) """ if max_rows is not None: diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py index e3defdca2..7904092ed 100644 --- a/numpy/lib/polynomial.py +++ b/numpy/lib/polynomial.py @@ -110,7 +110,7 @@ def poly(seq_of_zeros): Given a sequence of a polynomial's zeros: >>> np.poly((0, 0, 0)) # Multiple root example - array([1, 0, 0, 0]) + array([1., 0., 0., 0.]) The line above represents z**3 + 0*z**2 + 0*z + 0. @@ -119,14 +119,14 @@ def poly(seq_of_zeros): The line above represents z**3 - z/4 - >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0])) - array([ 1. , -0.77086955, 0.08618131, 0. ]) #random + >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0])) + array([ 1. , -0.77086955, 0.08618131, 0. ]) # random Given a square array object: >>> P = np.array([[0, 1./3], [-1./2, 0]]) >>> np.poly(P) - array([ 1. , 0. , 0.16666667]) + array([1. , 0. , 0.16666667]) Note how in all cases the leading coefficient is always 1. @@ -295,7 +295,7 @@ def polyint(p, m=1, k=None): >>> p = np.poly1d([1,1,1]) >>> P = np.polyint(p) >>> P - poly1d([ 0.33333333, 0.5 , 1. , 0. ]) + poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary >>> np.polyder(P) == p True @@ -310,7 +310,7 @@ def polyint(p, m=1, k=None): 0.0 >>> P = np.polyint(p, 3, k=[6,5,3]) >>> P - poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) + poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary Note that 3 = 6 / 2!, and that the constants are given in the order of integrations. Constant of the highest-order polynomial term comes first: @@ -404,7 +404,7 @@ def polyder(p, m=1): >>> np.polyder(p, 3) poly1d([6]) >>> np.polyder(p, 4) - poly1d([ 0.]) + poly1d([0.]) """ m = int(m) @@ -552,28 +552,29 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False): >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0]) >>> z = np.polyfit(x, y, 3) >>> z - array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) + array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary It is convenient to use `poly1d` objects for dealing with polynomials: >>> p = np.poly1d(z) >>> p(0.5) - 0.6143849206349179 + 0.6143849206349179 # may vary >>> p(3.5) - -0.34732142857143039 + -0.34732142857143039 # may vary >>> p(10) - 22.579365079365115 + 22.579365079365115 # may vary High-order polynomials may oscillate wildly: >>> p30 = np.poly1d(np.polyfit(x, y, 30)) - /... RankWarning: Polyfit may be poorly conditioned... + ... + >>> # RankWarning: Polyfit may be poorly conditioned... 
>>> p30(4) - -0.80000000000000204 + -0.80000000000000204 # may vary >>> p30(5) - -0.99999999999999445 + -0.99999999999999445 # may vary >>> p30(4.5) - -0.10547061179440398 + -0.10547061179440398 # may vary Illustration: @@ -714,11 +715,11 @@ def polyval(p, x): >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1 76 >>> np.polyval([3,0,1], np.poly1d(5)) - poly1d([ 76.]) + poly1d([76.]) >>> np.polyval(np.poly1d([3,0,1]), 5) 76 >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5)) - poly1d([ 76.]) + poly1d([76.]) """ p = NX.asarray(p) @@ -951,7 +952,7 @@ def polydiv(u, v): >>> x = np.array([3.0, 5.0, 2.0]) >>> y = np.array([2.0, 1.0]) >>> np.polydiv(x, y) - (array([ 1.5 , 1.75]), array([ 0.25])) + (array([1.5 , 1.75]), array([0.25])) """ truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d)) @@ -1046,7 +1047,7 @@ class poly1d(object): >>> p.r array([-1.+1.41421356j, -1.-1.41421356j]) >>> p(p.r) - array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) + array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary These numbers in the previous line represent (0, 0) to machine precision @@ -1073,7 +1074,7 @@ class poly1d(object): poly1d([ 1, 4, 10, 12, 9]) >>> (p**3 + 4) / p - (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.])) + (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.])) ``asarray(p)`` gives the coefficient array, so polynomials can be used in all functions that accept arrays: @@ -1095,7 +1096,7 @@ class poly1d(object): Construct a polynomial from its roots: >>> np.poly1d([1, 2], True) - poly1d([ 1, -3, 2]) + poly1d([ 1., -3., 2.]) This is the same polynomial as obtained by: diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py index fcc0d9a7a..5ff35f0bb 100644 --- a/numpy/lib/recfunctions.py +++ b/numpy/lib/recfunctions.py @@ -57,11 +57,10 @@ def recursive_fill_fields(input, output): Examples -------- >>> from numpy.lib import recfunctions as rfn - >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)]) + >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)]) >>> b = np.zeros((3,), dtype=a.dtype) >>> rfn.recursive_fill_fields(a, b) - array([(1, 10.0), (2, 20.0), (0, 0.0)], - dtype=[('A', '<i4'), ('B', '<f8')]) + array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')]) """ newdtype = output.dtype @@ -89,11 +88,11 @@ def get_fieldspec(dtype): Examples -------- - >>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)]) + >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)]) >>> dt.descr - [(('a', 'A'), '<i4'), ('b', '<f8', (3,))] + [(('a', 'A'), '<i8'), ('b', '<f8', (3,))] >>> get_fieldspec(dt) - [(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))] + [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))] """ if dtype.names is None: @@ -120,10 +119,15 @@ def get_names(adtype): Examples -------- >>> from numpy.lib import recfunctions as rfn - >>> rfn.get_names(np.empty((1,), dtype=int)) is None - True + >>> rfn.get_names(np.empty((1,), dtype=int)) + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' + >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)])) - ('A', 'B') + Traceback (most recent call last): + ... 
+ AttributeError: 'numpy.ndarray' object has no attribute 'names' >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names(adtype) ('a', ('b', ('ba', 'bb'))) @@ -153,9 +157,13 @@ def get_names_flat(adtype): -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None - True + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)])) - ('A', 'B') + Traceback (most recent call last): + ... + AttributeError: 'numpy.ndarray' object has no attribute 'names' >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])]) >>> rfn.get_names_flat(adtype) ('a', 'b', 'ba', 'bb') @@ -403,20 +411,18 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False, -------- >>> from numpy.lib import recfunctions as rfn >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.]))) - masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)], - mask = [(False, False) (False, False) (True, False)], - fill_value = (999999, 1e+20), - dtype = [('f0', '<i4'), ('f1', '<f8')]) - - >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])), - ... usemask=False) - array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('f0', '<i4'), ('f1', '<f8')]) - >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]), + array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('f0', '<i8'), ('f1', '<f8')]) + + >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64), + ... np.array([10., 20., 30.])), usemask=False) + array([(1, 10.0), (2, 20.0), (-1, 30.0)], + dtype=[('f0', '<i8'), ('f1', '<f8')]) + >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]), ... np.array([10., 20., 30.])), ... usemask=False, asrecarray=True) - rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)], - dtype=[('a', '<i4'), ('f1', '<f8')]) + rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)], + dtype=[('a', '<i8'), ('f1', '<f8')]) Notes ----- @@ -547,16 +553,14 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False): -------- >>> from numpy.lib import recfunctions as rfn >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], - ... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])]) + ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])]) >>> rfn.drop_fields(a, 'a') - array([((2.0, 3),), ((5.0, 6),)], - dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])]) + array([((2., 3),), ((5., 6),)], + dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])]) >>> rfn.drop_fields(a, 'ba') - array([(1, (3,)), (4, (6,))], - dtype=[('a', '<i4'), ('b', [('bb', '<i4')])]) + array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])]) >>> rfn.drop_fields(a, ['ba', 'bb']) - array([(1,), (4,)], - dtype=[('a', '<i4')]) + array([(1,), (4,)], dtype=[('a', '<i8')]) """ if _is_string_like(drop_names): drop_names = [drop_names] @@ -648,8 +652,8 @@ def rename_fields(base, namemapper): >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))], ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])]) >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'}) - array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))], - dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])]) + array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))], + dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])]) """ def _recursive_rename_fields(ndtype, namemapper): @@ -834,18 +838,18 @@ def repack_fields(a, align=False, recurse=False): ... 
print("offsets:", [d.fields[name][1] for name in d.names]) ... print("itemsize:", d.itemsize) ... - >>> dt = np.dtype('u1,i4,f4', align=True) + >>> dt = np.dtype('u1,<i4,<f4', align=True) >>> dt - dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f8'], 'offsets':[0,4,8], 'itemsize':16}, align=True) + dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True) >>> print_offsets(dt) - offsets: [0, 4, 8] - itemsize: 16 + offsets: [0, 8, 16] + itemsize: 24 >>> packed_dt = repack_fields(dt) >>> packed_dt - dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f8')]) + dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')]) >>> print_offsets(packed_dt) - offsets: [0, 1, 5] - itemsize: 13 + offsets: [0, 1, 9] + itemsize: 17 """ if not isinstance(a, np.dtype): @@ -1244,15 +1248,16 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, True >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)]) >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)], - ... dtype=[('A', '|S3'), ('B', float), ('C', float)]) + ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)]) >>> test = rfn.stack_arrays((z,zz)) >>> test - masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0) - ('c', 30.0, 300.0)], - mask = [(False, False, True) (False, False, True) (False, False, False) - (False, False, False) (False, False, False)], - fill_value = ('N/A', 1e+20, 1e+20), - dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')]) + masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0), + (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)], + mask=[(False, False, True), (False, False, True), + (False, False, False), (False, False, False), + (False, False, False)], + fill_value=(b'N/A', 1.e+20, 1.e+20), + dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')]) """ if isinstance(arrays, ndarray): @@ -1331,7 +1336,10 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False): >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) - ... 
# XXX: judging by the output, the ignoremask flag has no effect + (masked_array(data=[(1,), (1,), (2,), (2,)], + mask=[(False,), (False,), (False,), (False,)], + fill_value=(999999,), + dtype=[('a', '<i8')]), array([0, 1, 3, 4])) """ a = np.asanyarray(a).ravel() # Get a dictionary of fields diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py index 9ca006841..5ac790ce9 100644 --- a/numpy/lib/scimath.py +++ b/numpy/lib/scimath.py @@ -59,7 +59,7 @@ def _tocomplex(arr): >>> a = np.array([1,2,3],np.short) >>> ac = np.lib.scimath._tocomplex(a); ac - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) >>> ac.dtype dtype('complex64') @@ -70,7 +70,7 @@ def _tocomplex(arr): >>> b = np.array([1,2,3],np.double) >>> bc = np.lib.scimath._tocomplex(b); bc - array([ 1.+0.j, 2.+0.j, 3.+0.j]) + array([1.+0.j, 2.+0.j, 3.+0.j]) >>> bc.dtype dtype('complex128') @@ -81,13 +81,13 @@ def _tocomplex(arr): >>> c = np.array([1,2,3],np.csingle) >>> cc = np.lib.scimath._tocomplex(c); cc - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) >>> c *= 2; c - array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) + array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64) >>> cc - array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) + array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64) """ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte, nt.ushort, nt.csingle)): @@ -170,7 +170,7 @@ def _fix_real_abs_gt_1(x): array([0, 1]) >>> np.lib.scimath._fix_real_abs_gt_1([0,2]) - array([ 0.+0.j, 2.+0.j]) + array([0.+0.j, 2.+0.j]) """ x = asarray(x) if any(isreal(x) & (abs(x) > 1)): @@ -212,14 +212,14 @@ def sqrt(x): >>> np.lib.scimath.sqrt(1) 1.0 >>> np.lib.scimath.sqrt([1, 4]) - array([ 1., 2.]) + array([1., 2.]) But it automatically handles negative inputs: >>> np.lib.scimath.sqrt(-1) - (0.0+1.0j) + 1j >>> np.lib.scimath.sqrt([-1,4]) - array([ 0.+1.j, 2.+0.j]) + array([0.+1.j, 2.+0.j]) """ x = _fix_real_lt_zero(x) @@ -317,7 +317,7 @@ def log10(x): 1.0 >>> np.emath.log10([-10**1, -10**2, 10**2]) - array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ]) + array([1.+1.3644j, 2.+1.3644j, 2.+0.j ]) """ x = _fix_real_lt_zero(x) @@ -354,9 +354,9 @@ def logn(n, x): >>> np.set_printoptions(precision=4) >>> np.lib.scimath.logn(2, [4, 8]) - array([ 2., 3.]) + array([2., 3.]) >>> np.lib.scimath.logn(2, [-4, -8, 8]) - array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ x = _fix_real_lt_zero(x) @@ -405,7 +405,7 @@ def log2(x): >>> np.emath.log2(8) 3.0 >>> np.emath.log2([-4, -8, 8]) - array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ]) + array([2.+4.5324j, 3.+4.5324j, 3.+0.j ]) """ x = _fix_real_lt_zero(x) @@ -451,9 +451,9 @@ def power(x, p): >>> np.lib.scimath.power([2, 4], 2) array([ 4, 16]) >>> np.lib.scimath.power([2, 4], -2) - array([ 0.25 , 0.0625]) + array([0.25 , 0.0625]) >>> np.lib.scimath.power([-2, 4], 2) - array([ 4.+0.j, 16.+0.j]) + array([ 4.-0.j, 16.+0.j]) """ x = _fix_real_lt_zero(x) @@ -499,7 +499,7 @@ def arccos(x): 0.0 >>> np.emath.arccos([1,2]) - array([ 0.-0.j , 0.+1.317j]) + array([0.-0.j , 0.-1.317j]) """ x = _fix_real_abs_gt_1(x) @@ -545,7 +545,7 @@ def arcsin(x): 0.0 >>> np.emath.arcsin([0,1]) - array([ 0. , 1.5708]) + array([0. 
, 1.5708]) """ x = _fix_real_abs_gt_1(x) @@ -589,11 +589,14 @@ def arctanh(x): -------- >>> np.set_printoptions(precision=4) - >>> np.emath.arctanh(np.eye(2)) - array([[ Inf, 0.], - [ 0., Inf]]) + >>> from numpy.testing import suppress_warnings + >>> with suppress_warnings() as sup: + ... sup.filter(RuntimeWarning) + ... np.emath.arctanh(np.eye(2)) + array([[inf, 0.], + [ 0., inf]]) >>> np.emath.arctanh([1j]) - array([ 0.+0.7854j]) + array([0.+0.7854j]) """ x = _fix_real_abs_gt_1(x) diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py index f56c4f4db..e088a6c4a 100644 --- a/numpy/lib/shape_base.py +++ b/numpy/lib/shape_base.py @@ -129,7 +129,7 @@ def take_along_axis(arr, indices, axis): [40, 50, 60]]) >>> ai = np.argsort(a, axis=1); ai array([[0, 2, 1], - [1, 2, 0]], dtype=int64) + [1, 2, 0]]) >>> np.take_along_axis(a, ai, axis=1) array([[10, 20, 30], [40, 50, 60]]) @@ -142,7 +142,7 @@ def take_along_axis(arr, indices, axis): >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) >>> ai array([[1], - [0], dtype=int64) + [0]]) >>> np.take_along_axis(a, ai, axis=1) array([[30], [60]]) @@ -152,10 +152,10 @@ def take_along_axis(arr, indices, axis): >>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1) >>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1) - >>> ai = np.concatenate([ai_min, ai_max], axis=axis) - >> ai + >>> ai = np.concatenate([ai_min, ai_max], axis=1) + >>> ai array([[0, 1], - [1, 0]], dtype=int64) + [1, 0]]) >>> np.take_along_axis(a, ai, axis=1) array([[10, 30], [40, 60]]) @@ -243,7 +243,7 @@ def put_along_axis(arr, indices, values, axis): >>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1) >>> ai array([[1], - [0]], dtype=int64) + [0]]) >>> np.put_along_axis(a, ai, 99, axis=1) >>> a array([[10, 99, 20], @@ -330,9 +330,9 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs): ... return (a[0] + a[-1]) * 0.5 >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]]) >>> np.apply_along_axis(my_func, 0, b) - array([ 4., 5., 6.]) + array([4., 5., 6.]) >>> np.apply_along_axis(my_func, 1, b) - array([ 2., 5., 8.]) + array([2., 5., 8.]) For a function that returns a 1D array, the number of dimensions in `outarr` is the same as `arr`. 
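A short sketch of the sentence above, with a made-up `min_max` helper: because the helper returns a 1-D array for each 1-D slice, the result of `apply_along_axis` keeps the two dimensions of the input.

>>> def min_max(a):
...     # build a 1-D array from each 1-D input slice
...     return np.array([a.min(), a.max()])
>>> b = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> np.apply_along_axis(min_max, 1, b)
array([[1, 3],
       [4, 6],
       [7, 9]])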
@@ -732,11 +732,11 @@ def array_split(ary, indices_or_sections, axis=0): -------- >>> x = np.arange(8.0) >>> np.array_split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])] + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])] >>> x = np.arange(7.0) >>> np.array_split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4.]), array([ 5., 6.])] + [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])] """ try: @@ -828,14 +828,14 @@ def split(ary, indices_or_sections, axis=0): -------- >>> x = np.arange(9.0) >>> np.split(x, 3) - [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])] + [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])] >>> x = np.arange(8.0) >>> np.split(x, [3, 5, 6, 10]) - [array([ 0., 1., 2.]), - array([ 3., 4.]), - array([ 5.]), - array([ 6., 7.]), + [array([0., 1., 2.]), + array([3., 4.]), + array([5.]), + array([6., 7.]), array([], dtype=float64)] """ @@ -872,43 +872,43 @@ def hsplit(ary, indices_or_sections): -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], - [ 12., 13.]]), + [12., 13.]]), array([[ 2., 3.], [ 6., 7.], - [ 10., 11.], - [ 14., 15.]])] + [10., 11.], + [14., 15.]])] >>> np.hsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2.], - [ 4., 5., 6.], - [ 8., 9., 10.], - [ 12., 13., 14.]]), - array([[ 3.], - [ 7.], - [ 11.], - [ 15.]]), - array([], dtype=float64)] + [array([[ 0., 1., 2.], + [ 4., 5., 6.], + [ 8., 9., 10.], + [12., 13., 14.]]), + array([[ 3.], + [ 7.], + [11.], + [15.]]), + array([], shape=(4, 0), dtype=float64)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x - array([[[ 0., 1.], - [ 2., 3.]], - [[ 4., 5.], - [ 6., 7.]]]) + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) >>> np.hsplit(x, 2) - [array([[[ 0., 1.]], - [[ 4., 5.]]]), - array([[[ 2., 3.]], - [[ 6., 7.]]])] + [array([[[0., 1.]], + [[4., 5.]]]), + array([[[2., 3.]], + [[6., 7.]]])] """ if _nx.ndim(ary) == 0: @@ -936,35 +936,31 @@ def vsplit(ary, indices_or_sections): -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x - array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]) + array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.], + [12., 13., 14., 15.]]) >>> np.vsplit(x, 2) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]]), - array([[ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]])] + [array([[0., 1., 2., 3.], + [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], + [12., 13., 14., 15.]])] >>> np.vsplit(x, np.array([3, 6])) - [array([[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.], - [ 8., 9., 10., 11.]]), - array([[ 12., 13., 14., 15.]]), - array([], dtype=float64)] + [array([[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.], + [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)] With a higher dimensional array the split is still along the first axis. 
>>> x = np.arange(8.0).reshape(2, 2, 2) >>> x - array([[[ 0., 1.], - [ 2., 3.]], - [[ 4., 5.], - [ 6., 7.]]]) + array([[[0., 1.], + [2., 3.]], + [[4., 5.], + [6., 7.]]]) >>> np.vsplit(x, 2) - [array([[[ 0., 1.], - [ 2., 3.]]]), - array([[[ 4., 5.], - [ 6., 7.]]])] + [array([[[0., 1.], + [2., 3.]]]), array([[[4., 5.], + [6., 7.]]])] """ if _nx.ndim(ary) < 2: @@ -989,30 +985,28 @@ def dsplit(ary, indices_or_sections): -------- >>> x = np.arange(16.0).reshape(2, 2, 4) >>> x - array([[[ 0., 1., 2., 3.], - [ 4., 5., 6., 7.]], - [[ 8., 9., 10., 11.], - [ 12., 13., 14., 15.]]]) + array([[[ 0., 1., 2., 3.], + [ 4., 5., 6., 7.]], + [[ 8., 9., 10., 11.], + [12., 13., 14., 15.]]]) >>> np.dsplit(x, 2) - [array([[[ 0., 1.], - [ 4., 5.]], - [[ 8., 9.], - [ 12., 13.]]]), - array([[[ 2., 3.], - [ 6., 7.]], - [[ 10., 11.], - [ 14., 15.]]])] + [array([[[ 0., 1.], + [ 4., 5.]], + [[ 8., 9.], + [12., 13.]]]), array([[[ 2., 3.], + [ 6., 7.]], + [[10., 11.], + [14., 15.]]])] >>> np.dsplit(x, np.array([3, 6])) - [array([[[ 0., 1., 2.], - [ 4., 5., 6.]], - [[ 8., 9., 10.], - [ 12., 13., 14.]]]), - array([[[ 3.], - [ 7.]], - [[ 11.], - [ 15.]]]), - array([], dtype=float64)] - + [array([[[ 0., 1., 2.], + [ 4., 5., 6.]], + [[ 8., 9., 10.], + [12., 13., 14.]]]), + array([[[ 3.], + [ 7.]], + [[11.], + [15.]]]), + array([], shape=(2, 2, 0), dtype=float64)] """ if _nx.ndim(ary) < 3: raise ValueError('dsplit only works on arrays of 3 or more dimensions') @@ -1092,15 +1086,15 @@ def kron(a, b): Examples -------- >>> np.kron([1,10,100], [5,6,7]) - array([ 5, 6, 7, 50, 60, 70, 500, 600, 700]) + array([ 5, 6, 7, ..., 500, 600, 700]) >>> np.kron([5,6,7], [1,10,100]) - array([ 5, 50, 500, 6, 60, 600, 7, 70, 700]) + array([ 5, 50, 500, ..., 7, 70, 700]) >>> np.kron(np.eye(2), np.ones((2,2))) - array([[ 1., 1., 0., 0.], - [ 1., 1., 0., 0.], - [ 0., 0., 1., 1.], - [ 0., 0., 1., 1.]]) + array([[1., 1., 0., 0.], + [1., 1., 0., 0.], + [0., 0., 1., 1.], + [0., 0., 1., 1.]]) >>> a = np.arange(100).reshape((2,5,2,5)) >>> b = np.arange(24).reshape((2,3,4)) diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py index 27d848608..54d0240ef 100644 --- a/numpy/lib/twodim_base.py +++ b/numpy/lib/twodim_base.py @@ -77,13 +77,13 @@ def fliplr(m): -------- >>> A = np.diag([1.,2.,3.]) >>> A - array([[ 1., 0., 0.], - [ 0., 2., 0.], - [ 0., 0., 3.]]) + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) >>> np.fliplr(A) - array([[ 0., 0., 1.], - [ 0., 2., 0.], - [ 3., 0., 0.]]) + array([[0., 0., 1.], + [0., 2., 0.], + [3., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.fliplr(A) == A[:,::-1,...]) @@ -129,13 +129,13 @@ def flipud(m): -------- >>> A = np.diag([1.0, 2, 3]) >>> A - array([[ 1., 0., 0.], - [ 0., 2., 0.], - [ 0., 0., 3.]]) + array([[1., 0., 0.], + [0., 2., 0.], + [0., 0., 3.]]) >>> np.flipud(A) - array([[ 0., 0., 3.], - [ 0., 2., 0.], - [ 1., 0., 0.]]) + array([[0., 0., 3.], + [0., 2., 0.], + [1., 0., 0.]]) >>> A = np.random.randn(2,3,5) >>> np.all(np.flipud(A) == A[::-1,...]) @@ -191,9 +191,9 @@ def eye(N, M=None, k=0, dtype=float, order='C'): array([[1, 0], [0, 1]]) >>> np.eye(3, k=1) - array([[ 0., 1., 0.], - [ 0., 0., 1.], - [ 0., 0., 0.]]) + array([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) """ if M is None: @@ -378,9 +378,9 @@ def tri(N, M=None, k=0, dtype=float): [1, 1, 1, 1, 1]]) >>> np.tri(3, 5, -1) - array([[ 0., 0., 0., 0., 0.], - [ 1., 0., 0., 0., 0.], - [ 1., 1., 0., 0., 0.]]) + array([[0., 0., 0., 0., 0.], + [1., 0., 0., 0., 0.], + [1., 1., 0., 0., 0.]]) """ if M is None: @@ -540,7 +540,7 @@ def 
vander(x, N=None, increasing=False): of the differences between the values of the input vector: >>> np.linalg.det(np.vander(x)) - 48.000000000000043 + 48.000000000000043 # may vary >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1) 48 @@ -644,6 +644,9 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, Examples -------- + >>> import matplotlib + >>> import matplotlib.pyplot + >>> matplotlib.pyplot.switch_backend('agg') >>> import matplotlib as mpl >>> import matplotlib.pyplot as plt @@ -666,6 +669,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, >>> ax = fig.add_subplot(131, title='imshow: square bins') >>> plt.imshow(H, interpolation='nearest', origin='low', ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) + <matplotlib.image.AxesImage object at 0x...> :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges: @@ -673,6 +677,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None, ... aspect='equal') >>> X, Y = np.meshgrid(xedges, yedges) >>> ax.pcolormesh(X, Y, H) + <matplotlib.collections.QuadMesh object at 0x...> :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to display actual bin edges with interpolation: @@ -829,7 +834,7 @@ def tril_indices(n, k=0, m=None): Both for indexing: >>> a[il1] - array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15]) + array([ 0, 4, 5, ..., 13, 14, 15]) And for assigning values: @@ -944,7 +949,7 @@ def triu_indices(n, k=0, m=None): Both for indexing: >>> a[iu1] - array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15]) + array([ 0, 1, 2, ..., 10, 11, 15]) And for assigning values: diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py index 90b1e9a6e..f55517732 100644 --- a/numpy/lib/type_check.py +++ b/numpy/lib/type_check.py @@ -105,11 +105,11 @@ def asfarray(a, dtype=_nx.float_): Examples -------- >>> np.asfarray([2, 3]) - array([ 2., 3.]) + array([2., 3.]) >>> np.asfarray([2, 3], dtype='float') - array([ 2., 3.]) + array([2., 3.]) >>> np.asfarray([2, 3], dtype='int8') - array([ 2., 3.]) + array([2., 3.]) """ if not _nx.issubdtype(dtype, _nx.inexact): @@ -146,13 +146,13 @@ def real(val): -------- >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.real - array([ 1., 3., 5.]) + array([1., 3., 5.]) >>> a.real = 9 >>> a - array([ 9.+2.j, 9.+4.j, 9.+6.j]) + array([9.+2.j, 9.+4.j, 9.+6.j]) >>> a.real = np.array([9, 8, 7]) >>> a - array([ 9.+2.j, 8.+4.j, 7.+6.j]) + array([9.+2.j, 8.+4.j, 7.+6.j]) >>> np.real(1 + 1j) 1.0 @@ -192,10 +192,10 @@ def imag(val): -------- >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.imag - array([ 2., 4., 6.]) + array([2., 4., 6.]) >>> a.imag = np.array([8, 10, 12]) >>> a - array([ 1. +8.j, 3.+10.j, 5.+12.j]) + array([1. 
+8.j, 3.+10.j, 5.+12.j]) >>> np.imag(1 + 1j) 1.0 @@ -422,11 +422,11 @@ def nan_to_num(x, copy=True): 0.0 >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128]) >>> np.nan_to_num(x) - array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, - -1.28000000e+002, 1.28000000e+002]) + array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary + -1.28000000e+002, 1.28000000e+002]) >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)]) >>> np.nan_to_num(y) - array([ 1.79769313e+308 +0.00000000e+000j, + array([ 1.79769313e+308 +0.00000000e+000j, # may vary 0.00000000e+000 +0.00000000e+000j, 0.00000000e+000 +1.79769313e+308j]) """ @@ -490,12 +492,12 @@ def real_if_close(a, tol=100): Examples -------- >>> np.finfo(float).eps - 2.2204460492503131e-16 + 2.2204460492503131e-16 # may vary >>> np.real_if_close([2.1 + 4e-14j], tol=1000) - array([ 2.1]) + array([2.1]) >>> np.real_if_close([2.1 + 4e-13j], tol=1000) - array([ 2.1 +4.00000000e-13j]) + array([2.1+4.e-13j]) """ a = asanyarray(a) @@ -538,7 +540,6 @@ def asscalar(a): -------- >>> np.asscalar(np.array([24])) 24 - """ # 2018-10-10, 1.16 @@ -672,11 +673,11 @@ def common_type(*arrays): Examples -------- >>> np.common_type(np.arange(2, dtype=np.float32)) - <type 'numpy.float32'> + <class 'numpy.float32'> >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2)) - <type 'numpy.float64'> + <class 'numpy.float64'> >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0])) - <type 'numpy.complex128'> + <class 'numpy.complex128'> """ is_complex = False diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py index 9a9e6f9dd..5c411e8c8 100644 --- a/numpy/lib/ufunclike.py +++ b/numpy/lib/ufunclike.py @@ -154,11 +154,11 @@ def isposinf(x, out=None): Examples -------- >>> np.isposinf(np.PINF) - array(True, dtype=bool) + True >>> np.isposinf(np.inf) - array(True, dtype=bool) + True >>> np.isposinf(np.NINF) - array(False, dtype=bool) + False >>> np.isposinf([-np.inf, 0., np.inf]) array([False, False, True]) @@ -224,11 +224,11 @@ def isneginf(x, out=None): Examples -------- >>> np.isneginf(np.NINF) - array(True, dtype=bool) + True >>> np.isneginf(np.inf) - array(False, dtype=bool) + False >>> np.isneginf(np.PINF) - array(False, dtype=bool) + False >>> np.isneginf([-np.inf, 0., np.inf]) array([ True, False, False]) diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py index 84edf4021..5a4cae235 100644 --- a/numpy/lib/utils.py +++ b/numpy/lib/utils.py @@ -150,10 +150,8 @@ def deprecate(*args, **kwargs): Warning: >>> olduint = np.deprecate(np.uint) + DeprecationWarning: `uint64` is deprecated! 
# may vary >>> olduint(6) - /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114: - DeprecationWarning: uint32 is deprecated - warnings.warn(str1, DeprecationWarning, stacklevel=2) 6 """ @@ -201,8 +199,8 @@ def byte_bounds(a): >>> low, high = np.byte_bounds(I) >>> high - low == I.size*I.itemsize True - >>> I = np.eye(2, dtype='G'); I.dtype - dtype('complex192') + >>> I = np.eye(2); I.dtype + dtype('float64') >>> low, high = np.byte_bounds(I) >>> high - low == I.size*I.itemsize True @@ -263,17 +261,17 @@ def who(vardict=None): >>> np.who() Name Shape Bytes Type =========================================================== - a 10 40 int32 + a 10 80 int64 b 20 160 float64 - Upper bound on total bytes = 200 + Upper bound on total bytes = 240 >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str', ... 'idx':5} >>> np.who(d) Name Shape Bytes Type =========================================================== - y 3 24 float64 x 2 16 float64 + y 3 24 float64 Upper bound on total bytes = 40 """ @@ -733,7 +731,7 @@ def lookfor(what, module=None, import_modules=True, regenerate=False, Examples -------- - >>> np.lookfor('binary representation') + >>> np.lookfor('binary representation') # doctest: +SKIP Search results for 'binary representation' ------------------------------------------ numpy.binary_repr @@ -1104,7 +1102,7 @@ def safe_eval(source): >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()') Traceback (most recent call last): ... - SyntaxError: Unsupported source construct: compiler.ast.CallFunc + ValueError: malformed node or string: <_ast.Call object at 0x...> """ # Local import to speed up numpy's import time. diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py index 8363d7377..92fa6cb73 100644 --- a/numpy/linalg/linalg.py +++ b/numpy/linalg/linalg.py @@ -377,7 +377,7 @@ def solve(a, b): >>> b = np.array([9,8]) >>> x = np.linalg.solve(a, b) >>> x - array([ 2., 3.]) + array([2., 3.]) Check that the solution is correct: @@ -535,10 +535,10 @@ def inv(a): >>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]]) >>> inv(a) - array([[[-2. , 1. ], - [ 1.5, -0.5]], - [[-5. , 2. ], - [ 3. , -1. ]]]) + array([[[-2. , 1. ], + [ 1.5 , -0.5 ]], + [[-1.25, 0.75], + [ 0.75, -0.25]]]) """ a, wrap = _makearray(a) @@ -730,21 +730,21 @@ def cholesky(a): -------- >>> A = np.array([[1,-2j],[2j,5]]) >>> A - array([[ 1.+0.j, 0.-2.j], + array([[ 1.+0.j, -0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> L = np.linalg.cholesky(A) >>> L - array([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) >>> np.dot(L, L.T.conj()) # verify that L * L.H = A - array([[ 1.+0.j, 0.-2.j], - [ 0.+2.j, 5.+0.j]]) + array([[1.+0.j, 0.-2.j], + [0.+2.j, 5.+0.j]]) >>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like? 
>>> np.linalg.cholesky(A) # an ndarray object is returned - array([[ 1.+0.j, 0.+0.j], - [ 0.+2.j, 1.+0.j]]) + array([[1.+0.j, 0.+0.j], + [0.+2.j, 1.+0.j]]) >>> # But a matrix object is returned if A is a matrix object - >>> LA.cholesky(np.matrix(A)) + >>> np.linalg.cholesky(np.matrix(A)) matrix([[ 1.+0.j, 0.+0.j], [ 0.+2.j, 1.+0.j]]) @@ -878,9 +878,9 @@ def qr(a, mode='reduced'): [1, 1], [2, 1]]) >>> b = np.array([1, 0, 2, 1]) - >>> q, r = LA.qr(A) + >>> q, r = np.linalg.qr(A) >>> p = np.dot(q.T, b) - >>> np.dot(LA.inv(r), p) + >>> np.dot(np.linalg.inv(r), p) array([ 1.1e-16, 1.0e+00]) """ @@ -1049,7 +1049,7 @@ def eigvals(a): >>> A = np.dot(Q, D) >>> A = np.dot(A, Q.T) >>> LA.eigvals(A) - array([ 1., -1.]) + array([ 1., -1.]) # random """ a, wrap = _makearray(a) @@ -1131,24 +1131,24 @@ def eigvalsh(a, UPLO='L'): >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> LA.eigvalsh(a) - array([ 0.17157288, 5.82842712]) + array([ 0.17157288, 5.82842712]) # may vary >>> # demonstrate the treatment of the imaginary part of the diagonal >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) >>> a - array([[ 5.+2.j, 9.-2.j], - [ 0.+2.j, 2.-1.j]]) + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) >>> # with UPLO='L' this is numerically equivalent to using LA.eigvals() >>> # with: >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) >>> b - array([[ 5.+0.j, 0.-2.j], - [ 0.+2.j, 2.+0.j]]) + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) >>> wa = LA.eigvalsh(a) >>> wb = LA.eigvals(b) >>> wa; wb - array([ 1., 6.]) - array([ 6.+0.j, 1.+0.j]) + array([1., 6.]) + array([6.+0.j, 1.+0.j]) """ UPLO = UPLO.upper() @@ -1264,19 +1264,19 @@ def eig(a): >>> w, v = LA.eig(np.diag((1, 2, 3))) >>> w; v - array([ 1., 2., 3.]) - array([[ 1., 0., 0.], - [ 0., 1., 0.], - [ 0., 0., 1.]]) + array([1., 2., 3.]) + array([[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]]) Real matrix possessing complex e-values and e-vectors; note that the e-values are complex conjugates of each other. >>> w, v = LA.eig(np.array([[1, -1], [1, 1]])) >>> w; v - array([ 1. + 1.j, 1. - 1.j]) - array([[ 0.70710678+0.j , 0.70710678+0.j ], - [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]]) + array([1.+1.j, 1.-1.j]) + array([[0.70710678+0.j , 0.70710678-0.j ], + [0. -0.70710678j, 0. +0.70710678j]]) Complex-valued matrix with real e-values (but complex-valued e-vectors); note that a.conj().T = a, i.e., a is Hermitian. @@ -1284,9 +1284,9 @@ def eig(a): >>> a = np.array([[1, 1j], [-1j, 1]]) >>> w, v = LA.eig(a) >>> w; v - array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0} - array([[ 0.00000000+0.70710678j, 0.70710678+0.j ], - [ 0.70710678+0.j , 0.00000000+0.70710678j]]) + array([2.+0.j, 0.+0.j]) + array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary + [ 0.70710678+0.j , -0. +0.70710678j]]) Be careful about round-off error! @@ -1294,9 +1294,9 @@ def eig(a): >>> # Theor. e-values are 1 +/- 1e-9 >>> w, v = LA.eig(a) >>> w; v - array([ 1., 1.]) - array([[ 1., 0.], - [ 0., 1.]]) + array([1., 1.]) + array([[1., 0.], + [0., 1.]]) """ a, wrap = _makearray(a) @@ -1392,49 +1392,49 @@ def eigh(a, UPLO='L'): >>> from numpy import linalg as LA >>> a = np.array([[1, -2j], [2j, 5]]) >>> a - array([[ 1.+0.j, 0.-2.j], + array([[ 1.+0.j, -0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> w, v = LA.eigh(a) >>> w; v - array([ 0.17157288, 5.82842712]) - array([[-0.92387953+0.j , -0.38268343+0.j ], - [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) + array([0.17157288, 5.82842712]) + array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. 
+0.38268343j, 0. -0.92387953j]]) >>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair - array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j]) + array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j]) >>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair - array([ 0.+0.j, 0.+0.j]) + array([0.+0.j, 0.+0.j]) >>> A = np.matrix(a) # what happens if input is a matrix object >>> A - matrix([[ 1.+0.j, 0.-2.j], + matrix([[ 1.+0.j, -0.-2.j], [ 0.+2.j, 5.+0.j]]) >>> w, v = LA.eigh(A) >>> w; v - array([ 0.17157288, 5.82842712]) - matrix([[-0.92387953+0.j , -0.38268343+0.j ], - [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]]) + array([0.17157288, 5.82842712]) + matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary + [ 0. +0.38268343j, 0. -0.92387953j]]) >>> # demonstrate the treatment of the imaginary part of the diagonal >>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]]) >>> a - array([[ 5.+2.j, 9.-2.j], - [ 0.+2.j, 2.-1.j]]) + array([[5.+2.j, 9.-2.j], + [0.+2.j, 2.-1.j]]) >>> # with UPLO='L' this is numerically equivalent to using LA.eig() with: >>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]]) >>> b - array([[ 5.+0.j, 0.-2.j], - [ 0.+2.j, 2.+0.j]]) + array([[5.+0.j, 0.-2.j], + [0.+2.j, 2.+0.j]]) >>> wa, va = LA.eigh(a) >>> wb, vb = LA.eig(b) >>> wa; wb - array([ 1., 6.]) - array([ 6.+0.j, 1.+0.j]) + array([1., 6.]) + array([6.+0.j, 1.+0.j]) >>> va; vb - array([[-0.44721360-0.j , -0.89442719+0.j ], - [ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]]) - array([[ 0.89442719+0.j , 0.00000000-0.4472136j], - [ 0.00000000-0.4472136j, 0.89442719+0.j ]]) + array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary + [ 0. +0.89442719j, 0. -0.4472136j ]]) + array([[ 0.89442719+0.j , -0. +0.4472136j], + [-0. +0.4472136j, 0.89442719+0.j ]]) """ UPLO = UPLO.upper() if UPLO not in ('L', 'U'): @@ -1705,9 +1705,9 @@ def cond(x, p=None): >>> LA.cond(a, 2) 1.4142135623730951 >>> LA.cond(a, -2) - 0.70710678118654746 + 0.70710678118654746 # may vary >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0)) - 0.70710678118654746 + 0.70710678118654746 # may vary """ x = asarray(x) # in case we have a matrix @@ -2002,7 +2002,7 @@ def slogdet(a): >>> a = np.array([[1, 2], [3, 4]]) >>> (sign, logdet) = np.linalg.slogdet(a) >>> (sign, logdet) - (-1, 0.69314718055994529) + (-1, 0.69314718055994529) # may vary >>> sign * np.exp(logdet) -2.0 @@ -2074,7 +2074,7 @@ def det(a): >>> a = np.array([[1, 2], [3, 4]]) >>> np.linalg.det(a) - -2.0 + -2.0 # may vary Computing determinants for a stack of matrices: @@ -2181,15 +2181,15 @@ def lstsq(a, b, rcond="warn"): [ 3., 1.]]) >>> m, c = np.linalg.lstsq(A, y, rcond=None)[0] - >>> print(m, c) - 1.0 -0.95 + >>> m, c + (1.0 -0.95) # may vary Plot the data along with the fitted line: >>> import matplotlib.pyplot as plt - >>> plt.plot(x, y, 'o', label='Original data', markersize=10) - >>> plt.plot(x, m*x + c, 'r', label='Fitted line') - >>> plt.legend() + >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10) + >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line') + >>> _ = plt.legend() >>> plt.show() """ @@ -2367,7 +2367,7 @@ def norm(x, ord=None, axis=None, keepdims=False): >>> from numpy import linalg as LA >>> a = np.arange(9) - 4 >>> a - array([-4, -3, -2, -1, 0, 1, 2, 3, 4]) + array([-4, -3, -2, ..., 2, 3, 4]) >>> b = a.reshape((3, 3)) >>> b array([[-4, -3, -2], @@ -2403,13 +2403,13 @@ def norm(x, ord=None, axis=None, keepdims=False): 7.3484692283495345 >>> LA.norm(a, -2) - nan + 0.0 >>> LA.norm(b, -2) - 
1.8570331885190563e-016 + 1.8570331885190563e-016 # may vary >>> LA.norm(a, 3) - 5.8480354764257312 + 5.8480354764257312 # may vary >>> LA.norm(a, -3) - nan + 0.0 Using the `axis` argument to compute vector norms: @@ -2584,18 +2584,20 @@ def multi_dot(arrays): >>> from numpy.linalg import multi_dot >>> # Prepare some data - >>> A = np.random.random(10000, 100) - >>> B = np.random.random(100, 1000) - >>> C = np.random.random(1000, 5) - >>> D = np.random.random(5, 333) + >>> A = np.random.random((10000, 100)) + >>> B = np.random.random((100, 1000)) + >>> C = np.random.random((1000, 5)) + >>> D = np.random.random((5, 333)) >>> # the actual dot multiplication - >>> multi_dot([A, B, C, D]) + >>> _ = multi_dot([A, B, C, D]) instead of:: - >>> np.dot(np.dot(np.dot(A, B), C), D) + >>> _ = np.dot(np.dot(np.dot(A, B), C), D) + ... >>> # or - >>> A.dot(B).dot(C).dot(D) + >>> _ = A.dot(B).dot(C).dot(D) + ... Notes ----- diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 96d7207bd..63a61599c 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -516,18 +516,18 @@ def set_fill_value(a, fill_value): array([0, 1, 2, 3, 4]) >>> a = ma.masked_where(a < 3, a) >>> a - masked_array(data = [-- -- -- 3 4], - mask = [ True True True False False], - fill_value=999999) + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=999999) >>> ma.set_fill_value(a, -999) >>> a - masked_array(data = [-- -- -- 3 4], - mask = [ True True True False False], - fill_value=-999) + masked_array(data=[--, --, --, 3, 4], + mask=[ True, True, True, False, False], + fill_value=-999) Nothing happens if `a` is not a masked array. - >>> a = range(5) + >>> a = list(range(5)) >>> a [0, 1, 2, 3, 4] >>> ma.set_fill_value(a, 100) @@ -689,13 +689,12 @@ def getdata(a, subok=True): >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value=999999) + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) >>> ma.getdata(a) array([[1, 2], [3, 4]]) @@ -752,20 +751,19 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None): -------- >>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3) >>> x - masked_array(data = [-- -1.0 nan inf], - mask = [ True False False False], - fill_value = 1e+20) + masked_array(data=[--, -1.0, nan, inf], + mask=[ True, False, False, False], + fill_value=1e+20) >>> np.ma.fix_invalid(x) - masked_array(data = [-- -1.0 -- --], - mask = [ True False True True], - fill_value = 1e+20) + masked_array(data=[--, -1.0, --, --], + mask=[ True, False, True, True], + fill_value=1e+20) >>> fixed = np.ma.fix_invalid(x) >>> fixed.data - array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20, - 1.00000000e+20]) + array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20]) >>> x.data - array([ 1., -1., NaN, Inf]) + array([ 1., -1., nan, inf]) """ a = masked_array(a, copy=copy, mask=mask, subok=True) @@ -1346,9 +1344,9 @@ def make_mask_descr(ndtype): -------- >>> import numpy.ma as ma >>> dtype = np.dtype({'names':['foo', 'bar'], - 'formats':[np.float32, int]}) + ... 
'formats':[np.float32, np.int64]}) >>> dtype - dtype([('foo', '<f4'), ('bar', '<i4')]) + dtype([('foo', '<f4'), ('bar', '<i8')]) >>> ma.make_mask_descr(dtype) dtype([('foo', '|b1'), ('bar', '|b1')]) >>> ma.make_mask_descr(np.float32) @@ -1381,13 +1379,12 @@ def getmask(a): >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value=999999) + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) >>> ma.getmask(a) array([[False, True], [False, False]]) @@ -1402,12 +1399,11 @@ def getmask(a): >>> b = ma.masked_array([[1,2],[3,4]]) >>> b - masked_array(data = - [[1 2] - [3 4]], - mask = - False, - fill_value=999999) + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) >>> ma.nomask False >>> ma.getmask(b) == ma.nomask @@ -1445,13 +1441,12 @@ def getmaskarray(arr): >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value=999999) + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=2) >>> ma.getmaskarray(a) array([[False, True], [False, False]]) @@ -1460,13 +1455,12 @@ def getmaskarray(arr): >>> b = ma.masked_array([[1,2],[3,4]]) >>> b - masked_array(data = - [[1 2] - [3 4]], - mask = - False, - fill_value=999999) - >>> >ma.getmaskarray(b) + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) + >>> ma.getmaskarray(b) array([[False, False], [False, False]]) @@ -1504,9 +1498,9 @@ def is_mask(m): >>> import numpy.ma as ma >>> m = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> m - masked_array(data = [-- 1 -- 2 3], - mask = [ True False True False False], - fill_value=999999) + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) >>> ma.is_mask(m) False >>> ma.is_mask(m.mask) @@ -1527,14 +1521,14 @@ def is_mask(m): Arrays with complex dtypes don't return True. >>> dtype = np.dtype({'names':['monty', 'pithon'], - 'formats':[bool, bool]}) + ... 'formats':[bool, bool]}) >>> dtype dtype([('monty', '|b1'), ('pithon', '|b1')]) >>> m = np.array([(True, False), (False, True), (True, False)], - dtype=dtype) + ... dtype=dtype) >>> m - array([(True, False), (False, True), (True, False)], - dtype=[('monty', '|b1'), ('pithon', '|b1')]) + array([( True, False), (False, True), ( True, False)], + dtype=[('monty', '?'), ('pithon', '?')]) >>> ma.is_mask(m) False @@ -1600,7 +1594,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType): >>> m = np.zeros(4) >>> m - array([ 0., 0., 0., 0.]) + array([0., 0., 0., 0.]) >>> ma.make_mask(m) False >>> ma.make_mask(m, shrink=False) @@ -1616,11 +1610,11 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType): >>> arr [(1, 0), (0, 1), (1, 0), (1, 0)] >>> dtype = np.dtype({'names':['man', 'mouse'], - 'formats':[int, int]}) + ... 'formats':[np.int64, np.int64]}) >>> arr = np.array(arr, dtype=dtype) >>> arr array([(1, 0), (0, 1), (1, 0), (1, 0)], - dtype=[('man', '<i4'), ('mouse', '<i4')]) + dtype=[('man', '<i8'), ('mouse', '<i8')]) >>> ma.make_mask(arr, dtype=dtype) array([(True, False), (False, True), (True, False), (True, False)], dtype=[('man', '|b1'), ('mouse', '|b1')]) @@ -1679,9 +1673,9 @@ def make_mask_none(newshape, dtype=None): Defining a more complex dtype. >>> dtype = np.dtype({'names':['foo', 'bar'], - 'formats':[np.float32, int]}) + ... 
'formats':[np.float32, np.int64]}) >>> dtype - dtype([('foo', '<f4'), ('bar', '<i4')]) + dtype([('foo', '<f4'), ('bar', '<i8')]) >>> ma.make_mask_none((3,), dtype=dtype) array([(False, False), (False, False), (False, False)], dtype=[('foo', '|b1'), ('bar', '|b1')]) @@ -1779,16 +1773,16 @@ def flatten_mask(mask): Examples -------- >>> mask = np.array([0, 0, 1]) - >>> flatten_mask(mask) + >>> np.ma.flatten_mask(mask) array([False, False, True]) >>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)]) - >>> flatten_mask(mask) + >>> np.ma.flatten_mask(mask) array([False, False, False, True]) >>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])] >>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype) - >>> flatten_mask(mask) + >>> np.ma.flatten_mask(mask) array([False, False, False, False, False, True]) """ @@ -1873,38 +1867,39 @@ def masked_where(condition, a, copy=True): >>> a array([0, 1, 2, 3]) >>> ma.masked_where(a <= 2, a) - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) Mask array `b` conditional on `a`. >>> b = ['a', 'b', 'c', 'd'] >>> ma.masked_where(a == 2, b) - masked_array(data = [a b -- d], - mask = [False False True False], - fill_value=N/A) + masked_array(data=['a', 'b', --, 'd'], + mask=[False, False, True, False], + fill_value='N/A', + dtype='<U1') Effect of the `copy` argument. >>> c = ma.masked_where(a <= 2, a) >>> c - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) >>> c[0] = 99 >>> c - masked_array(data = [99 -- -- 3], - mask = [False True True False], - fill_value=999999) + masked_array(data=[99, --, --, 3], + mask=[False, True, True, False], + fill_value=999999) >>> a array([0, 1, 2, 3]) >>> c = ma.masked_where(a <= 2, a, copy=False) >>> c[0] = 99 >>> c - masked_array(data = [99 -- -- 3], - mask = [False True True False], - fill_value=999999) + masked_array(data=[99, --, --, 3], + mask=[False, True, True, False], + fill_value=999999) >>> a array([99, 1, 2, 3]) @@ -1913,19 +1908,19 @@ def masked_where(condition, a, copy=True): >>> a = np.arange(4) >>> a = ma.masked_where(a == 2, a) >>> a - masked_array(data = [0 1 -- 3], - mask = [False False True False], - fill_value=999999) + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=999999) >>> b = np.arange(4) >>> b = ma.masked_where(b == 0, b) >>> b - masked_array(data = [-- 1 2 3], - mask = [ True False False False], - fill_value=999999) + masked_array(data=[--, 1, 2, 3], + mask=[ True, False, False, False], + fill_value=999999) >>> ma.masked_where(a == 3, b) - masked_array(data = [-- 1 -- --], - mask = [ True False True True], - fill_value=999999) + masked_array(data=[--, 1, --, --], + mask=[ True, False, True, True], + fill_value=999999) """ # Make sure that condition is a valid standard-type mask. 
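The `numpy/ma/core.py` hunks above and below are doctest refreshes rather than behaviour changes: the expected output is rewritten to the newer multi-line `masked_array(...)` repr with `data=`, `mask=` and `fill_value=` keywords and comma-separated elements. A minimal sketch of what the refreshed doctests now expect, assuming a NumPy release that already ships the updated repr (1.14 or later); the variable name is illustrative only:

import numpy as np
import numpy.ma as ma

# Same construction as the masked_where doctest updated above.
a = ma.masked_where(np.arange(4) <= 2, np.arange(4))

# The refreshed doctests expect the keyword-style, comma-separated repr,
# roughly:
#   masked_array(data=[--, --, --, 3],
#                mask=[ True,  True,  True, False],
#          fill_value=999999)
print(repr(a))
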
@@ -1965,9 +1960,9 @@ def masked_greater(x, value, copy=True): >>> a array([0, 1, 2, 3]) >>> ma.masked_greater(a, 2) - masked_array(data = [0 1 2 --], - mask = [False False False True], - fill_value=999999) + masked_array(data=[0, 1, 2, --], + mask=[False, False, False, True], + fill_value=999999) """ return masked_where(greater(x, value), x, copy=copy) @@ -1991,9 +1986,9 @@ def masked_greater_equal(x, value, copy=True): >>> a array([0, 1, 2, 3]) >>> ma.masked_greater_equal(a, 2) - masked_array(data = [0 1 -- --], - mask = [False False True True], - fill_value=999999) + masked_array(data=[0, 1, --, --], + mask=[False, False, True, True], + fill_value=999999) """ return masked_where(greater_equal(x, value), x, copy=copy) @@ -2017,9 +2012,9 @@ def masked_less(x, value, copy=True): >>> a array([0, 1, 2, 3]) >>> ma.masked_less(a, 2) - masked_array(data = [-- -- 2 3], - mask = [ True True False False], - fill_value=999999) + masked_array(data=[--, --, 2, 3], + mask=[ True, True, False, False], + fill_value=999999) """ return masked_where(less(x, value), x, copy=copy) @@ -2043,9 +2038,9 @@ def masked_less_equal(x, value, copy=True): >>> a array([0, 1, 2, 3]) >>> ma.masked_less_equal(a, 2) - masked_array(data = [-- -- -- 3], - mask = [ True True True False], - fill_value=999999) + masked_array(data=[--, --, --, 3], + mask=[ True, True, True, False], + fill_value=999999) """ return masked_where(less_equal(x, value), x, copy=copy) @@ -2069,9 +2064,9 @@ def masked_not_equal(x, value, copy=True): >>> a array([0, 1, 2, 3]) >>> ma.masked_not_equal(a, 2) - masked_array(data = [-- -- 2 --], - mask = [ True True False True], - fill_value=999999) + masked_array(data=[--, --, 2, --], + mask=[ True, True, False, True], + fill_value=999999) """ return masked_where(not_equal(x, value), x, copy=copy) @@ -2097,9 +2092,9 @@ def masked_equal(x, value, copy=True): >>> a array([0, 1, 2, 3]) >>> ma.masked_equal(a, 2) - masked_array(data = [0 1 -- 3], - mask = [False False True False], - fill_value=999999) + masked_array(data=[0, 1, --, 3], + mask=[False, False, True, False], + fill_value=2) """ output = masked_where(equal(x, value), x, copy=copy) @@ -2128,16 +2123,16 @@ def masked_inside(x, v1, v2, copy=True): >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_inside(x, -0.3, 0.3) - masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], - mask = [False False True True False False], - fill_value=1e+20) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) The order of `v1` and `v2` doesn't matter. >>> ma.masked_inside(x, 0.3, -0.3) - masked_array(data = [0.31 1.2 -- -- -0.4 -1.1], - mask = [False False True True False False], - fill_value=1e+20) + masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1], + mask=[False, False, True, True, False, False], + fill_value=1e+20) """ if v2 < v1: @@ -2168,16 +2163,16 @@ def masked_outside(x, v1, v2, copy=True): >>> import numpy.ma as ma >>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1] >>> ma.masked_outside(x, -0.3, 0.3) - masked_array(data = [-- -- 0.01 0.2 -- --], - mask = [ True True False False True True], - fill_value=1e+20) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) The order of `v1` and `v2` doesn't matter. 
>>> ma.masked_outside(x, 0.3, -0.3) - masked_array(data = [-- -- 0.01 0.2 -- --], - mask = [ True True False False True True], - fill_value=1e+20) + masked_array(data=[--, --, 0.01, 0.2, --, --], + mask=[ True, True, False, False, True, True], + fill_value=1e+20) """ if v2 < v1: @@ -2222,20 +2217,27 @@ def masked_object(x, value, copy=True, shrink=True): >>> food = np.array(['green_eggs', 'ham'], dtype=object) >>> # don't eat spoiled food >>> eat = ma.masked_object(food, 'green_eggs') - >>> print(eat) - [-- ham] + >>> eat + masked_array(data=[--, 'ham'], + mask=[ True, False], + fill_value='green_eggs', + dtype=object) >>> # plain ol` ham is boring >>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object) >>> eat = ma.masked_object(fresh_food, 'green_eggs') - >>> print(eat) - [cheese ham pineapple] + >>> eat + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) Note that `mask` is set to ``nomask`` if possible. >>> eat - masked_array(data = [cheese ham pineapple], - mask = False, - fill_value=?) + masked_array(data=['cheese', 'ham', 'pineapple'], + mask=False, + fill_value='green_eggs', + dtype=object) """ if isMaskedArray(x): @@ -2290,16 +2292,16 @@ def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): >>> import numpy.ma as ma >>> x = np.array([1, 1.1, 2, 1.1, 3]) >>> ma.masked_values(x, 1.1) - masked_array(data = [1.0 -- 2.0 -- 3.0], - mask = [False True False True False], - fill_value=1.1) + masked_array(data=[1.0, --, 2.0, --, 3.0], + mask=[False, True, False, True, False], + fill_value=1.1) Note that `mask` is set to ``nomask`` if possible. >>> ma.masked_values(x, 1.5) - masked_array(data = [ 1. 1.1 2. 1.1 3. ], - mask = False, - fill_value=1.5) + masked_array(data=[1. , 1.1, 2. , 1.1, 3. ], + mask=False, + fill_value=1.5) For integers, the fill value will be different in general to the result of ``masked_equal``. 
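The comparison helpers above (`masked_greater` through `masked_values`) get the same repr refresh, and the updated expected outputs also surface a detail about the fill value: where a comparison value was given, it is shown as the fill value instead of the generic 999999 default. A small sketch under that assumption, using values taken from the doctests in this diff (variable names are illustrative only); the next hunk shows the corresponding integer doctest:

import numpy as np
import numpy.ma as ma

# masked_values keeps the value it masked against as the fill value ...
mv = ma.masked_values(np.array([1, 1.1, 2, 1.1, 3]), 1.1)
print(mv.fill_value)   # 1.1

# ... and, per the refreshed doctests, masked_equal on integers likewise
# reports the compared value rather than the 999999 default.
me = ma.masked_equal(np.arange(5), 2)
print(me.fill_value)   # 2
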
@@ -2308,13 +2310,13 @@ def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True): >>> x array([0, 1, 2, 3, 4]) >>> ma.masked_values(x, 2) - masked_array(data = [0 1 -- 3 4], - mask = [False False True False False], - fill_value=2) + masked_array(data=[0, 1, --, 3, 4], + mask=[False, False, True, False, False], + fill_value=2) >>> ma.masked_equal(x, 2) - masked_array(data = [0 1 -- 3 4], - mask = [False False True False False], - fill_value=999999) + masked_array(data=[0, 1, --, 3, 4], + mask=[False, False, True, False, False], + fill_value=2) """ xnew = filled(x, value) @@ -2348,11 +2350,11 @@ def masked_invalid(a, copy=True): >>> a[2] = np.NaN >>> a[3] = np.PINF >>> a - array([ 0., 1., NaN, Inf, 4.]) + array([ 0., 1., nan, inf, 4.]) >>> ma.masked_invalid(a) - masked_array(data = [0.0 1.0 -- -- 4.0], - mask = [False False True True False], - fill_value=1e+20) + masked_array(data=[0.0, 1.0, --, --, 4.0], + mask=[False, False, True, True, False], + fill_value=1e+20) """ a = np.array(a, copy=copy, subok=True) @@ -2513,7 +2515,7 @@ def flatten_structured_array(a): -------- >>> ndtype = [('a', int), ('b', float)] >>> a = np.array([(1, 1), (2, 2)], dtype=ndtype) - >>> flatten_structured_array(a) + >>> np.ma.flatten_structured_array(a) array([[1., 1.], [2., 2.]]) @@ -2684,9 +2686,7 @@ class MaskedIterator(object): >>> fl.next() 3 >>> fl.next() - masked_array(data = --, - mask = True, - fill_value = 1e+20) + masked >>> fl.next() Traceback (most recent call last): File "<stdin>", line 1, in <module> @@ -3551,6 +3551,11 @@ class MaskedArray(ndarray): array([[False, False], [False, False]]) >>> x.shrink_mask() + masked_array( + data=[[1, 2], + [3, 4]], + mask=False, + fill_value=999999) >>> x.mask False @@ -3639,7 +3644,7 @@ class MaskedArray(ndarray): -inf >>> x.set_fill_value(np.pi) >>> x.fill_value - 3.1415926535897931 + 3.1415926535897931 # may vary Reset to default: @@ -3688,9 +3693,9 @@ class MaskedArray(ndarray): -------- >>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999) >>> x.filled() - array([1, 2, -999, 4, -999]) + array([ 1, 2, -999, 4, -999]) >>> type(x.filled()) - <type 'numpy.ndarray'> + <class 'numpy.ndarray'> Subclassing is preserved. 
This means that if, e.g., the data part of the masked array is a recarray, `filled` returns a recarray: @@ -3755,7 +3760,7 @@ class MaskedArray(ndarray): >>> x.compressed() array([0, 1]) >>> type(x.compressed()) - <type 'numpy.ndarray'> + <class 'numpy.ndarray'> """ data = ndarray.ravel(self._data) @@ -3797,25 +3802,29 @@ class MaskedArray(ndarray): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print(x) - [[1 -- 3] - [-- 5 --] - [7 -- 9]] + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) >>> x.compress([1, 0, 1]) - masked_array(data = [1 3], - mask = [False False], - fill_value=999999) + masked_array(data=[1, 3], + mask=[False, False], + fill_value=999999) >>> x.compress([1, 0, 1], axis=1) - masked_array(data = - [[1 3] - [-- --] - [7 9]], - mask = - [[False False] - [ True True] - [False False]], - fill_value=999999) + masked_array( + data=[[1, 3], + [--, --], + [7, 9]], + mask=[[False, False], + [ True, True], + [False, False]], + fill_value=999999) """ # Get the basic components @@ -4348,9 +4357,9 @@ class MaskedArray(ndarray): -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.get_imag() - masked_array(data = [1.0 -- 1.6], - mask = [False True False], - fill_value = 1e+20) + masked_array(data=[1.0, --, 1.6], + mask=[False, True, False], + fill_value=1e+20) """ result = self._data.imag.view(type(self)) @@ -4383,9 +4392,9 @@ class MaskedArray(ndarray): -------- >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.get_real() - masked_array(data = [1.0 -- 3.45], - mask = [False True False], - fill_value = 1e+20) + masked_array(data=[1.0, --, 3.45], + mask=[False, True, False], + fill_value=1e+20) """ result = self._data.real.view(type(self)) @@ -4431,13 +4440,12 @@ class MaskedArray(ndarray): >>> a = ma.arange(6).reshape((2, 3)) >>> a[1, :] = ma.masked >>> a - masked_array(data = - [[0 1 2] - [-- -- --]], - mask = - [[False False False] - [ True True True]], - fill_value = 999999) + masked_array( + data=[[0, 1, 2], + [--, --, --]], + mask=[[False, False, False], + [ True, True, True]], + fill_value=999999) >>> a.count() 3 @@ -4522,12 +4530,20 @@ class MaskedArray(ndarray): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print(x) - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> print(x.ravel()) - [1 -- 3 -- 5 -- 7 -- 9] + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.ravel() + masked_array(data=[1, --, 3, --, 5, --, 7, --, 9], + mask=[False, True, False, True, False, True, False, True, + False], + fill_value=999999) """ r = ndarray.ravel(self._data, order=order).view(type(self)) @@ -4576,15 +4592,25 @@ class MaskedArray(ndarray): Examples -------- >>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1]) - >>> print(x) - [[-- 2] - [3 --]] + >>> x + masked_array( + data=[[--, 2], + [3, --]], + mask=[[ True, False], + [False, True]], + fill_value=999999) >>> x = x.reshape((4,1)) - >>> print(x) - [[--] - [2] - [3] - [--]] + >>> x + masked_array( + data=[[--], + [2], + [3], + [--]], + mask=[[ True], + [False], + [False], + [ True]], + fill_value=999999) """ kwargs.update(order=kwargs.get('order', 'C')) @@ -4641,21 +4667,36 @@ class MaskedArray(ndarray): Examples -------- >>> x = 
np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print(x) - [[1 -- 3] - [-- 5 --] - [7 -- 9]] + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) >>> x.put([0,4,8],[10,20,30]) - >>> print(x) - [[10 -- 3] - [-- 20 --] - [7 -- 30]] + >>> x + masked_array( + data=[[10, --, 3], + [--, 20, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) >>> x.put(4,999) - >>> print(x) - [[10 -- 3] - [-- 999 --] - [7 -- 30]] + >>> x + masked_array( + data=[[10, --, 3], + [--, 999, --], + [7, --, 30]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) """ # Hard mask: Get rid of the values/indices that fall on masked data @@ -4695,14 +4736,14 @@ class MaskedArray(ndarray): -------- >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() - (166670640, 166659832) + (166670640, 166659832) # may vary If the array has no mask, the address of `nomask` is returned. This address is typically not close to the data in memory: >>> x = np.ma.array([1, 2, 3]) >>> x.ids() - (166691080, 3083169284L) + (166691080, 3083169284L) # may vary """ if self._mask is nomask: @@ -4851,13 +4892,12 @@ class MaskedArray(ndarray): >>> import numpy.ma as ma >>> x = ma.array(np.eye(3)) >>> x - masked_array(data = - [[ 1. 0. 0.] - [ 0. 1. 0.] - [ 0. 0. 1.]], - mask = - False, - fill_value=1e+20) + masked_array( + data=[[1., 0., 0.], + [0., 1., 0.], + [0., 0., 1.]], + mask=False, + fill_value=1e+20) >>> x.nonzero() (array([0, 1, 2]), array([0, 1, 2])) @@ -4865,15 +4905,14 @@ class MaskedArray(ndarray): >>> x[1, 1] = ma.masked >>> x - masked_array(data = - [[1.0 0.0 0.0] - [0.0 -- 0.0] - [0.0 0.0 1.0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=1e+20) + masked_array( + data=[[1.0, 0.0, 0.0], + [0.0, --, 0.0], + [0.0, 0.0, 1.0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1e+20) >>> x.nonzero() (array([0, 2]), array([0, 2])) @@ -4890,13 +4929,12 @@ class MaskedArray(ndarray): >>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]]) >>> a > 3 - masked_array(data = - [[False False False] - [ True True True] - [ True True True]], - mask = - False, - fill_value=999999) + masked_array( + data=[[False, False, False], + [ True, True, True], + [ True, True, True]], + mask=False, + fill_value=True) >>> ma.nonzero(a > 3) (array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2])) @@ -4978,18 +5016,27 @@ class MaskedArray(ndarray): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print(x) - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> print(x.sum()) + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.sum() 25 - >>> print(x.sum(axis=1)) - [4 5 16] - >>> print(x.sum(axis=0)) - [8 5 12] + >>> x.sum(axis=1) + masked_array(data=[4, 5, 16], + mask=[False, False, False], + fill_value=999999) + >>> x.sum(axis=0) + masked_array(data=[8, 5, 12], + mask=[False, False, False], + fill_value=999999) >>> print(type(x.sum(axis=0, dtype=np.int64)[0])) - <type 'numpy.int64'> + <class 'numpy.int64'> """ kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} @@ -5040,8 +5087,11 @@ class MaskedArray(ndarray): Examples -------- >>> marr = np.ma.array(np.arange(10), 
mask=[0,0,0,1,1,1,0,0,0,0]) - >>> print(marr.cumsum()) - [0 1 3 -- -- -- 9 16 24 33] + >>> marr.cumsum() + masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33], + mask=[False, False, False, True, True, True, False, False, + False, False], + fill_value=999999) """ result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out) @@ -5145,9 +5195,9 @@ class MaskedArray(ndarray): -------- >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a - masked_array(data = [1 2 --], - mask = [False False True], - fill_value = 999999) + masked_array(data=[1, 2, --], + mask=[False, False, True], + fill_value=999999) >>> a.mean() 1.5 @@ -5200,9 +5250,9 @@ class MaskedArray(ndarray): -------- >>> a = np.ma.array([1,2,3]) >>> a.anom() - masked_array(data = [-1. 0. 1.], - mask = False, - fill_value = 1e+20) + masked_array(data=[-1., 0., 1.], + mask=False, + fill_value=1e+20) """ m = self.mean(axis, dtype) @@ -5382,9 +5432,9 @@ class MaskedArray(ndarray): -------- >>> a = np.ma.array([3,2,1], mask=[False, False, True]) >>> a - masked_array(data = [3 2 --], - mask = [False False True], - fill_value = 999999) + masked_array(data=[3, 2, --], + mask=[False, False, True], + fill_value=999999) >>> a.argsort() array([1, 0, 2]) @@ -5432,15 +5482,19 @@ class MaskedArray(ndarray): Examples -------- - >>> x = np.ma.array(arange(4), mask=[1,1,0,0]) + >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0]) >>> x.shape = (2,2) - >>> print(x) - [[-- --] - [2 3]] - >>> print(x.argmin(axis=0, fill_value=-1)) - [0 0] - >>> print(x.argmin(axis=0, fill_value=9)) - [1 1] + >>> x + masked_array( + data=[[--, --], + [2, 3]], + mask=[[ True, True], + [False, False]], + fill_value=999999) + >>> x.argmin(axis=0, fill_value=-1) + array([0, 0]) + >>> x.argmin(axis=0, fill_value=9) + array([1, 1]) """ if fill_value is None: @@ -5531,23 +5585,29 @@ class MaskedArray(ndarray): Examples -------- - >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Default >>> a.sort() - >>> print(a) - [1 3 5 -- --] + >>> a + masked_array(data=[1, 3, 5, --, --], + mask=[False, False, False, True, True], + fill_value=999999) - >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # Put missing values in the front >>> a.sort(endwith=False) - >>> print(a) - [-- -- 1 3 5] + >>> a + masked_array(data=[--, --, 1, 3, 5], + mask=[ True, True, False, False, False], + fill_value=999999) - >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) + >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0]) >>> # fill_value takes over endwith >>> a.sort(endwith=False, fill_value=3) - >>> print(a) - [1 -- -- 3 5] + >>> a + masked_array(data=[1, --, --, 3, 5], + mask=[False, True, True, False, False], + fill_value=999999) """ if self._mask is nomask: @@ -5653,27 +5713,36 @@ class MaskedArray(ndarray): Examples -------- >>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2) - >>> print(x) - [[0 --] - [2 3] - [4 --]] + >>> x + masked_array( + data=[[0, --], + [2, 3], + [4, --]], + mask=[[False, True], + [False, False], + [False, True]], + fill_value=999999) >>> x.mini() - 0 + masked_array(data=0, + mask=False, + fill_value=999999) >>> x.mini(axis=0) - masked_array(data = [0 3], - mask = [False False], - fill_value = 999999) - >>> print(x.mini(axis=1)) - [0 2 4] + masked_array(data=[0, 3], + mask=[False, False], + fill_value=999999) + >>> x.mini(axis=1) + masked_array(data=[0, 2, 4], + mask=[False, False, False], + 
fill_value=999999) There is a small difference between `mini` and `min`: >>> x[:,1].mini(axis=0) - masked_array(data = --, - mask = True, - fill_value = 999999) + masked_array(data=3, + mask=False, + fill_value=999999) >>> x[:,1].min(axis=0) - masked + 3 """ # 2016-04-13, 1.13.0, gh-8764 @@ -5926,7 +5995,7 @@ class MaskedArray(ndarray): -------- >>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]]) >>> x.tobytes() - '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00' + b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00' """ return self.filled(fill_value).tobytes(order=order) @@ -5974,14 +6043,20 @@ class MaskedArray(ndarray): Examples -------- >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) - >>> print(x) - [[1 -- 3] - [-- 5 --] - [7 -- 9]] - >>> print(x.toflex()) - [[(1, False) (2, True) (3, False)] - [(4, True) (5, False) (6, True)] - [(7, False) (8, True) (9, False)]] + >>> x + masked_array( + data=[[1, --, 3], + [--, 5, --], + [7, --, 9]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) + >>> x.toflex() + array([[(1, False), (2, True), (3, False)], + [(4, True), (5, False), (6, True)], + [(7, False), (8, True), (9, False)]], + dtype=[('_data', '<i8'), ('_mask', '?')]) """ # Get the basic dtype. @@ -6228,15 +6303,14 @@ def isMaskedArray(x): [ 0., 0., 1.]]) >>> m = ma.masked_values(a, 0) >>> m - masked_array(data = - [[1.0 -- --] - [-- 1.0 --] - [-- -- 1.0]], - mask = - [[False True True] - [ True False True] - [ True True False]], - fill_value=0.0) + masked_array( + data=[[1.0, --, --], + [--, 1.0, --], + [--, --, 1.0]], + mask=[[False, True, True], + [ True, False, True], + [ True, True, False]], + fill_value=0.0) >>> ma.isMaskedArray(a) False >>> ma.isMaskedArray(m) @@ -6400,16 +6474,16 @@ def is_masked(x): >>> import numpy.ma as ma >>> x = ma.masked_equal([0, 1, 0, 2, 3], 0) >>> x - masked_array(data = [-- 1 -- 2 3], - mask = [ True False True False False], - fill_value=999999) + masked_array(data=[--, 1, --, 2, 3], + mask=[ True, False, True, False, False], + fill_value=0) >>> ma.is_masked(x) True >>> x = ma.masked_equal([0, 1, 0, 2, 3], 42) >>> x - masked_array(data = [0 1 0 2 3], - mask = False, - fill_value=999999) + masked_array(data=[0, 1, 0, 2, 3], + mask=False, + fill_value=42) >>> ma.is_masked(x) False @@ -6759,17 +6833,17 @@ def concatenate(arrays, axis=0): >>> a[1] = ma.masked >>> b = ma.arange(2, 5) >>> a - masked_array(data = [0 -- 2], - mask = [False True False], - fill_value = 999999) + masked_array(data=[0, --, 2], + mask=[False, True, False], + fill_value=999999) >>> b - masked_array(data = [2 3 4], - mask = False, - fill_value = 999999) + masked_array(data=[2, 3, 4], + mask=False, + fill_value=999999) >>> ma.concatenate([a, b]) - masked_array(data = [0 -- 2 2 3 4], - mask = [False True False False False False], - fill_value = 999999) + masked_array(data=[0, --, 2, 2, 3, 4], + mask=[False, True, False, False, False, False], + fill_value=999999) """ d = np.concatenate([getdata(a) for a in arrays], axis) @@ -6924,24 +6998,21 @@ def transpose(a, axes=None): >>> import numpy.ma as ma >>> x = ma.arange(4).reshape((2,2)) >>> x[1, 1] = ma.masked - >>>> x - masked_array(data = - [[0 1] - [2 --]], - mask = - [[False False] - [False True]], - fill_value = 999999) + >>> x + masked_array( + data=[[0, 1], + [2, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) >>> 
ma.transpose(x) - masked_array(data = - [[0 2] - [1 --]], - mask = - [[False False] - [False True]], - fill_value = 999999) - + masked_array( + data=[[0, 2], + [1, --]], + mask=[[False, False], + [False, True]], + fill_value=999999) """ # We can't use 'frommethod', as 'transpose' doesn't take keywords try: @@ -6988,39 +7059,39 @@ def resize(x, new_shape): >>> a = ma.array([[1, 2] ,[3, 4]]) >>> a[0, 1] = ma.masked >>> a - masked_array(data = - [[1 --] - [3 4]], - mask = - [[False True] - [False False]], - fill_value = 999999) + masked_array( + data=[[1, --], + [3, 4]], + mask=[[False, True], + [False, False]], + fill_value=999999) >>> np.resize(a, (3, 3)) - array([[1, 2, 3], - [4, 1, 2], - [3, 4, 1]]) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) >>> ma.resize(a, (3, 3)) - masked_array(data = - [[1 -- 3] - [4 1 --] - [3 4 1]], - mask = - [[False True False] - [False False True] - [False False False]], - fill_value = 999999) + masked_array( + data=[[1, --, 3], + [4, 1, --], + [3, 4, 1]], + mask=[[False, True, False], + [False, False, True], + [False, False, False]], + fill_value=999999) A MaskedArray is always returned, regardless of the input type. >>> a = np.array([[1, 2] ,[3, 4]]) >>> ma.resize(a, (3, 3)) - masked_array(data = - [[1 2 3] - [4 1 2] - [3 4 1]], - mask = - False, - fill_value = 999999) + masked_array( + data=[[1, 2, 3], + [4, 1, 2], + [3, 4, 1]], + mask=False, + fill_value=999999) """ # We can't use _frommethods here, as N.resize is notoriously whiny. @@ -7111,14 +7182,24 @@ def where(condition, x=_NoValue, y=_NoValue): >>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0], ... [1, 0, 1], ... [0, 1, 0]]) - >>> print(x) - [[0.0 -- 2.0] - [-- 4.0 --] - [6.0 -- 8.0]] - >>> print(np.ma.where(x > 5, x, -3.1416)) - [[-3.1416 -- -3.1416] - [-- -3.1416 --] - [6.0 -- 8.0]] + >>> x + masked_array( + data=[[0.0, --, 2.0], + [--, 4.0, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) + >>> np.ma.where(x > 5, x, -3.1416) + masked_array( + data=[[-3.1416, --, -3.1416], + [--, -3.1416, --], + [6.0, --, 8.0]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=1e+20) """ @@ -7198,9 +7279,9 @@ def choose(indices, choices, out=None, mode='raise'): >>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]]) >>> a = np.array([2, 1, 0]) >>> np.ma.choose(a, choice) - masked_array(data = [3 2 1], - mask = False, - fill_value=999999) + masked_array(data=[3, 2, 1], + mask=False, + fill_value=999999) """ def fmask(x): @@ -7323,25 +7404,23 @@ def mask_rowcols(a, axis=None): [0, 0, 0]]) >>> a = ma.masked_equal(a, 1) >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) >>> ma.mask_rowcols(a) - masked_array(data = - [[0 -- 0] - [-- -- --] - [0 -- 0]], - mask = - [[False True False] - [ True True True] - [False True False]], - fill_value=999999) + masked_array( + data=[[0, --, 0], + [--, --, --], + [0, --, 0]], + mask=[[False, True, False], + [ True, True, True], + [False, True, False]], + fill_value=1) """ a = array(a, subok=False) @@ -7402,24 +7481,22 @@ def dot(a, b, strict=False, out=None): Examples -------- - >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 
0]]) - >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) + >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]]) + >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]]) >>> np.ma.dot(a, b) - masked_array(data = - [[21 26] - [45 64]], - mask = - [[False False] - [False False]], - fill_value = 999999) + masked_array( + data=[[21, 26], + [45, 64]], + mask=[[False, False], + [False, False]], + fill_value=999999) >>> np.ma.dot(a, b, strict=True) - masked_array(data = - [[-- --] - [-- 64]], - mask = - [[ True True] - [ True False]], - fill_value = 999999) + masked_array( + data=[[--, --], + [--, 64]], + mask=[[ True, True], + [ True, False]], + fill_value=999999) """ # !!!: Works only with 2D arrays. There should be a way to get it to run @@ -7587,18 +7664,18 @@ def allequal(a, b, fill_value=True): Examples -------- - >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a - masked_array(data = [10000000000.0 1e-07 --], - mask = [False False True], - fill_value=1e+20) + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) - >>> b = array([1e10, 1e-7, -42.0]) + >>> b = np.array([1e10, 1e-7, -42.0]) >>> b array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01]) - >>> ma.allequal(a, b, fill_value=False) + >>> np.ma.allequal(a, b, fill_value=False) False - >>> ma.allequal(a, b) + >>> np.ma.allequal(a, b) True """ @@ -7664,29 +7741,29 @@ def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8): Examples -------- - >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) + >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1]) >>> a - masked_array(data = [10000000000.0 1e-07 --], - mask = [False False True], - fill_value = 1e+20) - >>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) + masked_array(data=[10000000000.0, 1e-07, --], + mask=[False, False, True], + fill_value=1e+20) + >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) False - >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) - >>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) True - >>> ma.allclose(a, b, masked_equal=False) + >>> np.ma.allclose(a, b, masked_equal=False) False Masked values are not compared directly. - >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) - >>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) - >>> ma.allclose(a, b) + >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1]) + >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1]) + >>> np.ma.allclose(a, b) True - >>> ma.allclose(a, b, masked_equal=False) + >>> np.ma.allclose(a, b, masked_equal=False) False """ @@ -7753,15 +7830,14 @@ def asarray(a, dtype=None, order=None): -------- >>> x = np.arange(10.).reshape(2, 5) >>> x - array([[ 0., 1., 2., 3., 4.], - [ 5., 6., 7., 8., 9.]]) + array([[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]]) >>> np.ma.asarray(x) - masked_array(data = - [[ 0. 1. 2. 3. 4.] - [ 5. 6. 7. 8. 
9.]], - mask = - False, - fill_value = 1e+20) + masked_array( + data=[[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]], + mask=False, + fill_value=1e+20) >>> type(np.ma.asarray(x)) <class 'numpy.ma.core.MaskedArray'> @@ -7801,15 +7877,14 @@ def asanyarray(a, dtype=None): -------- >>> x = np.arange(10.).reshape(2, 5) >>> x - array([[ 0., 1., 2., 3., 4.], - [ 5., 6., 7., 8., 9.]]) + array([[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]]) >>> np.ma.asanyarray(x) - masked_array(data = - [[ 0. 1. 2. 3. 4.] - [ 5. 6. 7. 8. 9.]], - mask = - False, - fill_value = 1e+20) + masked_array( + data=[[0., 1., 2., 3., 4.], + [5., 6., 7., 8., 9.]], + mask=False, + fill_value=1e+20) >>> type(np.ma.asanyarray(x)) <class 'numpy.ma.core.MaskedArray'> @@ -7953,39 +8028,38 @@ def fromflex(fxarray): >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4) >>> rec = x.toflex() >>> rec - array([[(0, False), (1, True), (2, False)], - [(3, True), (4, False), (5, True)], - [(6, False), (7, True), (8, False)]], - dtype=[('_data', '<i4'), ('_mask', '|b1')]) + array([[(0, False), (1, True), (2, False)], + [(3, True), (4, False), (5, True)], + [(6, False), (7, True), (8, False)]], + dtype=[('_data', '<i8'), ('_mask', '?')]) >>> x2 = np.ma.fromflex(rec) >>> x2 - masked_array(data = - [[0 -- 2] - [-- 4 --] - [6 -- 8]], - mask = - [[False True False] - [ True False True] - [False True False]], - fill_value = 999999) + masked_array( + data=[[0, --, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, True, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) Extra fields can be present in the structured array but are discarded: >>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')] >>> rec2 = np.zeros((2, 2), dtype=dt) >>> rec2 - array([[(0, False, 0.0), (0, False, 0.0)], - [(0, False, 0.0), (0, False, 0.0)]], - dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]) + array([[(0, False, 0.), (0, False, 0.)], + [(0, False, 0.), (0, False, 0.)]], + dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')]) >>> y = np.ma.fromflex(rec2) >>> y - masked_array(data = - [[0 0] - [0 0]], - mask = - [[False False] - [False False]], - fill_value = 999999) + masked_array( + data=[[0, 0], + [0, 0]], + mask=[[False, False], + [False, False]], + fill_value=999999, + dtype=int32) """ return masked_array(fxarray['_data'], mask=fxarray['_mask']) @@ -8086,7 +8160,10 @@ def append(a, b, axis=None): >>> import numpy.ma as ma >>> a = ma.masked_values([1, 2, 3], 2) >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) - >>> print(ma.append(a, b)) - [1 -- 3 4 5 6 -- 8 9] + >>> ma.append(a, b) + masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], + mask=[False, True, False, False, False, False, True, False, + False], + fill_value=999999) """ return concatenate([a, b], axis) diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 3be4d3625..2e3b84e1c 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -81,15 +81,14 @@ def count_masked(arr, axis=None): >>> a[1, 2] = ma.masked >>> a[2, 1] = ma.masked >>> a - masked_array(data = - [[0 1 2] - [-- 4 --] - [6 -- 8]], - mask = - [[False False False] - [ True False True] - [False True False]], - fill_value=999999) + masked_array( + data=[[0, 1, 2], + [--, 4, --], + [6, --, 8]], + mask=[[False, False, False], + [ True, False, True], + [False, True, False]], + fill_value=999999) >>> ma.count_masked(a) 3 @@ -132,15 +131,15 @@ def masked_all(shape, dtype=float): -------- >>> import numpy.ma as ma >>> ma.masked_all((3, 3)) - masked_array(data = - [[-- -- --] 
- [-- -- --] - [-- -- --]], - mask = - [[ True True True] - [ True True True] - [ True True True]], - fill_value=1e+20) + masked_array( + data=[[--, --, --], + [--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float64) The `dtype` parameter defines the underlying data type. @@ -188,16 +187,16 @@ def masked_all_like(arr): >>> import numpy.ma as ma >>> arr = np.zeros((2, 3), dtype=np.float32) >>> arr - array([[ 0., 0., 0.], - [ 0., 0., 0.]], dtype=float32) + array([[0., 0., 0.], + [0., 0., 0.]], dtype=float32) >>> ma.masked_all_like(arr) - masked_array(data = - [[-- -- --] - [-- -- --]], - mask = - [[ True True True] - [ True True True]], - fill_value=1e+20) + masked_array( + data=[[--, --, --], + [--, --, --]], + mask=[[ True, True, True], + [ True, True, True]], + fill_value=1e+20, + dtype=float32) The dtype of the masked array matches the dtype of `arr`. @@ -492,28 +491,45 @@ if apply_over_axes.__doc__ is not None: Examples -------- - >>> a = ma.arange(24).reshape(2,3,4) - >>> a[:,0,1] = ma.masked - >>> a[:,1,:] = ma.masked - >>> print(a) - [[[0 -- 2 3] - [-- -- -- --] - [8 9 10 11]] - - [[12 -- 14 15] - [-- -- -- --] - [20 21 22 23]]] - >>> print(ma.apply_over_axes(ma.sum, a, [0,2])) - [[[46] - [--] - [124]]] + >>> a = np.ma.arange(24).reshape(2,3,4) + >>> a[:,0,1] = np.ma.masked + >>> a[:,1,:] = np.ma.masked + >>> a + masked_array( + data=[[[0, --, 2, 3], + [--, --, --, --], + [8, 9, 10, 11]], + [[12, --, 14, 15], + [--, --, --, --], + [20, 21, 22, 23]]], + mask=[[[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]], + [[False, True, False, False], + [ True, True, True, True], + [False, False, False, False]]], + fill_value=999999) + >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2]) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) Tuple axis arguments to ufuncs are equivalent: - >>> print(ma.sum(a, axis=(0,2)).reshape((1,-1,1))) - [[[46] - [--] - [124]]] + >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1)) + masked_array( + data=[[[46], + [--], + [124]]], + mask=[[[False], + [ True], + [False]]], + fill_value=999999) """ @@ -558,14 +574,19 @@ def average(a, axis=None, weights=None, returned=False): 1.25 >>> x = np.ma.arange(6.).reshape(3, 2) - >>> print(x) - [[ 0. 1.] - [ 2. 3.] - [ 4. 5.]] + >>> x + masked_array( + data=[[0., 1.], + [2., 3.], + [4., 5.]], + mask=False, + fill_value=1e+20) >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3], ... returned=True) - >>> print(avg) - [2.66666666667 3.66666666667] + >>> avg + masked_array(data=[2.6666666666666665, 3.6666666666666665], + mask=[False, False], + fill_value=1e+20) """ a = asarray(a) @@ -676,9 +697,9 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): >>> np.ma.median(x) 2.5 >>> np.ma.median(x, axis=-1, overwrite_input=True) - masked_array(data = [ 2. 5.], - mask = False, - fill_value = 1e+20) + masked_array(data=[2.0, 5.0], + mask=[False, False], + fill_value=1e+20) """ if not hasattr(a, 'mask'): @@ -856,15 +877,14 @@ def compress_rowcols(x, axis=None): ... [1, 0, 0], ... 
[0, 0, 0]]) >>> x - masked_array(data = - [[-- 1 2] - [-- 4 5] - [6 7 8]], - mask = - [[ True False False] - [ True False False] - [False False False]], - fill_value = 999999) + masked_array( + data=[[--, 1, 2], + [--, 4, 5], + [6, 7, 8]], + mask=[[ True, False, False], + [ True, False, False], + [False, False, False]], + fill_value=999999) >>> np.ma.compress_rowcols(x) array([[7, 8]]) @@ -937,25 +957,24 @@ def mask_rows(a, axis=None): [0, 0, 0]]) >>> a = ma.masked_equal(a, 1) >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) + >>> ma.mask_rows(a) - masked_array(data = - [[0 0 0] - [-- -- --] - [0 0 0]], - mask = - [[False False False] - [ True True True] - [False False False]], - fill_value=999999) + masked_array( + data=[[0, 0, 0], + [--, --, --], + [0, 0, 0]], + mask=[[False, False, False], + [ True, True, True], + [False, False, False]], + fill_value=1) """ return mask_rowcols(a, 0) @@ -982,25 +1001,23 @@ def mask_cols(a, axis=None): [0, 0, 0]]) >>> a = ma.masked_equal(a, 1) >>> a - masked_array(data = - [[0 0 0] - [0 -- 0] - [0 0 0]], - mask = - [[False False False] - [False True False] - [False False False]], - fill_value=999999) + masked_array( + data=[[0, 0, 0], + [0, --, 0], + [0, 0, 0]], + mask=[[False, False, False], + [False, True, False], + [False, False, False]], + fill_value=1) >>> ma.mask_cols(a) - masked_array(data = - [[0 -- 0] - [0 -- 0] - [0 -- 0]], - mask = - [[False True False] - [False True False] - [False True False]], - fill_value=999999) + masked_array( + data=[[0, --, 0], + [0, --, 0], + [0, --, 0]], + mask=[[False, True, False], + [False, True, False], + [False, True, False]], + fill_value=1) """ return mask_rowcols(a, 1) @@ -1078,12 +1095,12 @@ def intersect1d(ar1, ar2, assume_unique=False): Examples -------- - >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1]) - >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1]) - >>> intersect1d(x, y) - masked_array(data = [1 3 --], - mask = [False False True], - fill_value = 999999) + >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1]) + >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1]) + >>> np.ma.intersect1d(x, y) + masked_array(data=[1, 3, --], + mask=[False, False, True], + fill_value=999999) """ if assume_unique: @@ -1216,9 +1233,9 @@ def setdiff1d(ar1, ar2, assume_unique=False): -------- >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1]) >>> np.ma.setdiff1d(x, [1, 2]) - masked_array(data = [3 --], - mask = [False True], - fill_value = 999999) + masked_array(data=[3, --], + mask=[False, True], + fill_value=999999) """ if assume_unique: @@ -1483,7 +1500,9 @@ class mr_class(MAxisConcatenator): Examples -------- >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])] - array([1, 2, 3, 0, 0, 4, 5, 6]) + masked_array(data=[1, 2, 3, ..., 4, 5, 6], + mask=False, + fill_value=999999) """ def __init__(self): @@ -1524,19 +1543,19 @@ def flatnotmasked_edges(a): Examples -------- >>> a = np.ma.arange(10) - >>> flatnotmasked_edges(a) - [0,-1] + >>> np.ma.flatnotmasked_edges(a) + array([0, 9]) >>> mask = (a < 3) | (a > 8) | (a == 5) >>> a[mask] = np.ma.masked >>> np.array(a[~a.mask]) array([3, 4, 6, 7, 8]) - >>> flatnotmasked_edges(a) + >>> np.ma.flatnotmasked_edges(a) array([3, 8]) >>> a[:] = np.ma.masked - >>> print(flatnotmasked_edges(ma)) + >>> 
print(np.ma.flatnotmasked_edges(a)) None """ @@ -1588,7 +1607,7 @@ def notmasked_edges(a, axis=None): >>> np.array(am[~am.mask]) array([0, 1, 2, 3, 6]) - >>> np.ma.notmasked_edges(ma) + >>> np.ma.notmasked_edges(am) array([0, 6]) """ @@ -1709,15 +1728,11 @@ def notmasked_contiguous(a, axis=None): [slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)] >>> np.ma.notmasked_contiguous(ma, axis=0) - [[slice(0, 1, None), slice(2, 3, None)], # column broken into two segments - [], # fully masked column - [slice(0, 1, None)], - [slice(0, 3, None)]] + [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]] >>> np.ma.notmasked_contiguous(ma, axis=1) - [[slice(0, 1, None), slice(2, 4, None)], # row broken into two segments - [slice(3, 4, None)], - [slice(0, 1, None), slice(3, 4, None)]] + [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]] + """ a = asarray(a) nd = a.ndim diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 2775b11ec..e0dbf1b1a 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -2401,9 +2401,9 @@ class TestMaskedArrayInPlaceArithmetics(object): assert_equal(xm, y + 1) (x, _, xm) = self.floatdata - id1 = x.data.ctypes._data + id1 = x.data.ctypes.data x += 1. - assert_(id1 == x.data.ctypes._data) + assert_(id1 == x.data.ctypes.data) assert_equal(x, y + 1.) def test_inplace_addition_array(self): diff --git a/numpy/matlib.py b/numpy/matlib.py index 004e5f0c8..9e115943a 100644 --- a/numpy/matlib.py +++ b/numpy/matlib.py @@ -39,11 +39,11 @@ def empty(shape, dtype=None, order='C'): -------- >>> import numpy.matlib >>> np.matlib.empty((2, 2)) # filled with random data - matrix([[ 6.76425276e-320, 9.79033856e-307], - [ 7.39337286e-309, 3.22135945e-309]]) #random + matrix([[ 6.76425276e-320, 9.79033856e-307], # random + [ 7.39337286e-309, 3.22135945e-309]]) >>> np.matlib.empty((2, 2), dtype=int) - matrix([[ 6600475, 0], - [ 6586976, 22740995]]) #random + matrix([[ 6600475, 0], # random + [ 6586976, 22740995]]) """ return ndarray.__new__(matrix, shape, dtype, order=order) @@ -82,11 +82,11 @@ def ones(shape, dtype=None, order='C'): Examples -------- >>> np.matlib.ones((2,3)) - matrix([[ 1., 1., 1.], - [ 1., 1., 1.]]) + matrix([[1., 1., 1.], + [1., 1., 1.]]) >>> np.matlib.ones(2) - matrix([[ 1., 1.]]) + matrix([[1., 1.]]) """ a = ndarray.__new__(matrix, shape, dtype, order=order) @@ -126,11 +126,11 @@ def zeros(shape, dtype=None, order='C'): -------- >>> import numpy.matlib >>> np.matlib.zeros((2, 3)) - matrix([[ 0., 0., 0.], - [ 0., 0., 0.]]) + matrix([[0., 0., 0.], + [0., 0., 0.]]) >>> np.matlib.zeros(2) - matrix([[ 0., 0.]]) + matrix([[0., 0.]]) """ a = ndarray.__new__(matrix, shape, dtype, order=order) @@ -210,9 +210,9 @@ def eye(n,M=None, k=0, dtype=float, order='C'): -------- >>> import numpy.matlib >>> np.matlib.eye(3, k=1, dtype=float) - matrix([[ 0., 1., 0.], - [ 0., 0., 1.], - [ 0., 0., 0.]]) + matrix([[0., 1., 0.], + [0., 0., 1.], + [0., 0., 0.]]) """ return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order)) @@ -243,19 +243,20 @@ def rand(*args): Examples -------- + >>> np.random.seed(123) >>> import numpy.matlib >>> np.matlib.rand(2, 3) - matrix([[ 0.68340382, 0.67926887, 0.83271405], - [ 0.00793551, 0.20468222, 0.95253525]]) #random + matrix([[0.69646919, 0.28613933, 0.22685145], + [0.55131477, 0.71946897, 0.42310646]]) >>> np.matlib.rand((2, 3)) - matrix([[ 0.84682055, 0.73626594, 0.11308016], - [ 0.85429008, 0.3294825 , 
0.89139555]]) #random + matrix([[0.9807642 , 0.68482974, 0.4809319 ], + [0.39211752, 0.34317802, 0.72904971]]) If the first argument is a tuple, other arguments are ignored: >>> np.matlib.rand((2, 3), 4) - matrix([[ 0.46898646, 0.15163588, 0.95188261], - [ 0.59208621, 0.09561818, 0.00583606]]) #random + matrix([[0.43857224, 0.0596779 , 0.39804426], + [0.73799541, 0.18249173, 0.17545176]]) """ if isinstance(args[0], tuple): @@ -294,18 +295,19 @@ def randn(*args): Examples -------- + >>> np.random.seed(123) >>> import numpy.matlib >>> np.matlib.randn(1) - matrix([[-0.09542833]]) #random + matrix([[-1.0856306]]) >>> np.matlib.randn(1, 2, 3) - matrix([[ 0.16198284, 0.0194571 , 0.18312985], - [-0.7509172 , 1.61055 , 0.45298599]]) #random + matrix([[ 0.99734545, 0.2829785 , -1.50629471], + [-0.57860025, 1.65143654, -2.42667924]]) Two-by-four matrix of samples from :math:`N(3, 6.25)`: >>> 2.5 * np.matlib.randn((2, 4)) + 3 - matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922], - [ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random + matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], + [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]]) """ if isinstance(args[0], tuple): diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 93b344cd4..6f8eadf86 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -104,9 +104,9 @@ class matrix(N.ndarray): Examples -------- >>> a = np.matrix('1 2; 3 4') - >>> print(a) - [[1 2] - [3 4]] + >>> a + matrix([[1, 2], + [3, 4]]) >>> np.matrix([[1, 2], [3, 4]]) matrix([[1, 2], @@ -310,12 +310,12 @@ class matrix(N.ndarray): matrix([[3], [7]]) >>> x.sum(axis=1, dtype='float') - matrix([[ 3.], - [ 7.]]) - >>> out = np.zeros((1, 2), dtype='float') - >>> x.sum(axis=1, dtype='float', out=out) - matrix([[ 3.], - [ 7.]]) + matrix([[3.], + [7.]]) + >>> out = np.zeros((2, 1), dtype='float') + >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) + matrix([[3.], + [7.]]) """ return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis) @@ -437,7 +437,7 @@ class matrix(N.ndarray): >>> x.mean() 5.5 >>> x.mean(0) - matrix([[ 4., 5., 6., 7.]]) + matrix([[4., 5., 6., 7.]]) >>> x.mean(1) matrix([[ 1.5], [ 5.5], @@ -469,9 +469,9 @@ class matrix(N.ndarray): [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.std() - 3.4520525295346629 + 3.4520525295346629 # may vary >>> x.std(0) - matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) + matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary >>> x.std(1) matrix([[ 1.11803399], [ 1.11803399], @@ -505,11 +505,11 @@ class matrix(N.ndarray): >>> x.var() 11.916666666666666 >>> x.var(0) - matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) + matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary >>> x.var(1) - matrix([[ 1.25], - [ 1.25], - [ 1.25]]) + matrix([[1.25], + [1.25], + [1.25]]) """ return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis) @@ -824,7 +824,7 @@ class matrix(N.ndarray): matrix([[-2. , 1. ], [ 1.5, -0.5]]) >>> m.getI() * m - matrix([[ 1., 0.], + matrix([[ 1., 0.], # may vary [ 0., 1.]]) """ @@ -886,7 +886,8 @@ class matrix(N.ndarray): [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.getA1() - array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]) + array([ 0, 1, 2, ..., 9, 10, 11]) + """ return self.__array__().ravel() @@ -986,10 +987,10 @@ class matrix(N.ndarray): [ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j], [ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]]) >>> z.getH() - matrix([[ 0. +0.j, 4. +4.j, 8. 
+8.j], - [ 1. +1.j, 5. +5.j, 9. +9.j], - [ 2. +2.j, 6. +6.j, 10.+10.j], - [ 3. +3.j, 7. +7.j, 11.+11.j]]) + matrix([[ 0. -0.j, 4. +4.j, 8. +8.j], + [ 1. +1.j, 5. +5.j, 9. +9.j], + [ 2. +2.j, 6. +6.j, 10.+10.j], + [ 3. +3.j, 7. +7.j, 11.+11.j]]) """ if issubclass(self.dtype.type, N.complexfloating): diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py index 92cdb18d2..e0734e1b8 100644 --- a/numpy/polynomial/chebyshev.py +++ b/numpy/polynomial/chebyshev.py @@ -361,12 +361,12 @@ def poly2cheb(pol): >>> from numpy import polynomial as P >>> p = P.Polynomial(range(4)) >>> p - Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) >>> c = p.convert(kind=P.Chebyshev) >>> c - Chebyshev([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) + Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., 1.]) >>> P.chebyshev.poly2cheb(range(4)) - array([ 1. , 3.25, 1. , 0.75]) + array([1. , 3.25, 1. , 0.75]) """ [pol] = pu.as_series([pol]) @@ -413,12 +413,12 @@ def cheb2poly(c): >>> from numpy import polynomial as P >>> c = P.Chebyshev(range(4)) >>> c - Chebyshev([ 0., 1., 2., 3.], [-1., 1.]) + Chebyshev([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) >>> p = c.convert(kind=P.Polynomial) >>> p - Polynomial([ -2., -8., 4., 12.], [-1., 1.]) + Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.]) >>> P.chebyshev.cheb2poly(range(4)) - array([ -2., -8., 4., 12.]) + array([-2., -8., 4., 12.]) """ from .polynomial import polyadd, polysub, polymulx @@ -538,7 +538,7 @@ def chebfromroots(roots): array([ 0. , -0.25, 0. , 0.25]) >>> j = complex(0,1) >>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis - array([ 1.5+0.j, 0.0+0.j, 0.5+0.j]) + array([1.5+0.j, 0. +0.j, 0.5+0.j]) """ if len(roots) == 0: @@ -594,7 +594,7 @@ def chebadd(c1, c2): >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> C.chebadd(c1,c2) - array([ 4., 4., 4.]) + array([4., 4., 4.]) """ # c1, c2 are trimmed copies @@ -688,7 +688,7 @@ def chebmulx(c): -------- >>> from numpy.polynomial import chebyshev as C >>> C.chebmulx([1,2,3]) - array([ 1., 2.5, 3., 1.5, 2.]) + array([1. , 2.5, 1. , 1.5]) """ # c is a trimmed copy @@ -796,10 +796,10 @@ def chebdiv(c1, c2): >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not - (array([ 3.]), array([-8., -4.])) + (array([3.]), array([-8., -4.])) >>> c2 = (0,1,2,3) >>> C.chebdiv(c2,c1) # neither "intuitive" - (array([ 0., 2.]), array([-2., -4.])) + (array([0., 2.]), array([-2., -4.])) """ # c1, c2 are trimmed copies @@ -853,7 +853,7 @@ def chebpow(c, pow, maxpower=16): -------- >>> from numpy.polynomial import chebyshev as C >>> C.chebpow([1, 2, 3, 4], 2) - array([15.5, 22. , 16. , 14. , 12.5, 12. , 8. ]) + array([15.5, 22. , 16. , ..., 12.5, 12. , 8. 
]) """ # c is a trimmed copy @@ -928,13 +928,13 @@ def chebder(c, m=1, scl=1, axis=0): >>> from numpy.polynomial import chebyshev as C >>> c = (1,2,3,4) >>> C.chebder(c) - array([ 14., 12., 24.]) + array([14., 12., 24.]) >>> C.chebder(c,3) - array([ 96.]) + array([96.]) >>> C.chebder(c,scl=-1) array([-14., -12., -24.]) >>> C.chebder(c,2,-1) - array([ 12., 96.]) + array([12., 96.]) """ c = np.array(c, ndmin=1, copy=1) @@ -1048,8 +1048,8 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0): >>> C.chebint(c) array([ 0.5, -0.5, 0.5, 0.5]) >>> C.chebint(c,3) - array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, - 0.00625 ]) + array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary + 0.00625 ]) >>> C.chebint(c, k=3) array([ 3.5, -0.5, 0.5, 0.5]) >>> C.chebint(c,lbnd=-2) @@ -1674,7 +1674,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None): warnings can be turned off by >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) + >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- @@ -1885,7 +1885,7 @@ def chebroots(c): -------- >>> import numpy.polynomial.chebyshev as cheb >>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots - array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) + array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary """ # c is a trimmed copy diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py index 4905f366f..93c9fc564 100644 --- a/numpy/polynomial/hermite.py +++ b/numpy/polynomial/hermite.py @@ -114,7 +114,7 @@ def poly2herm(pol): -------- >>> from numpy.polynomial.hermite import poly2herm >>> poly2herm(np.arange(4)) - array([ 1. , 2.75 , 0.5 , 0.375]) + array([1. , 2.75 , 0.5 , 0.375]) """ [pol] = pu.as_series([pol]) @@ -160,7 +160,7 @@ def herm2poly(c): -------- >>> from numpy.polynomial.hermite import herm2poly >>> herm2poly([ 1. , 2.75 , 0.5 , 0.375]) - array([ 0., 1., 2., 3.]) + array([0., 1., 2., 3.]) """ from .polynomial import polyadd, polysub, polymulx @@ -280,10 +280,10 @@ def hermfromroots(roots): >>> from numpy.polynomial.hermite import hermfromroots, hermval >>> coef = hermfromroots((-1, 0, 1)) >>> hermval((-1, 0, 1), coef) - array([ 0., 0., 0.]) + array([0., 0., 0.]) >>> coef = hermfromroots((-1j, 1j)) >>> hermval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) + array([0.+0.j, 0.+0.j]) """ if len(roots) == 0: @@ -337,7 +337,7 @@ def hermadd(c1, c2): -------- >>> from numpy.polynomial.hermite import hermadd >>> hermadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) + array([2., 4., 6., 4.]) """ # c1, c2 are trimmed copies @@ -385,7 +385,7 @@ def hermsub(c1, c2): -------- >>> from numpy.polynomial.hermite import hermsub >>> hermsub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) + array([0., 0., 0., 4.]) """ # c1, c2 are trimmed copies @@ -435,7 +435,7 @@ def hermmulx(c): -------- >>> from numpy.polynomial.hermite import hermmulx >>> hermmulx([1, 2, 3]) - array([ 2. , 6.5, 1. , 1.5]) + array([2. , 6.5, 1. 
, 1.5]) """ # c is a trimmed copy @@ -488,7 +488,7 @@ def hermmul(c1, c2): -------- >>> from numpy.polynomial.hermite import hermmul >>> hermmul([1, 2, 3], [0, 1, 2]) - array([ 52., 29., 52., 7., 6.]) + array([52., 29., 52., 7., 6.]) """ # s1, s2 are trimmed copies @@ -557,11 +557,11 @@ def hermdiv(c1, c2): -------- >>> from numpy.polynomial.hermite import hermdiv >>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) + (array([1., 2., 3.]), array([0.])) >>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 2., 2.])) + (array([1., 2., 3.]), array([2., 2.])) >>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 1.])) + (array([1., 2., 3.]), array([1., 1.])) """ # c1, c2 are trimmed copies @@ -617,7 +617,7 @@ def hermpow(c, pow, maxpower=16): -------- >>> from numpy.polynomial.hermite import hermpow >>> hermpow([1, 2, 3], 2) - array([ 81., 52., 82., 12., 9.]) + array([81., 52., 82., 12., 9.]) """ # c is a trimmed copy @@ -690,9 +690,9 @@ def hermder(c, m=1, scl=1, axis=0): -------- >>> from numpy.polynomial.hermite import hermder >>> hermder([ 1. , 0.5, 0.5, 0.5]) - array([ 1., 2., 3.]) + array([1., 2., 3.]) >>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2) - array([ 1., 2., 3.]) + array([1., 2., 3.]) """ c = np.array(c, ndmin=1, copy=1) @@ -799,15 +799,15 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0): -------- >>> from numpy.polynomial.hermite import hermint >>> hermint([1,2,3]) # integrate once, value 0 at 0. - array([ 1. , 0.5, 0.5, 0.5]) + array([1. , 0.5, 0.5, 0.5]) >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0 - array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) + array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0. - array([ 2. , 0.5, 0.5, 0.5]) + array([2. , 0.5, 0.5, 0.5]) >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1 array([-2. , 0.5, 0.5, 0.5]) >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1) - array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) + array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary """ c = np.array(c, ndmin=1, copy=1) @@ -918,8 +918,8 @@ def hermval(x, c, tensor=True): >>> hermval(1, coef) 11.0 >>> hermval([[1,2],[3,4]], coef) - array([[ 11., 51.], - [ 115., 203.]]) + array([[ 11., 51.], + [115., 203.]]) """ c = np.array(c, ndmin=1, copy=0) @@ -1437,7 +1437,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): warnings can be turned off by >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) + >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- @@ -1490,7 +1490,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None): >>> err = np.random.randn(len(x))/10 >>> y = hermval(x, [1, 2, 3]) + err >>> hermfit(x, y, 2) - array([ 0.97902637, 1.99849131, 3.00006 ]) + array([1.0218, 1.9986, 2.9999]) # may vary """ x = np.asarray(x) + 0.0 @@ -1656,9 +1656,9 @@ def hermroots(c): >>> from numpy.polynomial.hermite import hermroots, hermfromroots >>> coef = hermfromroots([-1, 0, 1]) >>> coef - array([ 0. , 0.25 , 0. , 0.125]) + array([0. , 0.25 , 0. 
, 0.125]) >>> hermroots(coef) - array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) + array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00]) """ # c is a trimmed copy diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py index 6cb044a55..bafb4b997 100644 --- a/numpy/polynomial/hermite_e.py +++ b/numpy/polynomial/hermite_e.py @@ -161,7 +161,7 @@ def herme2poly(c): -------- >>> from numpy.polynomial.hermite_e import herme2poly >>> herme2poly([ 2., 10., 2., 3.]) - array([ 0., 1., 2., 3.]) + array([0., 1., 2., 3.]) """ from .polynomial import polyadd, polysub, polymulx @@ -281,10 +281,10 @@ def hermefromroots(roots): >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval >>> coef = hermefromroots((-1, 0, 1)) >>> hermeval((-1, 0, 1), coef) - array([ 0., 0., 0.]) + array([0., 0., 0.]) >>> coef = hermefromroots((-1j, 1j)) >>> hermeval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) + array([0.+0.j, 0.+0.j]) """ if len(roots) == 0: @@ -338,7 +338,7 @@ def hermeadd(c1, c2): -------- >>> from numpy.polynomial.hermite_e import hermeadd >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) + array([2., 4., 6., 4.]) """ # c1, c2 are trimmed copies @@ -386,7 +386,7 @@ def hermesub(c1, c2): -------- >>> from numpy.polynomial.hermite_e import hermesub >>> hermesub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) + array([0., 0., 0., 4.]) """ # c1, c2 are trimmed copies @@ -432,7 +432,7 @@ def hermemulx(c): -------- >>> from numpy.polynomial.hermite_e import hermemulx >>> hermemulx([1, 2, 3]) - array([ 2., 7., 2., 3.]) + array([2., 7., 2., 3.]) """ # c is a trimmed copy @@ -485,7 +485,7 @@ def hermemul(c1, c2): -------- >>> from numpy.polynomial.hermite_e import hermemul >>> hermemul([1, 2, 3], [0, 1, 2]) - array([ 14., 15., 28., 7., 6.]) + array([14., 15., 28., 7., 6.]) """ # s1, s2 are trimmed copies @@ -554,9 +554,9 @@ def hermediv(c1, c2): -------- >>> from numpy.polynomial.hermite_e import hermediv >>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) + (array([1., 2., 3.]), array([0.])) >>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 2.])) + (array([1., 2., 3.]), array([1., 2.])) """ # c1, c2 are trimmed copies @@ -612,7 +612,7 @@ def hermepow(c, pow, maxpower=16): -------- >>> from numpy.polynomial.hermite_e import hermepow >>> hermepow([1, 2, 3], 2) - array([ 23., 28., 46., 12., 9.]) + array([23., 28., 46., 12., 9.]) """ # c is a trimmed copy @@ -685,9 +685,9 @@ def hermeder(c, m=1, scl=1, axis=0): -------- >>> from numpy.polynomial.hermite_e import hermeder >>> hermeder([ 1., 1., 1., 1.]) - array([ 1., 2., 3.]) + array([1., 2., 3.]) >>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2) - array([ 1., 2., 3.]) + array([1., 2., 3.]) """ c = np.array(c, ndmin=1, copy=1) @@ -794,15 +794,15 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0): -------- >>> from numpy.polynomial.hermite_e import hermeint >>> hermeint([1, 2, 3]) # integrate once, value 0 at 0. - array([ 1., 1., 1., 1.]) + array([1., 1., 1., 1.]) >>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0 - array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) + array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary >>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0. - array([ 2., 1., 1., 1.]) + array([2., 1., 1., 1.]) >>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1 array([-1., 1., 1., 1.]) >>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1) - array([ 1.83333333, 0. 
, 0.5 , 0.33333333, 0.25 ]) + array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary """ c = np.array(c, ndmin=1, copy=1) @@ -913,8 +913,8 @@ def hermeval(x, c, tensor=True): >>> hermeval(1, coef) 3.0 >>> hermeval([[1,2],[3,4]], coef) - array([[ 3., 14.], - [ 31., 54.]]) + array([[ 3., 14.], + [31., 54.]]) """ c = np.array(c, ndmin=1, copy=0) @@ -1430,7 +1430,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): warnings can be turned off by >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) + >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- @@ -1480,10 +1480,11 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None): -------- >>> from numpy.polynomial.hermite_e import hermefit, hermeval >>> x = np.linspace(-10, 10) + >>> np.random.seed(123) >>> err = np.random.randn(len(x))/10 >>> y = hermeval(x, [1, 2, 3]) + err >>> hermefit(x, y, 2) - array([ 1.01690445, 1.99951418, 2.99948696]) + array([ 1.01690445, 1.99951418, 2.99948696]) # may vary """ x = np.asarray(x) + 0.0 @@ -1650,9 +1651,9 @@ def hermeroots(c): >>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots >>> coef = hermefromroots([-1, 0, 1]) >>> coef - array([ 0., 2., 0., 1.]) + array([0., 2., 0., 1.]) >>> hermeroots(coef) - array([-1., 0., 1.]) + array([-1., 0., 1.]) # may vary """ # c is a trimmed copy diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py index a116d20a7..9207c9afe 100644 --- a/numpy/polynomial/laguerre.py +++ b/numpy/polynomial/laguerre.py @@ -160,7 +160,7 @@ def lag2poly(c): -------- >>> from numpy.polynomial.laguerre import lag2poly >>> lag2poly([ 23., -63., 58., -18.]) - array([ 0., 1., 2., 3.]) + array([0., 1., 2., 3.]) """ from .polynomial import polyadd, polysub, polymulx @@ -277,10 +277,10 @@ def lagfromroots(roots): >>> from numpy.polynomial.laguerre import lagfromroots, lagval >>> coef = lagfromroots((-1, 0, 1)) >>> lagval((-1, 0, 1), coef) - array([ 0., 0., 0.]) + array([0., 0., 0.]) >>> coef = lagfromroots((-1j, 1j)) >>> lagval((-1j, 1j), coef) - array([ 0.+0.j, 0.+0.j]) + array([0.+0.j, 0.+0.j]) """ if len(roots) == 0: @@ -334,7 +334,7 @@ def lagadd(c1, c2): -------- >>> from numpy.polynomial.laguerre import lagadd >>> lagadd([1, 2, 3], [1, 2, 3, 4]) - array([ 2., 4., 6., 4.]) + array([2., 4., 6., 4.]) """ @@ -383,7 +383,7 @@ def lagsub(c1, c2): -------- >>> from numpy.polynomial.laguerre import lagsub >>> lagsub([1, 2, 3, 4], [1, 2, 3]) - array([ 0., 0., 0., 4.]) + array([0., 0., 0., 4.]) """ # c1, c2 are trimmed copies @@ -433,7 +433,7 @@ def lagmulx(c): -------- >>> from numpy.polynomial.laguerre import lagmulx >>> lagmulx([1, 2, 3]) - array([ -1., -1., 11., -9.]) + array([-1., -1., 11., -9.]) """ # c is a trimmed copy @@ -556,9 +556,9 @@ def lagdiv(c1, c2): -------- >>> from numpy.polynomial.laguerre import lagdiv >>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 0.])) + (array([1., 2., 3.]), array([0.])) >>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2]) - (array([ 1., 2., 3.]), array([ 1., 1.])) + (array([1., 2., 3.]), array([1., 1.])) """ # c1, c2 are trimmed copies @@ -687,9 +687,9 @@ def lagder(c, m=1, scl=1, axis=0): -------- >>> from numpy.polynomial.laguerre import lagder >>> lagder([ 1., 1., 1., -3.]) - array([ 1., 2., 3.]) + array([1., 2., 3.]) >>> lagder([ 1., 0., 0., -4., 3.], m=2) - array([ 1., 2., 3.]) + array([1., 2., 3.]) """ c = np.array(c, ndmin=1, copy=1) @@ -805,9 +805,9 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0): >>> lagint([1,2,3], k=1) array([ 
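# Illustrative aside (not part of the patch): the hermefit example above now
# seeds the global RandomState and carries a "# may vary" marker because the
# fitted coefficients depend on the random noise term.  The same example,
# spelled out as a plain script:
import numpy as np
from numpy.polynomial.hermite_e import hermefit, hermeval

np.random.seed(123)                                   # fixed seed -> reproducible run
x = np.linspace(-10, 10)
y = hermeval(x, [1, 2, 3]) + np.random.randn(len(x)) / 10
print(hermefit(x, y, 2))                              # approximately [1., 2., 3.]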
2., 1., 1., -3.]) >>> lagint([1,2,3], lbnd=-1) - array([ 11.5, 1. , 1. , -3. ]) + array([11.5, 1. , 1. , -3. ]) >>> lagint([1,2], m=2, k=[1,2], lbnd=-1) - array([ 11.16666667, -5. , -3. , 2. ]) + array([ 11.16666667, -5. , -3. , 2. ]) # may vary """ c = np.array(c, ndmin=1, copy=1) @@ -1436,7 +1436,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): warnings can be turned off by >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) + >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- @@ -1489,7 +1489,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None): >>> err = np.random.randn(len(x))/10 >>> y = lagval(x, [1, 2, 3]) + err >>> lagfit(x, y, 2) - array([ 0.96971004, 2.00193749, 3.00288744]) + array([ 0.96971004, 2.00193749, 3.00288744]) # may vary """ x = np.asarray(x) + 0.0 @@ -1656,7 +1656,7 @@ def lagroots(c): >>> coef array([ 2., -8., 12., -6.]) >>> lagroots(coef) - array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00]) + array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00]) """ # c is a trimmed copy diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py index e9c24594b..f81bc002c 100644 --- a/numpy/polynomial/legendre.py +++ b/numpy/polynomial/legendre.py @@ -136,10 +136,10 @@ def poly2leg(pol): >>> from numpy import polynomial as P >>> p = P.Polynomial(np.arange(4)) >>> p - Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) + Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) >>> c = P.Legendre(P.legendre.poly2leg(p.coef)) >>> c - Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) + Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary """ [pol] = pu.as_series([pol]) @@ -183,12 +183,13 @@ def leg2poly(c): Examples -------- + >>> from numpy import polynomial as P >>> c = P.Legendre(range(4)) >>> c - Legendre([ 0., 1., 2., 3.], [-1., 1.]) + Legendre([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1]) >>> p = c.convert(kind=P.Polynomial) >>> p - Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.]) + Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., 1.]) >>> P.leg2poly(range(4)) array([-1. , -3.5, 3. , 7.5]) @@ -310,7 +311,7 @@ def legfromroots(roots): array([ 0. , -0.4, 0. 
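# Illustrative aside (not part of the patch): the updated leg2poly example now
# shows the full Legendre/Polynomial repr, including domain and window.  The
# same change of basis can be checked numerically:
import numpy as np
from numpy import polynomial as P

c = P.Legendre(range(4))                  # 0*L0 + 1*L1 + 2*L2 + 3*L3
p = c.convert(kind=P.Polynomial)          # the same function in the power basis
print(p.coef)                             # -1 - 3.5*x + 3*x**2 + 7.5*x**3
print(P.legendre.leg2poly(range(4)))      # same coefficients via the functional API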
, 0.4]) >>> j = complex(0,1) >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis - array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) + array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary """ if len(roots) == 0: @@ -366,7 +367,7 @@ def legadd(c1, c2): >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> L.legadd(c1,c2) - array([ 4., 4., 4.]) + array([4., 4., 4.]) """ # c1, c2 are trimmed copies @@ -468,7 +469,7 @@ def legmulx(c): -------- >>> from numpy.polynomial import legendre as L >>> L.legmulx([1,2,3]) - array([ 0.66666667, 2.2, 1.33333333, 1.8]) + array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary """ # c is a trimmed copy @@ -525,8 +526,8 @@ def legmul(c1, c2): >>> from numpy.polynomial import legendre as L >>> c1 = (1,2,3) >>> c2 = (3,2) - >>> P.legmul(c1,c2) # multiplication requires "reprojection" - array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) + >>> L.legmul(c1,c2) # multiplication requires "reprojection" + array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary """ # s1, s2 are trimmed copies @@ -597,10 +598,10 @@ def legdiv(c1, c2): >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> L.legdiv(c1,c2) # quotient "intuitive," remainder not - (array([ 3.]), array([-8., -4.])) + (array([3.]), array([-8., -4.])) >>> c2 = (0,1,2,3) >>> L.legdiv(c2,c1) # neither "intuitive" - (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) + (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary """ # c1, c2 are trimmed copies @@ -729,7 +730,7 @@ def legder(c, m=1, scl=1, axis=0): >>> L.legder(c) array([ 6., 9., 20.]) >>> L.legder(c, 3) - array([ 60.]) + array([60.]) >>> L.legder(c, scl=-1) array([ -6., -9., -20.]) >>> L.legder(c, 2,-1) @@ -845,16 +846,16 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0): >>> from numpy.polynomial import legendre as L >>> c = (1,2,3) >>> L.legint(c) - array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) + array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary >>> L.legint(c, 3) - array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, - -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) + array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary + -1.73472348e-18, 1.90476190e-02, 9.52380952e-03]) >>> L.legint(c, k=3) - array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) + array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary >>> L.legint(c, lbnd=-2) - array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) + array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary >>> L.legint(c, scl=2) - array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) + array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary """ c = np.array(c, ndmin=1, copy=1) @@ -1476,7 +1477,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None): warnings can be turned off by >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) + >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- @@ -1686,7 +1687,7 @@ def legroots(c): -------- >>> import numpy.polynomial.legendre as leg >>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots - array([-0.85099543, -0.11407192, 0.51506735]) + array([-0.85099543, -0.11407192, 0.51506735]) # may vary """ # c is a trimmed copy diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index 259cd31f5..69599e3fd 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -185,7 +185,7 @@ def polyfromroots(roots): array([ 0., -1., 0., 1.]) >>> j = complex(0,1) >>> P.polyfromroots((-j,j)) # complex returned, though values are real - array([ 
1.+0.j, 0.+0.j, 1.+0.j]) + array([1.+0.j, 0.+0.j, 1.+0.j]) """ if len(roots) == 0: @@ -233,7 +233,7 @@ def polyadd(c1, c2): >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> sum = P.polyadd(c1,c2); sum - array([ 4., 4., 4.]) + array([4., 4., 4.]) >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) 28.0 @@ -401,9 +401,9 @@ def polydiv(c1, c2): >>> c1 = (1,2,3) >>> c2 = (3,2,1) >>> P.polydiv(c1,c2) - (array([ 3.]), array([-8., -4.])) + (array([3.]), array([-8., -4.])) >>> P.polydiv(c2,c1) - (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) + (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary """ # c1, c2 are trimmed copies @@ -529,7 +529,7 @@ def polyder(c, m=1, scl=1, axis=0): >>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2 array([ 2., 6., 12.]) >>> P.polyder(c,3) # (d**3/dx**3)(c) = 24 - array([ 24.]) + array([24.]) >>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2 array([ -2., -6., -12.]) >>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x @@ -636,14 +636,14 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0): >>> from numpy.polynomial import polynomial as P >>> c = (1,2,3) >>> P.polyint(c) # should return array([0, 1, 1, 1]) - array([ 0., 1., 1., 1.]) + array([0., 1., 1., 1.]) >>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20]) - array([ 0. , 0. , 0. , 0.16666667, 0.08333333, - 0.05 ]) + array([ 0. , 0. , 0. , 0.16666667, 0.08333333, # may vary + 0.05 ]) >>> P.polyint(c,k=3) # should return array([3, 1, 1, 1]) - array([ 3., 1., 1., 1.]) + array([3., 1., 1., 1.]) >>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1]) - array([ 6., 1., 1., 1.]) + array([6., 1., 1., 1.]) >>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2]) array([ 0., -2., -2., -2.]) @@ -761,17 +761,17 @@ def polyval(x, c, tensor=True): array([[0, 1], [2, 3]]) >>> polyval(a, [1,2,3]) - array([[ 1., 6.], - [ 17., 34.]]) + array([[ 1., 6.], + [17., 34.]]) >>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients >>> coef array([[0, 1], [2, 3]]) >>> polyval([1,2], coef, tensor=True) - array([[ 2., 4.], - [ 4., 7.]]) + array([[2., 4.], + [4., 7.]]) >>> polyval([1,2], coef, tensor=False) - array([ 2., 7.]) + array([2., 7.]) """ c = np.array(c, ndmin=1, copy=0) @@ -851,8 +851,8 @@ def polyvalfromroots(x, r, tensor=True): array([[0, 1], [2, 3]]) >>> polyvalfromroots(a, [-1, 0, 1]) - array([[ -0., 0.], - [ 6., 24.]]) + array([[-0., 0.], + [ 6., 24.]]) >>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients >>> r # each column of r defines one polynomial array([[-2, -1], @@ -1363,7 +1363,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): be turned off by: >>> import warnings - >>> warnings.simplefilter('ignore', RankWarning) + >>> warnings.simplefilter('ignore', np.RankWarning) See Also -------- @@ -1410,26 +1410,27 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None): Examples -------- + >>> np.random.seed(123) >>> from numpy.polynomial import polynomial as P >>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1] >>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise" >>> c, stats = P.polyfit(x,y,3,full=True) + >>> np.random.seed(123) >>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 
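# Brief illustrative aside (not part of the patch): several fit docstrings in
# these files now spell the warning class as np.RankWarning, which resolves
# after a plain "import numpy as np".  Rank-deficient data triggers it, shown
# here with the classic np.polyfit interface:
import warnings
import numpy as np

x = np.array([0.0, 0.0, 0.0, 1.0])        # repeated x values -> rank-deficient design matrix
y = np.array([1.0, 1.0, 1.0, 2.0])
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    np.polyfit(x, y, 2)                   # poorly conditioned fit
print(any(issubclass(w.category, np.RankWarning) for w in caught))   # True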
1 - array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) + array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary >>> stats # note the large SSR, explaining the rather poor results - [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, - 0.28853036]), 1.1324274851176597e-014] + [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary + 0.28853036]), 1.1324274851176597e-014] Same thing without the added noise >>> y = x**3 - x >>> c, stats = P.polyfit(x,y,3,full=True) >>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1 - array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16, - 1.00000000e+00]) + array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00]) >>> stats # note the minuscule SSR - [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, - 0.50443316, 0.28853036]), 1.1324274851176597e-014] + [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary + 0.50443316, 0.28853036]), 1.1324274851176597e-014] """ x = np.asarray(x) + 0.0 @@ -1591,7 +1592,7 @@ def polyroots(c): dtype('float64') >>> j = complex(0,1) >>> poly.polyroots(poly.polyfromroots((-j,0,j))) - array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) + array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) # may vary """ # c is a trimmed copy diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py index c1ed0c9b3..eff4a8ee1 100644 --- a/numpy/polynomial/polyutils.py +++ b/numpy/polynomial/polyutils.py @@ -156,19 +156,19 @@ def as_series(alist, trim=True): >>> from numpy.polynomial import polyutils as pu >>> a = np.arange(4) >>> pu.as_series(a) - [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])] + [array([0.]), array([1.]), array([2.]), array([3.])] >>> b = np.arange(6).reshape((2,3)) >>> pu.as_series(b) - [array([ 0., 1., 2.]), array([ 3., 4., 5.])] + [array([0., 1., 2.]), array([3., 4., 5.])] >>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16))) - [array([ 1.]), array([ 0., 1., 2.]), array([ 0., 1.])] + [array([1.]), array([0., 1., 2.]), array([0., 1.])] >>> pu.as_series([2, [1.1, 0.]]) - [array([ 2.]), array([ 1.1])] + [array([2.]), array([1.1])] >>> pu.as_series([2, [1.1, 0.]], trim=False) - [array([ 2.]), array([ 1.1, 0. ])] + [array([2.]), array([1.1, 0. ])] """ arrays = [np.array(a, ndmin=1, copy=0) for a in alist] @@ -233,12 +233,12 @@ def trimcoef(c, tol=0): -------- >>> from numpy.polynomial import polyutils as pu >>> pu.trimcoef((0,0,3,0,5,0,0)) - array([ 0., 0., 3., 0., 5.]) + array([0., 0., 3., 0., 5.]) >>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed - array([ 0.]) + array([0.]) >>> i = complex(0,1) # works for complex >>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3) - array([ 0.0003+0.j , 0.0010-0.001j]) + array([0.0003+0.j , 0.001 -0.001j]) """ if tol < 0: @@ -332,10 +332,10 @@ def mapparms(old, new): >>> pu.mapparms((-1,1),(-1,1)) (0.0, 1.0) >>> pu.mapparms((1,-1),(-1,1)) - (0.0, -1.0) + (-0.0, -1.0) >>> i = complex(0,1) >>> pu.mapparms((-i,-1),(1,i)) - ((1+1j), (1+0j)) + ((1+1j), (1-0j)) """ oldlen = old[1] - old[0] @@ -390,10 +390,10 @@ def mapdomain(x, old, new): >>> x = np.linspace(-1,1,6); x array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ]) >>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out - array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, + array([ 0. 
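# Illustrative aside (not part of the patch): pu.mapparms returns the
# (offset, scale) pair of the affine map from the old domain to the new one,
# and pu.mapdomain applies that map, which is what the corrected examples in
# polyutils show.  A quick consistency check:
import numpy as np
from numpy.polynomial import polyutils as pu

old, new = (-1, 1), (0, 2 * np.pi)
off, scl = pu.mapparms(old, new)          # (pi, pi) for these two domains
x = np.linspace(-1, 1, 6)
print(np.allclose(pu.mapdomain(x, old, new), off + scl * x))   # True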
, 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary 6.28318531]) >>> x - pu.mapdomain(x_out, new_domain, old_domain) - array([ 0., 0., 0., 0., 0., 0.]) + array([0., 0., 0., 0., 0., 0.]) Also works for complex numbers (and thus can be used to map any line in the complex plane to any other line therein). @@ -402,9 +402,9 @@ def mapdomain(x, old, new): >>> old = (-1 - i, 1 + i) >>> new = (-1 + i, 1 - i) >>> z = np.linspace(old[0], old[1], 6); z - array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1.0+1.j ]) - >>> new_z = P.mapdomain(z, old, new); new_z - array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) + array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ]) + >>> new_z = pu.mapdomain(z, old, new); new_z + array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary """ x = np.asanyarray(x) diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx index 21bc73e54..2a6daa88c 100644 --- a/numpy/random/mtrand/mtrand.pyx +++ b/numpy/random/mtrand/mtrand.pyx @@ -1,3 +1,5 @@ +# cython: language_level=3 + # mtrand.pyx -- A Pyrex wrapper of Jean-Sebastien Roy's RandomKit # # Copyright 2005 Robert Kern (robert.kern@gmail.com) @@ -844,16 +846,16 @@ cdef class RandomState: Examples -------- >>> np.random.random_sample() - 0.47108547995356098 + 0.47108547995356098 # random >>> type(np.random.random_sample()) - <type 'float'> + <class 'float'> >>> np.random.random_sample((5,)) - array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) + array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) # random Three-by-two array of random numbers from [-5, 0): >>> 5 * np.random.random_sample((3, 2)) - 5 - array([[-3.99149989, -0.52338984], + array([[-3.99149989, -0.52338984], # random [-2.99091858, -0.79479508], [-1.23204345, -1.75224494]]) @@ -954,14 +956,14 @@ cdef class RandomState: Examples -------- >>> np.random.randint(2, size=10) - array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) + array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) # random >>> np.random.randint(1, size=10) array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) Generate a 2 x 4 array of ints between 0 and 4, inclusive: >>> np.random.randint(5, size=(2, 4)) - array([[4, 0, 2, 1], + array([[4, 0, 2, 1], # random [3, 2, 2, 0]]) """ @@ -1076,34 +1078,34 @@ cdef class RandomState: Generate a uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3) - array([0, 3, 4]) + array([0, 3, 4]) # random >>> #This is equivalent to np.random.randint(0,5,3) Generate a non-uniform random sample from np.arange(5) of size 3: >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) - array([3, 3, 0]) + array([3, 3, 0]) # random Generate a uniform random sample from np.arange(5) of size 3 without replacement: >>> np.random.choice(5, 3, replace=False) - array([3,1,0]) + array([3,1,0]) # random >>> #This is equivalent to np.random.permutation(np.arange(5))[:3] Generate a non-uniform random sample from np.arange(5) of size 3 without replacement: >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0]) - array([2, 3, 0]) + array([2, 3, 0]) # random Any of the above can be repeated with an arbitrary array-like instead of just integers. 
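# Illustrative aside (not part of the patch): the "# random" markers added to
# the RandomState docstrings flag outputs drawn from the global generator.
# Reproducible variants of the same calls use an explicitly seeded RandomState:
import numpy as np

rs = np.random.RandomState(12345)
print(rs.randint(5, size=(2, 4)))                     # deterministic for this seed
print(rs.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]))       # likewise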
For instance: >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher'] >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3]) - array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], - dtype='|S11') + array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], # random + dtype='<U11') """ @@ -1470,11 +1472,11 @@ cdef class RandomState: Examples -------- >>> np.random.random_integers(5) - 4 + 4 # random >>> type(np.random.random_integers(5)) - <type 'int'> + <class 'numpy.int64'> >>> np.random.random_integers(5, size=(3,2)) - array([[5, 4], + array([[5, 4], # random [3, 3], [4, 5]]) @@ -1483,7 +1485,7 @@ cdef class RandomState: :math:`{0, 5/8, 10/8, 15/8, 20/8}`): >>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4. - array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ]) + array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ]) # random Roll two six sided dice 1000 times and sum the results: @@ -2068,8 +2070,8 @@ cdef class RandomState: The lower bound for the top 1% of the samples is : - >>> sort(s)[-10] - 7.61988120985 + >>> np.sort(s)[-10] + 7.61988120985 # random So there is about a 1% chance that the F statistic will exceed 7.62, the measured value is 36, so the null hypothesis is rejected at the 1% @@ -2166,6 +2168,8 @@ cdef class RandomState: >>> NF = np.histogram(nc_vals, bins=50, density=True) >>> c_vals = np.random.f(dfnum, dfden, 1000000) >>> F = np.histogram(c_vals, bins=50, density=True) + >>> import matplotlib + >>> import matplotlib.pyplot as plt >>> plt.plot(F[1][1:], F[0]) >>> plt.plot(NF[1][1:], NF[0]) >>> plt.show() @@ -2261,7 +2265,7 @@ cdef class RandomState: Examples -------- >>> np.random.chisquare(2,4) - array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) + array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random """ cdef ndarray odf @@ -2443,6 +2447,8 @@ cdef class RandomState: -------- Draw samples and plot the distribution: + >>> import matplotlib + >>> import matplotlib.pyplot as plt >>> s = np.random.standard_cauchy(1000000) >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well >>> plt.hist(s, bins=100) @@ -3279,12 +3285,14 @@ cdef class RandomState: >>> loc, scale = 10, 1 >>> s = np.random.logistic(loc, scale, 10000) + >>> import matplotlib + >>> import matplotlib.pyplot as plt >>> count, bins, ignored = plt.hist(s, bins=50) # plot against distribution >>> def logist(x, loc, scale): - ... return exp((loc-x)/scale)/(scale*(1+exp((loc-x)/scale))**2) + ... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2) >>> plt.plot(bins, logist(bins, loc, scale)*count.max()/\\ ... logist(bins, loc, scale).max()) >>> plt.show() @@ -3479,6 +3487,8 @@ cdef class RandomState: -------- Draw values from the distribution and plot the histogram + >>> import matplotlib + >>> from matplotlib.pyplot import hist >>> values = hist(np.random.rayleigh(3, 100000), bins=200, density=True) Wave heights tend to follow a Rayleigh distribution. If the mean wave @@ -3492,7 +3502,7 @@ cdef class RandomState: The percentage of waves larger than 3 meters is: >>> 100.*sum(s>3)/1000000. - 0.087300000000000003 + 0.087300000000000003 # random """ cdef ndarray oscale @@ -3873,9 +3883,9 @@ cdef class RandomState: single success after drilling 5 wells, after 6 wells, etc.? >>> s = np.random.negative_binomial(1, 0.1, 100000) - >>> for i in range(1, 11): + >>> for i in range(1, 11): # doctest: +SKIP ... probability = sum(s<i) / 100000. - ... print i, "wells drilled, probability of one success =", probability + ... 
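# Illustrative aside (not part of the patch): the logistic example above now
# writes the pdf with np.exp so the doctest runs without a star import.  As a
# sanity check, that helper integrates to one over a wide range, as a pdf
# should:
import numpy as np

def logist(x, loc, scale):
    # logistic probability density, as in the docstring example
    return np.exp((loc - x) / scale) / (scale * (1 + np.exp((loc - x) / scale)) ** 2)

x = np.linspace(-40, 60, 10001)
print(round(float(np.trapz(logist(x, 10, 1), x)), 3))   # 1.0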
print(i, "wells drilled, probability of one success =", probability) """ cdef ndarray on @@ -4233,6 +4243,8 @@ cdef class RandomState: >>> ngood, nbad, nsamp = 100, 2, 10 # number of good, number of bad, and number of samples >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000) + >>> import matplotlib + >>> from matplotlib.pyplot import hist >>> hist(s) # note that it is very unlikely to grab both bad items @@ -4342,14 +4354,16 @@ cdef class RandomState: >>> a = .6 >>> s = np.random.logseries(a, 10000) + >>> import matplotlib + >>> import matplotlib.pyplot as plt >>> count, bins, ignored = plt.hist(s) # plot against distribution >>> def logseries(k, p): - ... return -p**k/(k*log(1-p)) + ... return -p**k/(k*np.log(1-p)) >>> plt.plot(bins, logseries(bins, a)*count.max()/ - logseries(bins, a).max(), 'r') + ... logseries(bins, a).max(), 'r') >>> plt.show() """ @@ -4474,7 +4488,7 @@ cdef class RandomState: standard deviation: >>> list((x[0,0,:] - mean) < 0.6) - [True, True] + [True, True] # random """ from numpy.dual import svd @@ -4580,14 +4594,14 @@ cdef class RandomState: Throw a dice 20 times: >>> np.random.multinomial(20, [1/6.]*6, size=1) - array([[4, 1, 7, 5, 2, 1]]) + array([[4, 1, 7, 5, 2, 1]]) # random It landed 4 times on 1, once on 2, etc. Now, throw the dice 20 times, and 20 times again: >>> np.random.multinomial(20, [1/6.]*6, size=2) - array([[3, 4, 3, 3, 4, 3], + array([[3, 4, 3, 3, 4, 3], # random [2, 4, 3, 4, 0, 7]]) For the first run, we threw 3 times 1, 4 times 2, etc. For the second, @@ -4596,7 +4610,7 @@ cdef class RandomState: A loaded die is more likely to land on number 6: >>> np.random.multinomial(100, [1/7.]*5 + [2/7.]) - array([11, 16, 14, 17, 16, 26]) + array([11, 16, 14, 17, 16, 26]) # random The probability inputs should be normalized. As an implementation detail, the value of the last entry is ignored and assumed to take @@ -4605,7 +4619,7 @@ cdef class RandomState: other should be sampled like so: >>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT - array([38, 62]) + array([38, 62]) # random not like: @@ -4659,8 +4673,9 @@ cdef class RandomState: Draw `size` samples of dimension k from a Dirichlet distribution. A Dirichlet-distributed random variable can be seen as a multivariate - generalization of a Beta distribution. Dirichlet pdf is the conjugate - prior of a multinomial in Bayesian inference. + generalization of a Beta distribution. The Dirichlet distribution + is a conjugate prior of a multinomial distribution in Bayesian + inference. Parameters ---------- @@ -4684,13 +4699,24 @@ cdef class RandomState: Notes ----- - .. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i} - Uses the following property for computation: for each dimension, - draw a random sample y_i from a standard gamma generator of shape - `alpha_i`, then - :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_n)` is - Dirichlet distributed. + The Dirichlet distribution is a distribution over vectors + :math:`x` that fulfil the conditions :math:`x_i>0` and + :math:`\\sum_{i=1}^k x_i = 1`. + + The probability density function :math:`p` of a + Dirichlet-distributed random vector :math:`X` is + proportional to + + .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}, + + where :math:`\\alpha` is a vector containing the positive + concentration parameters. 
+ + The method uses the following property for computation: let :math:`Y` + be a random vector which has components that follow a standard gamma + distribution, then :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y` + is Dirichlet-distributed References ---------- @@ -4710,6 +4736,8 @@ cdef class RandomState: >>> s = np.random.dirichlet((10, 5, 3), 20).transpose() + >>> import matplotlib + >>> import matplotlib.pyplot as plt >>> plt.barh(range(20), s[0]) >>> plt.barh(range(20), s[1], left=s[0], color='g') >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r') @@ -4798,14 +4826,14 @@ cdef class RandomState: >>> arr = np.arange(10) >>> np.random.shuffle(arr) >>> arr - [1 7 5 2 9 4 3 6 0 8] + [1 7 5 2 9 4 3 6 0 8] # random Multi-dimensional arrays are only shuffled along the first axis: >>> arr = np.arange(9).reshape((3, 3)) >>> np.random.shuffle(arr) >>> arr - array([[3, 4, 5], + array([[3, 4, 5], # random [6, 7, 8], [0, 1, 2]]) @@ -4885,14 +4913,14 @@ cdef class RandomState: Examples -------- >>> np.random.permutation(10) - array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) + array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) # random >>> np.random.permutation([1, 4, 9, 12, 15]) - array([15, 1, 9, 4, 12]) + array([15, 1, 9, 4, 12]) # random >>> arr = np.arange(9).reshape((3, 3)) >>> np.random.permutation(arr) - array([[6, 7, 8], + array([[6, 7, 8], # random [0, 1, 2], [3, 4, 5]]) diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd index 9092fa113..1b4fe6c10 100644 --- a/numpy/random/mtrand/numpy.pxd +++ b/numpy/random/mtrand/numpy.pxd @@ -1,3 +1,5 @@ +# cython: language_level=3 + # :Author: Travis Oliphant from cpython.exc cimport PyErr_Print diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 55306e499..4059f6ee6 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -521,7 +521,6 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True): ... <type 'exceptions.AssertionError'>: Arrays are not almost equal - <BLANKLINE> (mismatch 50.0%) x: array([ 1. , 2.33333333]) y: array([ 1. , 2.33333334]) @@ -854,7 +853,6 @@ def assert_array_equal(x, y, err_msg='', verbose=True): <type 'exceptions.ValueError'>: AssertionError: Arrays are not equal - <BLANKLINE> (mismatch 50.0%) x: array([ 1. , 3.14159265, NaN]) y: array([ 1. , 3.14159265, NaN]) @@ -930,7 +928,6 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True): <type 'exceptions.AssertionError'>: AssertionError: Arrays are not almost equal - <BLANKLINE> (mismatch 50.0%) x: array([ 1. , 2.33333, NaN]) y: array([ 1. 
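# Illustrative aside (not part of the patch): the rewritten Dirichlet notes
# state that normalizing independent standard-gamma draws with shapes alpha_i
# gives a Dirichlet(alpha) vector.  A small numerical check of that property:
import numpy as np

alpha = np.array([10., 5., 3.])
rs = np.random.RandomState(0)
y = rs.standard_gamma(alpha, size=(100000, 3))   # Y_i ~ Gamma(alpha_i, 1), drawn row-wise
x = y / y.sum(axis=1, keepdims=True)             # X = Y / sum(Y) ~ Dirichlet(alpha)
print(np.round(x.mean(axis=0), 2))               # ~ alpha / alpha.sum() = [0.56 0.28 0.17]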
, 2.33339, NaN]) diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py index 53b75db07..d389b37a8 100644 --- a/numpy/tests/test_ctypeslib.py +++ b/numpy/tests/test_ctypeslib.py @@ -2,6 +2,7 @@ from __future__ import division, absolute_import, print_function import sys import pytest +import weakref import numpy as np from numpy.ctypeslib import ndpointer, load_library, as_array @@ -260,3 +261,15 @@ class TestAsArray(object): b = np.ctypeslib.as_array(newpnt, (N,)) # now delete both, which should cleanup both objects del newpnt, b + + def test_segmentation_fault(self): + arr = np.zeros((224, 224, 3)) + c_arr = np.ctypeslib.as_ctypes(arr) + arr_ref = weakref.ref(arr) + del arr + + # check the reference wasn't cleaned up + assert_(arr_ref() is not None) + + # check we avoid the segfault + c_arr[0][0][0] diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index 9e27cc6ce..e42dc25f9 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -8,7 +8,7 @@ import sys import os import pytest from os.path import join as pathjoin, isfile, dirname -from subprocess import Popen, PIPE +import subprocess import numpy as np from numpy.compat.py3k import basestring @@ -17,74 +17,13 @@ from numpy.testing import assert_, assert_equal is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) -def run_command(cmd, check_code=True): - """ Run command sequence `cmd` returning exit code, stdout, stderr - - Parameters - ---------- - cmd : str or sequence - string with command name or sequence of strings defining command - check_code : {True, False}, optional - If True, raise error for non-zero return code - - Returns - ------- - returncode : int - return code from execution of `cmd` - stdout : bytes (python 3) or str (python 2) - stdout from `cmd` - stderr : bytes (python 3) or str (python 2) - stderr from `cmd` - - Raises - ------ - RuntimeError - If `check_code` is True, and return code !=0 - """ - cmd = [cmd] if isinstance(cmd, basestring) else list(cmd) - if os.name == 'nt': - # Quote any arguments with spaces. The quotes delimit the arguments - # on Windows, and the arguments might be file paths with spaces. - # On Unix the list elements are each separate arguments. - cmd = ['"{0}"'.format(c) if ' ' in c else c for c in cmd] - proc = Popen(cmd, stdout=PIPE, stderr=PIPE) - stdout, stderr = proc.communicate() - if proc.poll() is None: - proc.terminate() - if check_code and proc.returncode != 0: - raise RuntimeError('\n'.join( - ['Command "{0}" failed with', - 'stdout', '------', '{1}', '', - 'stderr', '------', '{2}']).format(cmd, stdout, stderr)) - return proc.returncode, stdout, stderr - - -@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") -@pytest.mark.xfail(reason="Test is unreliable") -def test_f2py(): - # test that we can run f2py script - - def try_f2py_commands(cmds): - success = 0 - for f2py_cmd in cmds: - try: - code, stdout, stderr = run_command([f2py_cmd, '-v']) - assert_equal(stdout.strip(), b'2') - success += 1 - except Exception: - pass - return success - +def find_f2py_commands(): if sys.platform == 'win32': - # Only the single 'f2py' script is installed in windows. 
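# Illustrative aside (not part of the patch): the new test_segmentation_fault
# above checks that, with this fix applied, as_ctypes keeps the source array
# alive so the ctypes view never dangles.  The same pattern as a standalone
# snippet:
import weakref
import numpy as np

arr = np.zeros((4, 4))
c_arr = np.ctypeslib.as_ctypes(arr)   # ctypes object sharing arr's memory
ref = weakref.ref(arr)
del arr                               # only the ctypes view references the data now
assert ref() is not None              # the array object was not collected
print(c_arr[0][0])                    # 0.0 -- safe to read, no dangling pointer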
exe_dir = dirname(sys.executable) if exe_dir.endswith('Scripts'): # virtualenv - f2py_cmds = [os.path.join(exe_dir, 'f2py')] + return [os.path.join(exe_dir, 'f2py')] else: - f2py_cmds = [os.path.join(exe_dir, "Scripts", 'f2py')] - success = try_f2py_commands(f2py_cmds) - msg = "Warning: f2py not found in path" - assert_(success == 1, msg) + return [os.path.join(exe_dir, "Scripts", 'f2py')] else: # Three scripts are installed in Unix-like systems: # 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example, @@ -93,7 +32,18 @@ def test_f2py(): version = sys.version_info major = str(version.major) minor = str(version.minor) - f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor) - success = try_f2py_commands(f2py_cmds) - msg = "Warning: not all of %s, %s, and %s are found in path" % f2py_cmds - assert_(success == 3, msg) + return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor] + + +@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace") +@pytest.mark.xfail(reason="Test is unreliable") +@pytest.mark.parametrize('f2py_cmd', find_f2py_commands()) +def test_f2py(f2py_cmd): + # test that we can run f2py script + stdout = subprocess.check_output([f2py_cmd, '-v']) + assert_equal(stdout.strip(), b'2') + + +def test_pep338(): + stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v']) + assert_equal(stdout.strip(), b'2') diff --git a/pavement.py b/pavement.py index f2c56883b..2a5225f71 100644 --- a/pavement.py +++ b/pavement.py @@ -42,13 +42,12 @@ from paver.easy import Bunch, options, task, sh #----------------------------------- # Path to the release notes -RELEASE_NOTES = 'doc/release/1.16.0-notes.rst' +RELEASE_NOTES = 'doc/release/1.17.0-notes.rst' #------------------------------------------------------- # Hardcoded build/install dirs, virtualenv options, etc. #------------------------------------------------------- -DEFAULT_PYTHON = "2.7" # Where to put the release installers options(installers=Bunch(releasedir="release", diff --git a/pytest.ini b/pytest.ini index 1a49e5dea..4748e3575 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,7 +1,7 @@ [pytest] addopts = -l norecursedirs = doc tools numpy/linalg/lapack_lite numpy/core/code_generators -doctest_optionflags = NORMALIZE_WHITESPACE +doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES filterwarnings = error diff --git a/runtests.py b/runtests.py index 81c7c103f..41768a2ea 100755 --- a/runtests.py +++ b/runtests.py @@ -73,8 +73,8 @@ def main(argv): help="just build, do not run any tests") parser.add_argument("--doctests", action="store_true", default=False, help="Run doctests in module") - #parser.add_argument("--refguide-check", action="store_true", default=False, - #help="Run refguide check (do not run regular tests.)") + parser.add_argument("--refguide-check", action="store_true", default=False, + help="Run refguide check (do not run regular tests.)") parser.add_argument("--coverage", action="store_true", default=False, help=("report coverage of project code. 
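# Illustrative aside (not part of the patch): the rewritten script tests shell
# out with subprocess.check_output and compare raw bytes, and test_pep338
# exercises the "python -m numpy.f2py" entry point the same way.  Standalone
# equivalent, assuming an installed numpy that provides f2py:
import subprocess
import sys

out = subprocess.check_output([sys.executable, '-m', 'numpy.f2py', '-v'])
print(out.strip() == b'2')            # the tests assert that "f2py -v" prints "2"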
HTML output goes " "under build/coverage")) @@ -202,6 +202,14 @@ def main(argv): shutil.rmtree(dst_dir) extra_argv += ['--cov-report=html:' + dst_dir] + if args.refguide_check: + cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'), + '--doctests'] + if args.submodule: + cmd += [args.submodule] + os.execv(sys.executable, [sys.executable] + cmd) + sys.exit(0) + if args.bench: # Run ASV items = extra_argv @@ -335,7 +343,6 @@ def build_project(args): # add flags used as werrors warnings_as_errors = ' '.join([ # from tools/travis-test.sh - '-Werror=declaration-after-statement', '-Werror=vla', '-Werror=nonnull', '-Werror=pointer-arith', @@ -344,6 +351,8 @@ def build_project(args): '-Werror=unused-function', ]) env['CFLAGS'] = warnings_as_errors + ' ' + env.get('CFLAGS', '') + # NumPy > 1.16 should be C99 compatible. + env['CFLAGS'] = '-std=c99' + ' ' + env.get('CFLAGS', '') if args.debug or args.gcov: # assume everyone uses gcc/gfortran env['OPT'] = '-O0 -ggdb' @@ -27,13 +27,10 @@ import subprocess import textwrap -if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4): - raise RuntimeError("Python version 2.7 or >= 3.4 required.") +if sys.version_info[:2] < (3, 5): + raise RuntimeError("Python version >= 3.5 required.") -if sys.version_info[0] >= 3: - import builtins -else: - import __builtin__ as builtins +import builtins CLASSIFIERS = """\ @@ -43,10 +40,7 @@ Intended Audience :: Developers License :: OSI Approved Programming Language :: C Programming Language :: Python -Programming Language :: Python :: 2 -Programming Language :: Python :: 2.7 Programming Language :: Python :: 3 -Programming Language :: Python :: 3.4 Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 @@ -60,7 +54,7 @@ Operating System :: MacOS """ MAJOR = 1 -MINOR = 16 +MINOR = 17 MICRO = 0 ISRELEASED = False VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO) @@ -375,7 +369,7 @@ def setup_package(): platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], test_suite='nose.collector', cmdclass={"sdist": sdist_checked}, - python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*', + python_requires='>=3.5', zip_safe=False, entry_points={ 'console_scripts': f2py_cmds diff --git a/shippable.yml b/shippable.yml index 6a92c0f34..82ee9461f 100644 --- a/shippable.yml +++ b/shippable.yml @@ -1,17 +1,17 @@ branches: only: - master + - maintenance/* language: python python: # use versions available for job image - # aarch64_u16pytall:v6.7.4 + # aarch64_u16pytall:v6.7.4 # (what we currently have access to by default) # this is a bit restrictive in terms # of version availability / control, # but it is convenient - - 2.7 - 3.7 runtime: diff --git a/tools/changelog.py b/tools/changelog.py index 84e046c5f..b135b14e5 100755 --- a/tools/changelog.py +++ b/tools/changelog.py @@ -42,8 +42,10 @@ import codecs from git import Repo from github import Github -UTF8Writer = codecs.getwriter('utf8') -sys.stdout = UTF8Writer(sys.stdout) +if sys.version_info.major < 3: + UTF8Writer = codecs.getwriter('utf8') + sys.stdout = UTF8Writer(sys.stdout) + this_repo = Repo(os.path.join(os.path.dirname(__file__), "..")) author_msg =\ diff --git a/tools/refguide_check.py b/tools/refguide_check.py new file mode 100644 index 000000000..3d885e37f --- /dev/null +++ b/tools/refguide_check.py @@ -0,0 +1,952 @@ +#!/usr/bin/env python +""" +refguide_check.py [OPTIONS] [-- ARGS] + +Check for a NumPy submodule whether the objects in its __all__ dict +correspond to the objects included 
in the reference guide. + +Example of usage:: + + $ python refguide_check.py optimize + +Note that this is a helper script to be able to check if things are missing; +the output of this script does need to be checked manually. In some cases +objects are left out of the refguide for a good reason (it's an alias of +another function, or deprecated, or ...) + +Another use of this helper script is to check validity of code samples +in docstrings. This is different from doctesting [we do not aim to have +numpy docstrings doctestable!], this is just to make sure that code in +docstrings is valid python:: + + $ python refguide_check.py --doctests optimize + +""" +from __future__ import print_function + +import sys +import os +import re +import copy +import inspect +import warnings +import doctest +import tempfile +import io +import docutils.core +from docutils.parsers.rst import directives +import shutil +import glob +from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL +from argparse import ArgumentParser +from pkg_resources import parse_version + +import sphinx +import numpy as np + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext')) +from numpydoc.docscrape_sphinx import get_doc_object + +if parse_version(sphinx.__version__) >= parse_version('1.5'): + # Enable specific Sphinx directives + from sphinx.directives import SeeAlso, Only + directives.register_directive('seealso', SeeAlso) + directives.register_directive('only', Only) +else: + # Remove sphinx directives that don't run without Sphinx environment. + # Sphinx < 1.5 installs all directives on import... + directives._directives.pop('versionadded', None) + directives._directives.pop('versionchanged', None) + directives._directives.pop('moduleauthor', None) + directives._directives.pop('sectionauthor', None) + directives._directives.pop('codeauthor', None) + directives._directives.pop('toctree', None) + + +BASE_MODULE = "numpy" + +PUBLIC_SUBMODULES = [ + 'core', + 'f2py', + 'linalg', + 'lib', + 'lib.recfunctions', + 'fft', + 'ma', + 'polynomial', + 'matrixlib', + 'random', +] + +# Docs for these modules are included in the parent module +OTHER_MODULE_DOCS = { + 'fftpack.convolve': 'fftpack', + 'io.wavfile': 'io', + 'io.arff': 'io', +} + +# these names are known to fail doctesting and we like to keep it that way +# e.g. sometimes pseudocode is acceptable etc +DOCTEST_SKIPLIST = set([ + # cases where NumPy docstrings import things from SciPy: + 'numpy.lib.vectorize', + 'numpy.random.standard_gamma', + 'numpy.random.gamma', + 'numpy.random.vonmises', + 'numpy.random.power', + 'numpy.random.zipf', + # remote / local file IO with DataSource is problematic in doctest: + 'numpy.lib.DataSource', + 'numpy.lib.Repository', +]) + +# these names are not required to be present in ALL despite being in +# autosummary:: listing +REFGUIDE_ALL_SKIPLIST = [ + r'scipy\.sparse\.linalg', + r'scipy\.spatial\.distance', + r'scipy\.linalg\.blas\.[sdczi].*', + r'scipy\.linalg\.lapack\.[sdczi].*', +] + +# these names are not required to be in an autosummary:: listing +# despite being in ALL +REFGUIDE_AUTOSUMMARY_SKIPLIST = [ + # NOTE: should NumPy have a better match between autosummary + # listings and __all__? 
For now, TR isn't convinced this is a + # priority -- focus on just getting docstrings executed / correct + r'numpy\.*', +] +# deprecated windows in scipy.signal namespace +for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman', + 'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop', + 'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning', + 'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'): + REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name) + +HAVE_MATPLOTLIB = False + + +def short_path(path, cwd=None): + """ + Return relative or absolute path name, whichever is shortest. + """ + if not isinstance(path, str): + return path + if cwd is None: + cwd = os.getcwd() + abspath = os.path.abspath(path) + relpath = os.path.relpath(path, cwd) + if len(abspath) <= len(relpath): + return abspath + return relpath + + +def find_names(module, names_dict): + # Refguide entries: + # + # - 3 spaces followed by function name, and maybe some spaces, some + # dashes, and an explanation; only function names listed in + # refguide are formatted like this (mostly, there may be some false + # positives) + # + # - special directives, such as data and function + # + # - (scipy.constants only): quoted list + # + patterns = [ + r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$", + r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$" + ] + + if module.__name__ == 'scipy.constants': + patterns += ["^``([a-z_0-9A-Z]+)``"] + + patterns = [re.compile(pattern) for pattern in patterns] + module_name = module.__name__ + + for line in module.__doc__.splitlines(): + res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line) + if res: + module_name = res.group(1) + continue + + for pattern in patterns: + res = re.match(pattern, line) + if res is not None: + name = res.group(1) + entry = '.'.join([module_name, name]) + names_dict.setdefault(module_name, set()).add(name) + break + + +def get_all_dict(module): + """Return a copy of the __all__ dict with irrelevant items removed.""" + if hasattr(module, "__all__"): + all_dict = copy.deepcopy(module.__all__) + else: + all_dict = copy.deepcopy(dir(module)) + all_dict = [name for name in all_dict + if not name.startswith("_")] + for name in ['absolute_import', 'division', 'print_function']: + try: + all_dict.remove(name) + except ValueError: + pass + + # Modules are almost always private; real submodules need a separate + # run of refguide_check. + all_dict = [name for name in all_dict + if not inspect.ismodule(getattr(module, name, None))] + + deprecated = [] + not_deprecated = [] + for name in all_dict: + f = getattr(module, name, None) + if callable(f) and is_deprecated(f): + deprecated.append(name) + else: + not_deprecated.append(name) + + others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated)) + + return not_deprecated, deprecated, others + + +def compare(all_dict, others, names, module_name): + """Return sets of objects only in __all__, refguide, or completely missing.""" + only_all = set() + for name in all_dict: + if name not in names: + for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST: + if re.match(pat, module_name + '.' + name): + break + else: + only_all.add(name) + + only_ref = set() + missing = set() + for name in names: + if name not in all_dict: + for pat in REFGUIDE_ALL_SKIPLIST: + if re.match(pat, module_name + '.' 
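# Illustrative aside (not part of the patch): find_names above recognises
# refguide entries written as three spaces, a name and an optional dashed
# description, plus ".. data::" / ".. function::" directives.  Tiny demo of
# the first pattern:
import re

pattern = re.compile(r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$")
for line in ["   polyfit      -- least-squares polynomial fit", "not an entry"]:
    m = pattern.match(line)
    print(m.group(1) if m else None)      # 'polyfit', then None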
+ name): + if name not in others: + missing.add(name) + break + else: + only_ref.add(name) + + return only_all, only_ref, missing + +def is_deprecated(f): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("error") + try: + f(**{"not a kwarg":None}) + except DeprecationWarning: + return True + except Exception: + pass + return False + +def check_items(all_dict, names, deprecated, others, module_name, dots=True): + num_all = len(all_dict) + num_ref = len(names) + + output = "" + + output += "Non-deprecated objects in __all__: %i\n" % num_all + output += "Objects in refguide: %i\n\n" % num_ref + + only_all, only_ref, missing = compare(all_dict, others, names, module_name) + dep_in_ref = set(only_ref).intersection(deprecated) + only_ref = set(only_ref).difference(deprecated) + + if len(dep_in_ref) > 0: + output += "Deprecated objects in refguide::\n\n" + for name in sorted(deprecated): + output += " " + name + "\n" + + if len(only_all) == len(only_ref) == len(missing) == 0: + if dots: + output_dot('.') + return [(None, True, output)] + else: + if len(only_all) > 0: + output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name + for name in sorted(only_all): + output += " " + name + "\n" + + output += "\nThis issue can be fixed by adding these objects to\n" + output += "the function listing in __init__.py for this module\n" + + if len(only_ref) > 0: + output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name + for name in sorted(only_ref): + output += " " + name + "\n" + + output += "\nThis issue should likely be fixed by removing these objects\n" + output += "from the function listing in __init__.py for this module\n" + output += "or adding them to __all__.\n" + + if len(missing) > 0: + output += "ERROR: missing objects::\n\n" + for name in sorted(missing): + output += " " + name + "\n" + + if dots: + output_dot('F') + return [(None, False, output)] + + +def validate_rst_syntax(text, name, dots=True): + if text is None: + if dots: + output_dot('E') + return False, "ERROR: %s: no documentation" % (name,) + + ok_unknown_items = set([ + 'mod', 'currentmodule', 'autosummary', 'data', + 'obj', 'versionadded', 'versionchanged', 'module', 'class', + 'ref', 'func', 'toctree', 'moduleauthor', + 'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv' + ]) + + # Run through docutils + error_stream = io.StringIO() + + def resolve(name, is_label=False): + return ("http://foo", name) + + token = '<RST-VALIDATE-SYNTAX-CHECK>' + + docutils.core.publish_doctree( + text, token, + settings_overrides = dict(halt_level=5, + traceback=True, + default_reference_context='title-reference', + default_role='emphasis', + link_base='', + resolve_name=resolve, + stylesheet_path='', + raw_enabled=0, + file_insertion_enabled=0, + warning_stream=error_stream)) + + # Print errors, disregarding unimportant ones + error_msg = error_stream.getvalue() + errors = error_msg.split(token) + success = True + output = "" + + for error in errors: + lines = error.splitlines() + if not lines: + continue + + m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0]) + if m: + if m.group(1) in ok_unknown_items: + continue + + m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S) + if m: + continue + + output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n" + success = False + + if not success: + output += " " + "-"*72 + "\n" + for lineno, line in enumerate(text.splitlines()): 
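# Illustrative aside (not part of the patch): is_deprecated above probes a
# callable with a bogus keyword argument while promoting DeprecationWarning to
# an error, so only wrappers that warn before validating their signature are
# reported as deprecated.  Self-contained sketch with a hypothetical shim:
import warnings

def old_func(*args, **kwargs):
    # hypothetical deprecation shim: warns before doing any real work
    warnings.warn("old_func is deprecated", DeprecationWarning)

def probe(f):
    with warnings.catch_warnings():
        warnings.simplefilter("error")
        try:
            f(**{"not a kwarg": None})
        except DeprecationWarning:
            return True
        except Exception:
            pass
    return False

print(probe(old_func), probe(len))        # True False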
+ output += " %-4d %s\n" % (lineno+1, line) + output += " " + "-"*72 + "\n\n" + + if dots: + output_dot('.' if success else 'F') + return success, output + + +def output_dot(msg='.', stream=sys.stderr): + stream.write(msg) + stream.flush() + + +def check_rest(module, names, dots=True): + """ + Check reStructuredText formatting of docstrings + + Returns: [(name, success_flag, output), ...] + """ + + try: + skip_types = (dict, str, unicode, float, int) + except NameError: + # python 3 + skip_types = (dict, str, float, int) + + + results = [] + + if module.__name__[6:] not in OTHER_MODULE_DOCS: + results += [(module.__name__,) + + validate_rst_syntax(inspect.getdoc(module), + module.__name__, dots=dots)] + + for name in names: + full_name = module.__name__ + '.' + name + obj = getattr(module, name, None) + + if obj is None: + results.append((full_name, False, "%s has no docstring" % (full_name,))) + continue + elif isinstance(obj, skip_types): + continue + + if inspect.ismodule(obj): + text = inspect.getdoc(obj) + else: + try: + text = str(get_doc_object(obj)) + except Exception: + import traceback + results.append((full_name, False, + "Error in docstring format!\n" + + traceback.format_exc())) + continue + + m = re.search("([\x00-\x09\x0b-\x1f])", text) + if m: + msg = ("Docstring contains a non-printable character %r! " + "Maybe forgot r\"\"\"?" % (m.group(1),)) + results.append((full_name, False, msg)) + continue + + try: + src_file = short_path(inspect.getsourcefile(obj)) + except TypeError: + src_file = None + + if src_file: + file_full_name = src_file + ':' + full_name + else: + file_full_name = full_name + + results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots)) + + return results + + +### Doctest helpers #### + +# the namespace to run examples in +DEFAULT_NAMESPACE = {'np': np} + +# the namespace to do checks in +CHECK_NAMESPACE = { + 'np': np, + 'assert_allclose': np.testing.assert_allclose, + 'assert_equal': np.testing.assert_equal, + # recognize numpy repr's + 'array': np.array, + 'matrix': np.matrix, + 'int64': np.int64, + 'uint64': np.uint64, + 'int8': np.int8, + 'int32': np.int32, + 'float32': np.float32, + 'float64': np.float64, + 'dtype': np.dtype, + 'nan': np.nan, + 'NaN': np.nan, + 'inf': np.inf, + 'Inf': np.inf,} + + +class DTRunner(doctest.DocTestRunner): + DIVIDER = "\n" + + def __init__(self, item_name, checker=None, verbose=None, optionflags=0): + self._item_name = item_name + doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose, + optionflags=optionflags) + + def _report_item_name(self, out, new_line=False): + if self._item_name is not None: + if new_line: + out("\n") + self._item_name = None + + def report_start(self, out, test, example): + self._checker._source = example.source + return doctest.DocTestRunner.report_start(self, out, test, example) + + def report_success(self, out, test, example, got): + if self._verbose: + self._report_item_name(out, new_line=True) + return doctest.DocTestRunner.report_success(self, out, test, example, got) + + def report_unexpected_exception(self, out, test, example, exc_info): + self._report_item_name(out) + return doctest.DocTestRunner.report_unexpected_exception( + self, out, test, example, exc_info) + + def report_failure(self, out, test, example, got): + self._report_item_name(out) + return doctest.DocTestRunner.report_failure(self, out, test, + example, got) + +class Checker(doctest.OutputChecker): + obj_pattern = re.compile('at 0x[0-9a-fA-F]+>') + int_pattern = 
re.compile('^[0-9]+L?$') + vanilla = doctest.OutputChecker() + rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary"} + stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(', + 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(', + '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim', + '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(', + '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='} + + def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2): + self.parse_namedtuples = parse_namedtuples + self.atol, self.rtol = atol, rtol + if ns is None: + self.ns = dict(CHECK_NAMESPACE) + else: + self.ns = ns + + def check_output(self, want, got, optionflags): + # cut it short if they are equal + if want == got: + return True + + # skip stopwords in source + if any(word in self._source for word in self.stopwords): + return True + + # skip random stuff + if any(word in want for word in self.rndm_markers): + return True + + # skip function/object addresses + if self.obj_pattern.search(got): + return True + + # ignore comments (e.g. signal.freqresp) + if want.lstrip().startswith("#"): + return True + + # python 2 long integers are equal to python 3 integers + if self.int_pattern.match(want) and self.int_pattern.match(got): + if want.rstrip("L\r\n") == got.rstrip("L\r\n"): + return True + + # try the standard doctest + try: + if self.vanilla.check_output(want, got, optionflags): + return True + except Exception: + pass + + # OK then, convert strings to objects + try: + a_want = eval(want, dict(self.ns)) + a_got = eval(got, dict(self.ns)) + except Exception: + # Maybe we're printing a numpy array? This produces invalid python + # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between + # values. So, reinsert commas and retry. + # TODO: handle (1) abberivation (`print(np.arange(10000))`), and + # (2) n-dim arrays with n > 1 + s_want = want.strip() + s_got = got.strip() + cond = (s_want.startswith("[") and s_want.endswith("]") and + s_got.startswith("[") and s_got.endswith("]")) + if cond: + s_want = ", ".join(s_want[1:-1].split()) + s_got = ", ".join(s_got[1:-1].split()) + return self.check_output(s_want, s_got, optionflags) + + if not self.parse_namedtuples: + return False + # suppose that "want" is a tuple, and "got" is smth like + # MoodResult(statistic=10, pvalue=0.1). + # Then convert the latter to the tuple (10, 0.1), + # and then compare the tuples. + try: + num = len(a_want) + regex = ('[\w\d_]+\(' + + ', '.join(['[\w\d_]+=(.+)']*num) + + '\)') + grp = re.findall(regex, got.replace('\n', ' ')) + if len(grp) > 1: # no more than one for now + return False + # fold it back to a tuple + got_again = '(' + ', '.join(grp[0]) + ')' + return self.check_output(want, got_again, optionflags) + except Exception: + return False + + # ... and defer to numpy + try: + return self._do_check(a_want, a_got) + except Exception: + # heterog tuple, eg (1, np.array([1., 2.])) + try: + return all(self._do_check(w, g) for w, g in zip(a_want, a_got)) + except (TypeError, ValueError): + return False + + def _do_check(self, want, got): + # This should be done exactly as written to correctly handle all of + # numpy-comparable objects, strings, and heterogeneous tuples + try: + if want == got: + return True + except Exception: + pass + return np.allclose(want, got, atol=self.atol, rtol=self.rtol) + + +def _run_doctests(tests, full_name, verbose, doctest_warnings): + """Run modified doctests for the set of `tests`. 
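# Illustrative aside (not part of the patch): when the literal doctest
# comparison fails, Checker.check_output above accepts expected outputs that
# carry a "# random" / "# may vary" marker outright, and otherwise evals both
# sides in a numpy namespace and compares them with
# np.allclose(atol=1e-8, rtol=1e-2).  The numeric fallback in isolation:
import numpy as np

want = "array([1.0218, 1.9986, 2.9999])"              # docstring output, fewer digits
got = "array([1.02180664, 1.99861386, 2.99987381])"   # what an actual run printed
ns = {'array': np.array}
print(np.allclose(eval(want, ns), eval(got, ns), atol=1e-8, rtol=1e-2))   # True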
+
+ Returns: list of [(success_flag, output), ...]
+ """
+ flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
+ runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
+ verbose=verbose)
+
+ output = []
+ success = True
+ def out(msg):
+ output.append(msg)
+
+ class MyStderr(object):
+ """Redirect stderr to the current stdout"""
+ def write(self, msg):
+ if doctest_warnings:
+ sys.stdout.write(msg)
+ else:
+ out(msg)
+
+ # a flush method is required when a doctest uses multiprocessing,
+ # since multiprocessing/popen_fork.py flushes sys.stderr
+ def flush(self):
+ if doctest_warnings:
+ sys.stdout.flush()
+
+ # Run tests, trying to restore global state afterward
+ old_printoptions = np.get_printoptions()
+ old_errstate = np.seterr()
+ old_stderr = sys.stderr
+ cwd = os.getcwd()
+ tmpdir = tempfile.mkdtemp()
+ sys.stderr = MyStderr()
+ try:
+ os.chdir(tmpdir)
+
+ # try to ensure random seed is NOT reproducible
+ np.random.seed(None)
+
+ for t in tests:
+ t.filename = short_path(t.filename, cwd)
+ fails, successes = runner.run(t, out=out)
+ if fails > 0:
+ success = False
+ finally:
+ sys.stderr = old_stderr
+ os.chdir(cwd)
+ shutil.rmtree(tmpdir)
+ np.set_printoptions(**old_printoptions)
+ np.seterr(**old_errstate)
+
+ return success, output
+
+
+def check_doctests(module, verbose, ns=None,
+ dots=True, doctest_warnings=False):
+ """Check code in docstrings of the module's public symbols.
+
+ Returns: list of [(item_name, success_flag, output), ...]
+ """
+ if ns is None:
+ ns = dict(DEFAULT_NAMESPACE)
+
+ # Loop over non-deprecated items
+ results = []
+
+ for name in get_all_dict(module)[0]:
+ full_name = module.__name__ + '.' + name
+
+ if full_name in DOCTEST_SKIPLIST:
+ continue
+
+ try:
+ obj = getattr(module, name)
+ except AttributeError:
+ import traceback
+ results.append((full_name, False,
+ "Missing item!\n" +
+ traceback.format_exc()))
+ continue
+
+ finder = doctest.DocTestFinder()
+ try:
+ tests = finder.find(obj, name, globs=dict(ns))
+ except Exception:
+ import traceback
+ results.append((full_name, False,
+ "Failed to get doctests!\n" +
+ traceback.format_exc()))
+ continue
+
+ success, output = _run_doctests(tests, full_name, verbose,
+ doctest_warnings)
+
+ if dots:
+ output_dot('.' if success else 'F')
+
+ results.append((full_name, success, "".join(output)))
+
+ if HAVE_MATPLOTLIB:
+ import matplotlib.pyplot as plt
+ plt.close('all')
+
+ return results
+
+
+def check_doctests_testfile(fname, verbose, ns=None,
+ dots=True, doctest_warnings=False):
+ """Check code in a text file.
+
+ Mimic `check_doctests` above, differing mostly in test discovery,
+ which is borrowed from the stdlib's doctest.testfile
+ (https://github.com/python-git/python/blob/master/Lib/doctest.py).
+
+ Returns: list of [(item_name, success_flag, output), ...]
+
+ Notes
+ -----
+
+ We also try to weed out pseudocode:
+ * We maintain a list of marker strings which signal pseudocode.
+ * We split the text file into "blocks" of code separated by empty lines
+ and/or intervening text.
+ * If a block contains a marker, the whole block is assumed to be
+ pseudocode and is not doctested.
+
+ The rationale is that, typically, the text looks like this:
+
+ blah
+ <BLANKLINE>
+ >>> from numpy import some_module # pseudocode!
+ >>> func = some_module.some_function
+ >>> func(42) # still pseudocode
+ 146
+ <BLANKLINE>
+ blah
+ <BLANKLINE>
+ >>> 2 + 3 # real code, doctest it
+ 5
+
+ """
+ results = []
+
+ if ns is None:
+ ns = dict(DEFAULT_NAMESPACE)
+
+ _, short_name = os.path.split(fname)
+ if short_name in DOCTEST_SKIPLIST:
+ return results
+
+ full_name = fname
+ if sys.version_info.major <= 2:
+ with open(fname) as f:
+ text = f.read()
+ else:
+ with open(fname, encoding='utf-8') as f:
+ text = f.read()
+
+ PSEUDOCODE = set(['some_function', 'some_module', 'import example',
+ 'ctypes.CDLL', # likely needs compiling, skip it
+ 'integrate.nquad(func,' # ctypes integrate tutorial
+ ])
+
+ # split the text into "blocks" and try to detect and omit pseudocode blocks.
+ parser = doctest.DocTestParser()
+ good_parts = []
+ for part in text.split('\n\n'):
+ tests = parser.get_doctest(part, ns, fname, fname, 0)
+ if any(word in ex.source for word in PSEUDOCODE
+ for ex in tests.examples):
+ # omit it
+ pass
+ else:
+ # `part` looks like good code; doctest it
+ good_parts += [part]
+
+ # Reassemble the good bits and doctest them:
+ good_text = '\n\n'.join(good_parts)
+ tests = parser.get_doctest(good_text, ns, fname, fname, 0)
+ success, output = _run_doctests([tests], full_name, verbose,
+ doctest_warnings)
+
+ if dots:
+ output_dot('.' if success else 'F')
+
+ results.append((full_name, success, "".join(output)))
+
+ if HAVE_MATPLOTLIB:
+ import matplotlib.pyplot as plt
+ plt.close('all')
+
+ return results
+
+
+def init_matplotlib():
+ global HAVE_MATPLOTLIB
+
+ try:
+ import matplotlib
+ matplotlib.use('Agg')
+ HAVE_MATPLOTLIB = True
+ except ImportError:
+ HAVE_MATPLOTLIB = False
+
+
+def main(argv):
+ parser = ArgumentParser(usage=__doc__.lstrip())
+ parser.add_argument("module_names", metavar="SUBMODULES", default=[],
+ nargs='*', help="Submodules to check (default: all public)")
+ parser.add_argument("--doctests", action="store_true", help="Also run doctests")
+ parser.add_argument("-v", "--verbose", action="count", default=0)
+ parser.add_argument("--doctest-warnings", action="store_true",
+ help="Enforce warning checking for doctests")
+ parser.add_argument("--skip-tutorial", action="store_true",
+ help="Skip running doctests in the tutorial.")
+ args = parser.parse_args(argv)
+
+ modules = []
+ names_dict = {}
+
+ if args.module_names:
+ args.skip_tutorial = True
+ else:
+ args.module_names = list(PUBLIC_SUBMODULES)
+
+ os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
+
+ module_names = list(args.module_names)
+ for name in list(module_names):
+ if name in OTHER_MODULE_DOCS:
+ name = OTHER_MODULE_DOCS[name]
+ if name not in module_names:
+ module_names.append(name)
+
+ for submodule_name in module_names:
+ module_name = BASE_MODULE + '.'
+ submodule_name + __import__(module_name) + module = sys.modules[module_name] + + if submodule_name not in OTHER_MODULE_DOCS: + find_names(module, names_dict) + + if submodule_name in args.module_names: + modules.append(module) + + dots = True + success = True + results = [] + + print("Running checks for %d modules:" % (len(modules),)) + + if args.doctests or not args.skip_tutorial: + init_matplotlib() + + for module in modules: + if dots: + if module is not modules[0]: + sys.stderr.write(' ') + sys.stderr.write(module.__name__ + ' ') + sys.stderr.flush() + + all_dict, deprecated, others = get_all_dict(module) + names = names_dict.get(module.__name__, set()) + + mod_results = [] + mod_results += check_items(all_dict, names, deprecated, others, module.__name__) + mod_results += check_rest(module, set(names).difference(deprecated), + dots=dots) + if args.doctests: + mod_results += check_doctests(module, (args.verbose >= 2), dots=dots, + doctest_warnings=args.doctest_warnings) + + for v in mod_results: + assert isinstance(v, tuple), v + + results.append((module, mod_results)) + + if dots: + sys.stderr.write("\n") + sys.stderr.flush() + + if not args.skip_tutorial: + base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..') + tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst') + print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd())) + for filename in sorted(glob.glob(tut_path)): + if dots: + sys.stderr.write('\n') + sys.stderr.write(os.path.split(filename)[1] + ' ') + sys.stderr.flush() + + tut_results = check_doctests_testfile(filename, (args.verbose >= 2), + dots=dots, doctest_warnings=args.doctest_warnings) + + def scratch(): pass # stub out a "module", see below + scratch.__name__ = filename + results.append((scratch, tut_results)) + + if dots: + sys.stderr.write("\n") + sys.stderr.flush() + + # Report results + all_success = True + + for module, mod_results in results: + success = all(x[1] for x in mod_results) + all_success = all_success and success + + if success and args.verbose == 0: + continue + + print("") + print("=" * len(module.__name__)) + print(module.__name__) + print("=" * len(module.__name__)) + print("") + + for name, success, output in mod_results: + if name is None: + if not success or args.verbose >= 1: + print(output.strip()) + print("") + elif not success or (args.verbose >= 2 and output.strip()): + print(name) + print("-"*len(name)) + print("") + print(output.strip()) + print("") + + if all_success: + print("\nOK: refguide and doctests checks passed!") + sys.exit(0) + else: + print("\nERROR: refguide or doctests have errors") + sys.exit(1) + + +if __name__ == '__main__': + main(argv=sys.argv[1:]) diff --git a/tools/travis-test.sh b/tools/travis-test.sh index fa83606b2..353362066 100755 --- a/tools/travis-test.sh +++ b/tools/travis-test.sh @@ -25,8 +25,7 @@ if [ -n "$PYTHON_OPTS" ]; then fi # make some warnings fatal, mostly to match windows compilers -werrors="-Werror=declaration-after-statement -Werror=vla " -werrors+="-Werror=nonnull -Werror=pointer-arith" +werrors="-Werror=vla -Werror=nonnull -Werror=pointer-arith" # build with c99 by default @@ -63,48 +62,6 @@ setup_base() fi } -setup_chroot() -{ - # this can all be replaced with: - # apt-get install libpython2.7-dev:i386 - # CC="gcc -m32" LDSHARED="gcc -m32 -shared" LDFLAGS="-m32 -shared" \ - # linux32 python setup.py build - # when travis updates to ubuntu 14.04 - # - # NumPy may not distinguish between 64 and 32 bit ATLAS in the - # configuration 
stage. - DIR=$1 - set -u - sudo debootstrap --variant=buildd --include=fakeroot,build-essential \ - --arch=$ARCH --foreign $DIST $DIR - sudo chroot $DIR ./debootstrap/debootstrap --second-stage - - # put the numpy repo in the chroot directory - sudo rsync -a $TRAVIS_BUILD_DIR $DIR/ - - # set up repos in the chroot directory for installing packages - echo deb http://archive.ubuntu.com/ubuntu/ \ - $DIST main restricted universe multiverse \ - | sudo tee -a $DIR/etc/apt/sources.list - echo deb http://archive.ubuntu.com/ubuntu/ \ - $DIST-updates main restricted universe multiverse \ - | sudo tee -a $DIR/etc/apt/sources.list - echo deb http://security.ubuntu.com/ubuntu \ - $DIST-security main restricted universe multiverse \ - | sudo tee -a $DIR/etc/apt/sources.list - - sudo chroot $DIR bash -c "apt-get update" - # faster operation with preloaded eatmydata - sudo chroot $DIR bash -c "apt-get install -qq -y eatmydata" - echo '/usr/$LIB/libeatmydata.so' | \ - sudo tee -a $DIR/etc/ld.so.preload - - # install needed packages - sudo chroot $DIR bash -c "apt-get install -qq -y \ - libatlas-base-dev gfortran python3-dev python3-pip \ - cython python3-pytest" -} - run_test() { if [ -n "$USE_DEBUG" ]; then @@ -113,7 +70,7 @@ run_test() if [ -n "$RUN_COVERAGE" ]; then $PIP install pytest-cov - NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 + export NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1 COVERAGE_FLAG=--coverage fi @@ -224,15 +181,6 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then popd run_test -elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then - DIR=/chroot - setup_chroot $DIR - # the chroot'ed environment will not have the current locale, - # avoid any warnings which may disturb testing - export LANG=C LC_ALL=C - # run again in chroot with this time testing with python3 - sudo linux32 chroot $DIR bash -c \ - "cd numpy && PYTHON=python3 PIP=pip3 IN_CHROOT=1 $0 test" else setup_base run_test |
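Editor's note: the comparison logic in `Checker.check_output` above packs several heuristics into one method. The following standalone sketch is not part of the patch; the names `_to_obj` and `loose_compare` are made up for illustration. It shows only the core idea: evaluate the expected and actual text, repairing bracketed numpy print output such as "[0 1 2]" by reinserting commas, and then fall back to `np.allclose` with the same loose default tolerances.

import numpy as np

def _to_obj(s, ns):
    # eval a doctest 'want'/'got' string; if that fails, try to repair a
    # bracketed numpy print such as "[0 1 2]" by reinserting commas
    try:
        return eval(s, dict(ns))
    except Exception:
        s = s.strip()
        if s.startswith('[') and s.endswith(']'):
            return eval('[' + ', '.join(s[1:-1].split()) + ']', dict(ns))
        raise

def loose_compare(want, got, atol=1e-8, rtol=1e-2):
    if want == got:          # fast path: textually identical
        return True
    ns = {'array': np.array, 'nan': np.nan, 'inf': np.inf}
    try:
        return np.allclose(_to_obj(want, ns), _to_obj(got, ns),
                           atol=atol, rtol=rtol)
    except Exception:
        return False

print(loose_compare("[0 1 2]", "[0, 1.0000001, 2]"))  # True
print(loose_compare("array([1., 2.])", "[1 2]"))      # True
print(loose_compare("[0 1 2]", "[0 1 5]"))            # False

The checker in the patch additionally skips matplotlib stopwords, outputs marked "# random"/"# may vary", object addresses, and folds named-tuple reprs back into plain tuples before comparing.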
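Editor's note: similarly, the pseudocode filtering performed by `check_doctests_testfile` can be illustrated with a minimal sketch (again not part of the patch; `MARKERS` and `real_code_blocks` are hypothetical names standing in for the PSEUDOCODE set and the inline loop): split the tutorial text on blank lines, drop any block whose examples mention a marker, and doctest only what remains.

import doctest

MARKERS = {'some_module', 'some_function'}  # stand-in for the PSEUDOCODE set

def real_code_blocks(text):
    parser = doctest.DocTestParser()
    good = []
    for block in text.split('\n\n'):
        test = parser.get_doctest(block, {}, 'tutorial', 'tutorial', 0)
        if any(m in ex.source for m in MARKERS for ex in test.examples):
            continue          # looks like pseudocode; skip the whole block
        good.append(block)
    return '\n\n'.join(good)

sample = ">>> from numpy import some_module  # pseudocode\n\n>>> 2 + 3\n5"
print(real_code_blocks(sample))  # keeps only the '2 + 3' block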