-rw-r--r--  .travis.yml                               |  27
-rw-r--r--  benchmarks/benchmarks/bench_indexing.py   |   8
-rw-r--r--  benchmarks/benchmarks/bench_io.py         |   7
-rw-r--r--  benchmarks/benchmarks/bench_linalg.py     |  14
-rw-r--r--  benchmarks/benchmarks/bench_reduce.py     |  11
-rw-r--r--  benchmarks/benchmarks/bench_ufunc.py      |   6
-rw-r--r--  benchmarks/benchmarks/common.py           | 112
-rw-r--r--  doc/HOWTO_RELEASE.rst.txt                 | 203
-rw-r--r--  doc/release/1.10.2-notes.rst              |   4
-rw-r--r--  doc/release/1.11.0-notes.rst              |   2
-rw-r--r--  doc/source/dev/governance/index.rst       |   2
-rw-r--r--  numpy/core/src/npymath/npy_math.c.src     |   6
-rw-r--r--  numpy/core/tests/test_datetime.py         |   4
-rw-r--r--  numpy/core/tests/test_defchararray.py     |  10
-rw-r--r--  numpy/core/tests/test_deprecations.py     |  37
-rw-r--r--  numpy/core/tests/test_mem_overlap.py      |   9
-rw-r--r--  numpy/core/tests/test_memmap.py           |  12
-rw-r--r--  numpy/core/tests/test_multiarray.py       |  30
-rw-r--r--  numpy/core/tests/test_numeric.py          |   4
-rw-r--r--  numpy/core/tests/test_scalarinherit.py    |  12
-rw-r--r--  numpy/core/tests/test_shape_base.py       |   4
-rw-r--r--  numpy/core/tests/test_ufunc.py            |   6
-rw-r--r--  numpy/distutils/ccompiler.py              |   8
-rw-r--r--  numpy/lib/function_base.py                |   4
-rw-r--r--  numpy/lib/tests/test_io.py                |   6
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py      |   6
-rw-r--r--  numpy/linalg/tests/test_linalg.py         |   4
-rw-r--r--  numpy/ma/tests/test_core.py               |  42
-rw-r--r--  numpy/random/mtrand/distributions.c       |   2
-rw-r--r--  numpy/random/mtrand/mtrand.pyx            |   2
-rw-r--r--  numpy/random/tests/test_random.py         |   6
-rw-r--r--  numpy/tests/test_scripts.py               |   9
-rwxr-xr-x  tools/travis-test.sh                      | 142
-rwxr-xr-x  tools/travis-upload-wheel.sh              |  11

34 files changed, 513 insertions(+), 259 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index 2447360f5..589d7a9e6 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,16 +29,18 @@ python:
- 3.5
matrix:
include:
- - python: 3.3
- env: USE_CHROOT=1 ARCH=i386 DIST=trusty PYTHON=3.4
+ - python: 2.7
+ env: USE_CHROOT=1 ARCH=i386 DIST=trusty PYTHON=2.7
sudo: true
+ dist: trusty
addons:
apt:
packages:
- - *common_packages
- debootstrap
- - python: 3.2
+ - python: 3.4
env: USE_DEBUG=1
+ sudo: true
+ dist: trusty
addons:
apt:
packages:
@@ -48,10 +50,17 @@ matrix:
- python3-nose
- python: 2.7
env: NPY_RELAXED_STRIDES_CHECKING=0 PYTHON_OO=1
+ - python: 3.5
+ env:
+ - USE_WHEEL=1
+ - WHEELHOUSE_UPLOADER_USERNAME=travis.numpy
+ # The following is generated with the command:
+ # travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY
+ - secure: "IEicLPrP2uW+jW51GRwkONQpdPqMVtQL5bdroqR/U8r9TrXrbCVRhp4AP8JYZT0ptoBpmZWWGjmKBndB68QlMiUjQPowiFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAMahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU="
- python: 2.7
- env: USE_WHEEL=1
- - python: 2.7
- env: PYTHONOPTIMIZE=2
+ env:
+ - PYTHONOPTIMIZE=2
+ - USE_ASV=1
before_install:
- uname -a
- free -m
@@ -69,7 +78,11 @@ before_install:
# pip install coverage
# Speed up install by not compiling Cython
- pip install --install-option="--no-cython-compile" Cython
+ - if [ -n "$USE_ASV" ]; then pip install asv; fi
- popd
script:
- ./tools/travis-test.sh
+
+after_success:
+ - ./tools/travis-upload-wheel.sh
diff --git a/benchmarks/benchmarks/bench_indexing.py b/benchmarks/benchmarks/bench_indexing.py
index d6dc4edf0..3e5a2ee60 100644
--- a/benchmarks/benchmarks/bench_indexing.py
+++ b/benchmarks/benchmarks/bench_indexing.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, squares_, indexes_, indexes_rand_
+from .common import Benchmark, get_squares_, get_indexes_, get_indexes_rand_
import sys
import six
@@ -17,10 +17,10 @@ class Indexing(Benchmark):
def setup(self, indexes, sel, op):
sel = sel.replace('I', indexes)
- ns = {'squares_': squares_,
+ ns = {'squares_': get_squares_(),
'np': np,
- 'indexes_': indexes_,
- 'indexes_rand_': indexes_rand_}
+ 'indexes_': get_indexes_(),
+ 'indexes_rand_': get_indexes_rand_()}
if sys.version_info[0] >= 3:
code = "def run():\n for a in squares_.values(): a[%s]%s"
diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py
index 45cdf95ee..782d4ab30 100644
--- a/benchmarks/benchmarks/bench_io.py
+++ b/benchmarks/benchmarks/bench_io.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, squares
+from .common import Benchmark, get_squares
import numpy as np
@@ -57,5 +57,8 @@ class CopyTo(Benchmark):
class Savez(Benchmark):
+ def setup(self):
+ self.squares = get_squares()
+
def time_vb_savez_squares(self):
- np.savez('tmp.npz', squares)
+ np.savez('tmp.npz', self.squares)
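
The change above moves construction of the sample data out of module import
time and into the benchmark's ``setup`` method, which asv calls before timing
starts. A minimal sketch of the pattern, with a hypothetical ``make_data``
helper standing in for ``get_squares``::

    import numpy as np

    def make_data():
        # hypothetical stand-in for get_squares(): any expensive setup
        return np.random.rand(1000, 1000)

    class SaveBench(object):
        def setup(self):
            # asv runs setup() before timing, so the cost of building
            # the data never pollutes the measured time
            self.data = make_data()

        def time_save(self):
            np.savez('tmp.npz', self.data)
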
diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py
index c844cc79e..a323609b7 100644
--- a/benchmarks/benchmarks/bench_linalg.py
+++ b/benchmarks/benchmarks/bench_linalg.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, squares_, indexes_rand
+from .common import Benchmark, get_squares_, get_indexes_rand, TYPES1
import numpy as np
@@ -36,7 +36,7 @@ class Eindot(Benchmark):
class Linalg(Benchmark):
params = [['svd', 'pinv', 'det', 'norm'],
- list(squares_.keys())]
+ TYPES1]
param_names = ['op', 'type']
def setup(self, op, typename):
@@ -46,10 +46,10 @@ class Linalg(Benchmark):
if op == 'cholesky':
# we need a positive definite
- self.a = np.dot(squares_[typename],
- squares_[typename].T)
+ self.a = np.dot(get_squares_()[typename],
+ get_squares_()[typename].T)
else:
- self.a = squares_[typename]
+ self.a = get_squares_()[typename]
# check that dtype is supported at all
try:
@@ -63,8 +63,8 @@ class Linalg(Benchmark):
class Lstsq(Benchmark):
def setup(self):
- self.a = squares_['float64']
- self.b = indexes_rand[:100].astype(np.float64)
+ self.a = get_squares_()['float64']
+ self.b = get_indexes_rand()[:100].astype(np.float64)
def time_numpy_linalg_lstsq_a__b_float64(self):
np.linalg.lstsq(self.a, self.b)
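
For the ``cholesky`` benchmark the input is built as ``np.dot(a, a.T)``
because Cholesky factorization requires a symmetric positive definite matrix,
and a matrix times its own transpose is positive semi-definite by construction
(and almost surely positive definite for random ``a``). A small illustration,
assuming nothing beyond numpy itself::

    import numpy as np

    a = np.random.rand(50, 50)
    spd = np.dot(a, a.T)           # symmetric positive (semi-)definite
    L = np.linalg.cholesky(spd)    # succeeds for the s.p.d. product
    np.testing.assert_allclose(np.dot(L, L.T), spd, atol=1e-10)
    # np.linalg.cholesky(a) on the raw, non-symmetric matrix would
    # typically raise LinAlgError
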
diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py
index a810e828e..704023528 100644
--- a/benchmarks/benchmarks/bench_reduce.py
+++ b/benchmarks/benchmarks/bench_reduce.py
@@ -1,16 +1,19 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, TYPES1, squares
+from .common import Benchmark, TYPES1, get_squares
import numpy as np
class AddReduce(Benchmark):
+ def setup(self):
+ self.squares = get_squares().values()
+
def time_axis_0(self):
- [np.add.reduce(a, axis=0) for a in squares.values()]
+ [np.add.reduce(a, axis=0) for a in self.squares]
def time_axis_1(self):
- [np.add.reduce(a, axis=1) for a in squares.values()]
+ [np.add.reduce(a, axis=1) for a in self.squares]
class AddReduceSeparate(Benchmark):
@@ -18,7 +21,7 @@ class AddReduceSeparate(Benchmark):
param_names = ['axis', 'type']
def setup(self, axis, typename):
- self.a = squares[typename]
+ self.a = get_squares()[typename]
def time_reduce(self, axis, typename):
np.add.reduce(self.a, axis=axis)
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index 7946ccf65..8f821ce08 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -1,6 +1,6 @@
from __future__ import absolute_import, division, print_function
-from .common import Benchmark, squares_
+from .common import Benchmark, get_squares_
import numpy as np
@@ -39,7 +39,7 @@ class Broadcast(Benchmark):
class UFunc(Benchmark):
params = [ufuncs]
param_names = ['ufunc']
- timeout = 2
+ timeout = 10
def setup(self, ufuncname):
np.seterr(all='ignore')
@@ -48,7 +48,7 @@ class UFunc(Benchmark):
except AttributeError:
raise NotImplementedError()
self.args = []
- for t, a in squares_.items():
+ for t, a in get_squares_().items():
arg = (a,) * self.f.nin
try:
self.f(*arg)
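
The ``setup`` above discovers which sample dtypes each ufunc accepts by simply
calling it and catching ``TypeError``, rather than hard-coding a support
table. A reduced sketch of that probing loop::

    import numpy as np

    samples = {t: np.ones(4, dtype=t)
               for t in ('int32', 'float64', 'complex128')}

    f = np.arctan2                 # nin == 2: needs two input arrays
    args = []
    for t, a in samples.items():
        arg = (a,) * f.nin
        try:
            f(*arg)                # probe: does the ufunc accept this dtype?
        except TypeError:
            continue               # e.g. arctan2 rejects complex input
        args.append(arg)

    print([a[0].dtype for a in args])
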
diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py
index e98396bed..066d4b130 100644
--- a/benchmarks/benchmarks/common.py
+++ b/benchmarks/benchmarks/common.py
@@ -25,40 +25,90 @@ TYPES1 = [
'complex256',
]
+
+def memoize(func):
+ result = []
+ def wrapper():
+ if not result:
+ result.append(func())
+ return result[0]
+ return wrapper
+
+
# values which will be used to construct our sample data matrices
# replicate 10 times to speed up initial imports of this helper
# and generate some redundancy
-values = [random.uniform(0, 100) for x in range(nx*ny//10)]*10
-
-squares = {t: numpy.array(values,
- dtype=getattr(numpy, t)).reshape((nx, ny))
- for t in TYPES1}
-
-# adjust complex ones to have non-degenerated imagery part -- use
-# original data transposed for that
-for t, v in squares.items():
- if t.startswith('complex'):
- v += v.T*1j
-
-# smaller squares
-squares_ = {t: s[:nxs, :nys] for t, s in squares.items()}
-# vectors
-vectors = {t: s[0] for t, s in squares.items()}
-
-indexes = list(range(nx))
-# so we do not have all items
-indexes.pop(5)
-indexes.pop(95)
-
-indexes_rand = indexes[:] # copy
-random.shuffle(indexes_rand) # in-place shuffle
-
-# only now make them arrays
-indexes = numpy.array(indexes)
-indexes_rand = numpy.array(indexes_rand)
-# smaller versions
-indexes_ = indexes[indexes < nxs]
-indexes_rand_ = indexes_rand[indexes_rand < nxs]
+
+@memoize
+def get_values():
+ rnd = numpy.random.RandomState(1)
+ values = numpy.tile(rnd.uniform(0, 100, size=nx*ny//10), 10)
+ return values
+
+
+@memoize
+def get_squares():
+ values = get_values()
+ squares = {t: numpy.array(values,
+ dtype=getattr(numpy, t)).reshape((nx, ny))
+ for t in TYPES1}
+
+ # adjust complex ones to have a non-degenerate imaginary part -- use
+ # original data transposed for that
+ for t, v in squares.items():
+ if t.startswith('complex'):
+ v += v.T*1j
+ return squares
+
+
+@memoize
+def get_squares_():
+ # smaller squares
+ squares_ = {t: s[:nxs, :nys] for t, s in get_squares().items()}
+ return squares_
+
+
+@memoize
+def get_vectors():
+ # vectors
+ vectors = {t: s[0] for t, s in get_squares().items()}
+ return vectors
+
+
+@memoize
+def get_indexes():
+ indexes = list(range(nx))
+ # so we do not have all items
+ indexes.pop(5)
+ indexes.pop(95)
+
+ indexes = numpy.array(indexes)
+ return indexes
+
+
+@memoize
+def get_indexes_rand():
+ rnd = random.Random(1)
+
+ indexes_rand = get_indexes().tolist() # copy
+ rnd.shuffle(indexes_rand) # in-place shuffle
+ indexes_rand = numpy.array(indexes_rand)
+ return indexes_rand
+
+
+@memoize
+def get_indexes_():
+ # smaller versions
+ indexes = get_indexes()
+ indexes_ = indexes[indexes < nxs]
+ return indexes_
+
+
+@memoize
+def get_indexes_rand_():
+ indexes_rand = get_indexes_rand()
+ indexes_rand_ = indexes_rand[indexes_rand < nxs]
+ return indexes_rand_
class Benchmark(object):
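
The new ``memoize`` decorator caches the result of a zero-argument function in
a closed-over list, so each sample matrix is built once, on first use from a
benchmark's ``setup``, instead of at import time. A standalone sketch of the
same idiom::

    def memoize(func):
        result = []                # closure cell; empty until first call
        def wrapper():
            if not result:
                result.append(func())
            return result[0]       # later calls reuse the cached value
        return wrapper

    @memoize
    def get_table():
        print("building...")       # runs once, however often it is called
        return {n: n * n for n in range(10)}

    get_table()                    # prints "building..."
    get_table()                    # cached; prints nothing
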
diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt
index b77a6c25c..ee05981fc 100644
--- a/doc/HOWTO_RELEASE.rst.txt
+++ b/doc/HOWTO_RELEASE.rst.txt
@@ -1,10 +1,12 @@
This file gives an overview of what is necessary to build binary releases for
-NumPy on OS X. Windows binaries are built here using Wine, they can of course
-also be built on Windows itself. Building OS X binaries on another platform is
-not possible.
+NumPy. Windows binaries are built here using Wine; they can of course also be
+built on Windows itself. Building OS X binaries on another platform is not
+possible, but our current OSX binary build procedure uses travis-ci virtual
+machines running OSX.
Current build and release info
==============================
+
The current info on building and releasing NumPy and SciPy is scattered in
several places. It should be summarized in one place, updated and where
necessary described in more detail. The sections below list all places where
@@ -34,24 +36,35 @@ Release Scripts
---------------
* https://github.com/numpy/numpy-vendor
-
Supported platforms and versions
================================
-Python 2.6-2.7 and >=3.2 are the currently supported versions on all platforms.
+
+Python 2.6-2.7 and >=3.2 are the currently supported versions when building
+from source. We test numpy against all these versions every time we merge
+code to trunk. Binary installers may be available for a subset of these
+versions (see below).
OS X
----
-OS X versions >= 10.5 are supported. Note that there are currently still
-issues with compiling on 10.7, due to Apple moving to gcc-llvm.
-Only the Python from `python.org <http://python.org>`_ is supported. Binaries
-do *not* support Apple Python.
+
+Python 2.7 and >=3.3 are the versions for which we provide binary installers.
+OS X versions >= 10.6 are supported. We build binary wheels for OSX that are
+compatible with Python.org Python, system Python, homebrew and macports - see
+this `OSX wheel building summary
+<https://github.com/MacPython/wiki/wiki/Spinning-wheels>`_ for details.
Windows
-------
-Windows XP, Vista and 7 are supported.
+
+32-bit Python 2.7, 3.3, 3.4 are the versions for which we provide binary
+installers. Windows XP, Vista and 7 are supported. Our current windows mingw
+toolchain is not able to build 64-bit binaries of numpy. We are hoping to
+update to a `mingw-w64 toolchain
+<https://github.com/numpy/numpy/wiki/Mingw-w64-faq>`_ soon.
Linux
-----
+
Many distributions include NumPy. Building from source is also relatively
straightforward. Only tarballs are created for Linux, no specific binary
installers are provided (yet).
@@ -61,28 +74,24 @@ BSD / Solaris
No binaries are provided, but successful builds on Solaris and BSD have been
reported.
-
Tool chain
==========
+
Compilers
---------
+
The same gcc version is used as the one with which Python itself is built on
each platform. At the moment this means:
-* OS X uses gcc-4.0 (since that is what Python itself is built with) up to
- Python 2.6. Python 2.7 comes in two flavors; the 32-bit version is built with
- gcc-4.0 and the 64-bit version with gcc-4.2. The "release.sh" script
- sets environment variables to pick the right compiler.
- All binaries should be built on OS X 10.5, with the exception of the 64-bit
- Python 2.7 one which should be built on 10.6.
+* OS X builds on travis currently use `clang`. It appears that binary wheels
+ for OSX >= 10.6 can be safely built from OSX 10.9 when building against
+ the Python from the Python.org installers.
* Windows builds use MinGW 3.4.5. Updating this to a more recent MinGW with
GCC 4.x is desired, but there are still practical difficulties in building
the binary installers.
-Cython is not needed for building the binaries, because generated C files from
-Cython sources are checked in at the moment. It is worth keeping an eye on what
-Cython versions have been used to generate all current C files, it should be
-the same and most recent version (0.16 as of now).
+You will need Cython for building the binaries. Cython compiles the ``.pyx``
+files in the numpy distribution to ``.c`` files.
Fortran: on OS X gfortran from `this site <http://r.research.att.com/tools/>`_
is used. On Windows g77 (included in MinGW) is the current default, in the future
@@ -93,13 +102,6 @@ Python
* Python(s) from `python.org <http://python.org>`_
* virtualenv
* paver
-* bdist_mpkg from https://github.com/rgommers/bdist_mpkg (has a necessary
- patch, don't use the unsupported version on PyPi).
-
-Python itself should be installed multiple times - each version a binary is
-built for should be installed. The other dependencies only have to be installed
-for the default Python version on the system. The same applies to the doc-build
-dependencies below.
Building docs
-------------
@@ -113,7 +115,7 @@ Wine
For building Windows binaries on OS X Wine can be used. In Wine the following
needs to be installed:
-* Python 2.6-2.7 and 3.2
+* Python 2.6-2.7 and 3.3
* MakeNsis
* CpuId plugin for MakeNsis : this can be found in the NumPy source tree under
tools/win32build/cpucaps and has to be built with MinGW (see SConstruct file in
@@ -167,22 +169,27 @@ What is released
Binaries
--------
-Windows binaries in "superpack" form for Python 2.6/2.7/3.2/3.3.
-A superpack contains three builds, for SSE2, SSE3 and no SSE.
-OS X binaries are made in dmg format, targeting only the Python from
-`python.org <http://python.org>`_
+Windows binary installers in "superpack" form for Python 2.7/3.3/3.4. A
+superpack contains three builds, for SSE2, SSE3 and no SSE.
+
+Wheels
+------
+
+OSX wheels built via travis-ci: see `build OSX wheels`_.
+
+.. _build OSX wheels: https://github.com/MacPython/numpy-wheels
Other
-----
+
* Release Notes
* Changelog
Source distribution
-------------------
-A source release in both .zip and .tar.gz formats is released.
+We build source releases in both .zip and .tar.gz formats.
Release process
===============
@@ -200,6 +207,7 @@ Make sure current trunk builds a package correctly
--------------------------------------------------
::
+ git clean -fxd
python setup.py bdist
python setup.py sdist
@@ -270,8 +278,12 @@ updated for a major release.
Check the release notes
-----------------------
-Check that the release notes are up-to-date, and mention at least the
-following:
+Check that the release notes are up-to-date.
+
+Write or update the release notes in a file named for the release, such as
+``doc/release/1.11.0-notes.rst``.
+
+Mention at least the following:
- major new features
- deprecated and removed features
@@ -289,15 +301,55 @@ Identify the commit hash of the release, e.g. 1b2e1d63ff.
::
git co 1b2e1d63ff # gives warning about detached head
-Now, set ``release=True`` in setup.py, then
+First, change/check the following variables in ``pavement.py`` depending on the
+release version::
-::
+ RELEASE_NOTES = 'doc/release/1.7.0-notes.rst'
+ LOG_START = 'v1.6.0'
+ LOG_END = 'maintenance/1.7.x'
+
+Make any other necessary changes. When you are ready to release, apply the
+following change::
+
+ diff --git a/setup.py b/setup.py
+ index b1f53e3..8b36dbe 100755
+ --- a/setup.py
+ +++ b/setup.py
+ @@ -57,7 +57,7 @@ PLATFORMS = ["Windows", "Linux", "Solaris", "Mac OS-
+ MAJOR = 1
+ MINOR = 7
+ MICRO = 0
+ -ISRELEASED = False
+ +ISRELEASED = True
+ VERSION = '%d.%d.%drc1' % (MAJOR, MINOR, MICRO)
+
+ # Return the git revision as a string
- git commit -m "REL: Release." setup.py
+And make sure the ``VERSION`` variable is set properly.
+
+Now you can make the release commit and tag. We recommend you don't push
+the commit or tag immediately, just in case you need to do more cleanup. We
+prefer to defer the push of the tag until we're confident this is the exact
+form of the released code (see :ref:`push-tag-and-commit`)::
+
+ git commit -s -m "REL: Release." setup.py
git tag -s <version>
- git push origin <version>
-Note: ``git tag -s`` creates a signed tag - make sure your PGP key is public.
+The ``-s`` flag makes a PGP (usually GPG) signed tag. Please do sign the
+release tags.
+
+The release tag should have the release number in the annotation (tag
+message). Unfortunately, while the name of a tag can be changed without
+breaking the signature, the contents of the message cannot.
+
+See https://github.com/scipy/scipy/issues/4919 for a discussion of signing
+release tags, and http://keyring.debian.org/creating-key.html for instructions
+on creating a GPG key if you do not have one.
+
+To make your key more readily identifiable as yours, consider sending your key
+to public keyservers, with a command such as::
+
+ gpg --send-keys <yourkeyid>
Apply patch to fix bogus strides
--------------------------------
@@ -314,8 +366,34 @@ Increment the release number in setup.py. Release candidates should have "rc1"
Also create a new version hash in cversions.txt and a corresponding version
define NPY_x_y_API_VERSION in numpyconfig.h
+Trigger the OSX builds on travis
+--------------------------------
+
+See `build OSX wheels`_.
+
+You may need to check the ``.travis.yml`` file of the
+https://github.com/MacPython/numpy-wheels repository.
+
+Make sure that the release tag has been pushed, and that the ``.travis.yml``
+is set as follows::
+
+ - NP_COMMIT=latest-tag # comment out to build version in submodule
+
+Trigger a build by pushing a commit (empty or otherwise) to the repository::
+
+ cd /path/to/numpy-wheels
+ git commit --allow-empty
+ git push
+
+The wheels, once built, appear at http://wheels.scipy.org
+
Make the release
----------------
+
+Build the changelog and notes for upload with::
+
+ paver write_release_and_log
+
The tar-files and binary releases for distribution should be uploaded to SourceForge,
together with the Release Notes and the Changelog. Uploading can be done
through a web interface or, more efficiently, through scp/sftp/rsync as
@@ -327,19 +405,41 @@ For example::
Update PyPi
-----------
+
The final release (not betas or release candidates) should be uploaded to PyPi.
There are two ways to update PyPi, the first one is::
- $ python setup.py sdist upload
+ $ git clean -fxd # to be safe
+ $ python setup.py sdist --formats=gztar,zip # to check
+ # python setup.py sdist --formats=gztar,zip upload --sign
-and the second one is to upload the PKG_INFO file inside the sdist dir in the
+This will ask for your PGP key passphrase, in order to sign the built source
+packages.
+
+The second way is to upload the PKG_INFO file inside the sdist dir in the
web interface of PyPi. The source tarball can also be uploaded through this
-interface. A simple binary installer for windows, created with
-``bdist_wininst``, should also be uploaded to PyPi so ``easy_install numpy``
-works.
+interface.
+
+To push the travis-ci OSX wheels up to pypi, see:
+https://github.com/MacPython/numpy-wheels#uploading-the-built-wheels-to-pypi
+
+.. _push-tag-and-commit:
+
+Push the release tag and commit
+-------------------------------
+
+Finally, once you are confident this tag correctly defines the source code
+that you released, you can push the tag and release commit up to github::
+
+ git push # Push release commit
+ git push upstream <version> # Push tag named <version>
+
+where ``upstream`` points to the main https://github.com/numpy/numpy.git
+repository.
Update docs.scipy.org
---------------------
+
All documentation for a release can be updated on http://docs.scipy.org/ with:
make dist
@@ -361,11 +461,16 @@ https://github.com/scipy/docs.scipy.org. Do the following:
Update scipy.org
----------------
+
A release announcement with a link to the download site should be placed in the
sidebar of the front page of scipy.org.
+The scipy.org update should be made via a PR at
+https://github.com/scipy/scipy.org. The file that needs modification is
+``www/index.rst``. Search for ``News``.
+
Announce to the lists
---------------------
+
The release should be announced on the mailing lists of
NumPy and SciPy, to python-announce, and possibly also those of
Matplotlib, IPython and/or Pygame.
@@ -374,6 +479,12 @@ During the beta/RC phase an explicit request for testing the binaries with
several other libraries (SciPy/Matplotlib/Pygame) should be posted on the
mailing list.
+Announce to Linux Weekly News
+-----------------------------
+
+Email the editor of LWN to let them know of the release. Directions at:
+https://lwn.net/op/FAQ.lwn#contact
+
After the final release
-----------------------
After the final release is announced, a few administrative tasks are left to be
diff --git a/doc/release/1.10.2-notes.rst b/doc/release/1.10.2-notes.rst
index a597a817c..02e756474 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/release/1.10.2-notes.rst
@@ -68,6 +68,7 @@ Issues Fixed
* gh-6719 Error compiling Cython file: Pythonic division not allowed without gil.
* gh-6771 Numpy.rec.fromarrays losing dtype metadata between versions 1.9.2 and 1.10.1
* gh-6781 The travis-ci script in maintenance/1.10.x needs fixing.
+* gh-6807 Windows testing errors for 1.10.2
Merged PRs
@@ -126,6 +127,9 @@ the PR number for the original PR against master is listed.
* gh-6780 BUG: metadata is not copied to base_dtype.
* gh-6783 BUG: Fix travis ci testing for new google infrastructure.
* gh-6785 BUG: Quick and dirty fix for interp.
+* gh-6813 TST,BUG: Make test_mvoid_multidim_print work for 32 bit systems.
+* gh-6817 BUG: Disable 32-bit msvc9 compiler optimizations for npy_rint.
+* gh-6819 TST: Fix test_mvoid_multidim_print failures on Python 2.x for Windows.
Initial support for mingwpy was reverted as it was causing problems for
non-windows builds.
diff --git a/doc/release/1.11.0-notes.rst b/doc/release/1.11.0-notes.rst
index a6c04e95a..e7b9c57e2 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/release/1.11.0-notes.rst
@@ -111,7 +111,7 @@ Views of arrays in Fortran order
The f_contiguous flag was used to signal that views as a dtypes that
changed the element size would change the first index. This was always a
bit problematical for arrays that were both f_contiguous and c_contiguous
-because c_contiguous took precendence. Relaxed stride checking results in
+because c_contiguous took precedence. Relaxed stride checking results in
more such dual contiguous arrays and breaks some existing code as a result.
Note that this also affects changing the dtype by assigning to the dtype
attribute of an array. The aim of this deprecation is to restrict views to
diff --git a/doc/source/dev/governance/index.rst b/doc/source/dev/governance/index.rst
index 9a611a2fe..3919e5e66 100644
--- a/doc/source/dev/governance/index.rst
+++ b/doc/source/dev/governance/index.rst
@@ -1,5 +1,5 @@
#####################
-Contributing to Numpy
+NumPy governance
#####################
.. toctree::
diff --git a/numpy/core/src/npymath/npy_math.c.src b/numpy/core/src/npymath/npy_math.c.src
index 7f62810d5..32fa41788 100644
--- a/numpy/core/src/npymath/npy_math.c.src
+++ b/numpy/core/src/npymath/npy_math.c.src
@@ -260,6 +260,9 @@ double npy_atanh(double x)
#endif
#ifndef HAVE_RINT
+#if defined(_MSC_VER) && (_MSC_VER == 1500) && !defined(_WIN64)
+#pragma optimize("", off)
+#endif
double npy_rint(double x)
{
double y, r;
@@ -280,6 +283,9 @@ double npy_rint(double x)
}
return y;
}
+#if defined(_MSC_VER) && (_MSC_VER == 1500) && !defined(_WIN64)
+#pragma optimize("", on)
+#endif
#endif
#ifndef HAVE_TRUNC
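
The fallback above implements IEEE round-half-to-even, and the new ``#pragma
optimize`` guard only stops the 32-bit MSVC 9 optimizer from miscompiling it
(the bug behind gh-6817). The rounding behaviour it must preserve, seen from
Python through ``np.rint``::

    import numpy as np

    # halfway cases round to the nearest *even* integer, not away from zero
    print(np.rint([0.5, 1.5, 2.5, -0.5, -1.5]))
    # -> [ 0.  2.  2. -0. -2.]
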
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 5fa281867..563aa48fb 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -571,9 +571,9 @@ class TestDateTime(TestCase):
"Verify that datetime dtype __setstate__ can handle bad arguments"
dt = np.dtype('>M8[us]')
assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
- assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+ assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
- assert (dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
+ assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
# datetime <op> datetime computes the metadata gcd
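
Most of the test changes in this commit swap the plain ``assert`` statement
for ``numpy.testing.assert_``. The motivation is the new ``PYTHONOPTIMIZE=2``
Travis entry: under ``python -O``/``-OO`` the interpreter strips ``assert``
statements entirely, so tests written with them silently stop checking
anything, while ``assert_`` is an ordinary function call and always runs. A
small demonstration of the difference::

    from numpy.testing import assert_

    def check(x):
        assert x > 0       # compiled away under python -O / -OO
        assert_(x > 0)     # always executed; raises AssertionError on failure

    check(1)               # passes either way
    # check(-1) still raises under python -OO, but only because of the
    # assert_ line; the bare assert has become a no-op there.
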
diff --git a/numpy/core/tests/test_defchararray.py b/numpy/core/tests/test_defchararray.py
index 9ef316481..e828b879f 100644
--- a/numpy/core/tests/test_defchararray.py
+++ b/numpy/core/tests/test_defchararray.py
@@ -680,15 +680,15 @@ class TestOperations(TestCase):
dtype='S4').view(np.chararray)
sl1 = arr[:]
assert_array_equal(sl1, arr)
- assert sl1.base is arr
- assert sl1.base.base is arr.base
+ assert_(sl1.base is arr)
+ assert_(sl1.base.base is arr.base)
sl2 = arr[:, :]
assert_array_equal(sl2, arr)
- assert sl2.base is arr
- assert sl2.base.base is arr.base
+ assert_(sl2.base is arr)
+ assert_(sl2.base.base is arr.base)
- assert arr[0, 0] == asbytes('abc')
+ assert_(arr[0, 0] == asbytes('abc'))
def test_empty_indexing():
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 8f7e55d91..f6dc3d842 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -89,7 +89,7 @@ class _DeprecationTestCase(object):
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
lst = [w.category for w in self.log]
- raise AssertionError("\n".join([msg] + [lst]))
+ raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
warnings.filterwarnings("error", message=self.message,
@@ -163,8 +163,8 @@ class TestRankDeprecation(_DeprecationTestCase):
class TestComparisonDeprecations(_DeprecationTestCase):
- """This tests the deprecation, for non-elementwise comparison logic.
- This used to mean that when an error occured during element-wise comparison
+ """This tests the deprecation, for non-element-wise comparison logic.
+ This used to mean that when an error occurred during element-wise comparison
(i.e. broadcasting) NotImplemented was returned, but also in the comparison
itself, False was given instead of the error.
@@ -192,13 +192,13 @@ class TestComparisonDeprecations(_DeprecationTestCase):
b = np.array(['a', 'b', 'c'])
assert_raises(ValueError, lambda x, y: x == y, a, b)
- # The empty list is not cast to string, this is only to document
+ # The empty list is not cast to string, as this is only to document
# that fact (it likely should be changed). This means that the
# following works (and returns False) due to dtype mismatch:
a == []
def test_none_comparison(self):
- # Test comparison of None, which should result in elementwise
+ # Test comparison of None, which should result in element-wise
# comparison in the future. [1, 2] == None should be [False, False].
with warnings.catch_warnings():
warnings.filterwarnings('always', '', FutureWarning)
@@ -211,7 +211,7 @@ class TestComparisonDeprecations(_DeprecationTestCase):
assert_raises(FutureWarning, operator.ne, np.arange(3), None)
def test_scalar_none_comparison(self):
- # Scalars should still just return false and not give a warnings.
+ # Scalars should still just return False and not give a warning.
# The comparisons are flagged by pep8, ignore that.
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', FutureWarning)
@@ -226,9 +226,9 @@ class TestComparisonDeprecations(_DeprecationTestCase):
assert_(np.datetime64('NaT') != None)
assert_(len(w) == 0)
- # For documentaiton purpose, this is why the datetime is dubious.
+ # For documentation purposes, this is why the datetime is dubious.
# At the time of deprecation this was no behaviour change, but
- # it has to be considered when the deprecations is done.
+ # it has to be considered when the deprecations are done.
assert_(np.equal(np.datetime64('NaT'), None))
def test_void_dtype_equality_failures(self):
@@ -277,7 +277,7 @@ class TestComparisonDeprecations(_DeprecationTestCase):
with warnings.catch_warnings() as l:
warnings.filterwarnings("always")
assert_raises(TypeError, f, arg1, arg2)
- assert not l
+ assert_(not l)
else:
# py2
assert_warns(DeprecationWarning, f, arg1, arg2)
@@ -338,8 +338,8 @@ class TestIdentityComparisonDeprecations(_DeprecationTestCase):
class TestAlterdotRestoredotDeprecations(_DeprecationTestCase):
"""The alterdot/restoredot functions are deprecated.
- These functions no longer do anything in numpy 1.10, so should not be
- used.
+ These functions no longer do anything in numpy 1.10, so
+ they should not be used.
"""
@@ -350,7 +350,7 @@ class TestAlterdotRestoredotDeprecations(_DeprecationTestCase):
class TestBooleanIndexShapeMismatchDeprecation():
"""Tests deprecation for boolean indexing where the boolean array
- does not match the input array along the given diemsions.
+ does not match the input array along the given dimensions.
"""
message = r"boolean index did not match indexed array"
@@ -400,5 +400,18 @@ class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
+class TestTestDeprecated(object):
+ def test_assert_deprecated(self):
+ test_case_instance = _DeprecationTestCase()
+ test_case_instance.setUp()
+ assert_raises(AssertionError,
+ test_case_instance.assert_deprecated,
+ lambda: None)
+
+ def foo():
+ warnings.warn("foo", category=DeprecationWarning)
+
+ test_case_instance.assert_deprecated(foo)
+
if __name__ == "__main__":
run_module_suite()
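
The new ``TestTestDeprecated`` case exercises the test machinery itself:
``assert_deprecated`` must fail for a callable that emits no warning and pass
for one that emits a ``DeprecationWarning``. The same contract, sketched with
the public ``numpy.testing`` helpers instead of the private test class::

    import warnings
    from numpy.testing import assert_warns, assert_raises

    def quiet():
        return None        # emits no warning

    def noisy():
        warnings.warn("foo", category=DeprecationWarning)

    assert_warns(DeprecationWarning, noisy)    # passes
    assert_raises(AssertionError,              # quiet() must be rejected
                  assert_warns, DeprecationWarning, quiet)
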
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index 8d39fa4c0..a8b29ecd1 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -79,7 +79,8 @@ def _check_assignment(srcidx, dstidx):
cpy[dstidx] = arr[srcidx]
arr[dstidx] = arr[srcidx]
- assert np.all(arr == cpy), 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx)
+ assert_(np.all(arr == cpy),
+ 'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
def test_overlapping_assignments():
@@ -129,7 +130,7 @@ def test_diophantine_fuzz():
if X is None:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
- assert X_simplified is None, (A, U, b, X_simplified)
+ assert_(X_simplified is None, (A, U, b, X_simplified))
# Check no solution exists (provided the problem is
# small enough so that brute force checking doesn't
@@ -149,7 +150,7 @@ def test_diophantine_fuzz():
else:
# Check the simplified decision problem agrees
X_simplified = solve_diophantine(A, U, b, simplify=1)
- assert X_simplified is not None, (A, U, b, X_simplified)
+ assert_(X_simplified is not None, (A, U, b, X_simplified))
# Check validity
assert_(sum(a*x for a, x in zip(A, X)) == b)
@@ -391,7 +392,7 @@ def test_internal_overlap_slices():
s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
a = x[s1].transpose(t1)
- assert not internal_overlap(a)
+ assert_(not internal_overlap(a))
cases += 1
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index 1585586ca..e41758c51 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -103,28 +103,28 @@ class TestMemmap(TestCase):
shape=self.shape)
tmp = (fp + 10)
if isinstance(tmp, memmap):
- assert tmp._mmap is not fp._mmap
+ assert_(tmp._mmap is not fp._mmap)
def test_indexing_drops_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
tmp = fp[[(1, 2), (2, 3)]]
if isinstance(tmp, memmap):
- assert tmp._mmap is not fp._mmap
+ assert_(tmp._mmap is not fp._mmap)
def test_slicing_keeps_references(self):
fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
shape=self.shape)
- assert fp[:2, :2]._mmap is fp._mmap
+ assert_(fp[:2, :2]._mmap is fp._mmap)
def test_view(self):
fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
new1 = fp.view()
new2 = new1.view()
- assert(new1.base is fp)
- assert(new2.base is fp)
+ assert_(new1.base is fp)
+ assert_(new2.base is fp)
new_array = asarray(fp)
- assert(new_array.base is fp)
+ assert_(new_array.base is fp)
if __name__ == "__main__":
run_module_suite()
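
These tests pin down when a ``memmap`` result still references the underlying
``mmap`` object: slices and views keep the reference (their ``base`` chain
leads back to the original), while arithmetic results and fancy indexing drop
it. A quick illustration against a temporary file::

    import tempfile
    import numpy as np

    with tempfile.NamedTemporaryFile() as f:
        fp = np.memmap(f, dtype='float32', mode='w+', shape=(3, 4))
        sl = fp[:2, :2]
        print(sl._mmap is fp._mmap)    # True: slicing keeps the reference
        print(fp.view().base is fp)    # True: views chain back to fp
        tmp = fp + 10                  # arithmetic detaches from the file
        print(type(tmp))
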
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 693847273..593607954 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -3588,8 +3588,8 @@ class TestFlat(TestCase):
self.a.flat[12] = 100.0
except ValueError:
testpassed = True
- assert testpassed
- assert self.a.flat[12] == 12.0
+ assert_(testpassed)
+ assert_(self.a.flat[12] == 12.0)
def test_discontiguous(self):
testpassed = False
@@ -3597,8 +3597,8 @@ class TestFlat(TestCase):
self.b.flat[4] = 100.0
except ValueError:
testpassed = True
- assert testpassed
- assert self.b.flat[4] == 12.0
+ assert_(testpassed)
+ assert_(self.b.flat[4] == 12.0)
def test___array__(self):
c = self.a.flat.__array__()
@@ -3606,16 +3606,16 @@ class TestFlat(TestCase):
e = self.a0.flat.__array__()
f = self.b0.flat.__array__()
- assert c.flags.writeable is False
- assert d.flags.writeable is False
- assert e.flags.writeable is True
- assert f.flags.writeable is True
+ assert_(c.flags.writeable is False)
+ assert_(d.flags.writeable is False)
+ assert_(e.flags.writeable is True)
+ assert_(f.flags.writeable is True)
- assert c.flags.updateifcopy is False
- assert d.flags.updateifcopy is False
- assert e.flags.updateifcopy is False
- assert f.flags.updateifcopy is True
- assert f.base is self.b0
+ assert_(c.flags.updateifcopy is False)
+ assert_(d.flags.updateifcopy is False)
+ assert_(e.flags.updateifcopy is False)
+ assert_(f.flags.updateifcopy is True)
+ assert_(f.base is self.b0)
class TestResize(TestCase):
def test_basic(self):
@@ -5440,14 +5440,14 @@ class TestNewBufferProtocol(object):
if np.ones((10, 1), order="C").flags.f_contiguous:
c.strides = (-1, 80, 8)
- assert memoryview(c).strides == (800, 80, 8)
+ assert_(memoryview(c).strides == (800, 80, 8))
# Writing C-contiguous data to a BytesIO buffer should work
fd = io.BytesIO()
fd.write(c.data)
fortran = c.T
- assert memoryview(fortran).strides == (8, 80, 800)
+ assert_(memoryview(fortran).strides == (8, 80, 800))
arr = np.ones((1, 10))
if arr.flags.f_contiguous:
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index 43dad42f1..b7e146b5a 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -328,8 +328,8 @@ class TestSeterr(TestCase):
def log_err(*args):
self.called += 1
extobj_err = args
- assert (len(extobj_err) == 2)
- assert ("divide" in extobj_err[0])
+ assert_(len(extobj_err) == 2)
+ assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index d8fd0acc3..e8cf7fde0 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -5,7 +5,7 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-from numpy.testing import TestCase, run_module_suite
+from numpy.testing import TestCase, run_module_suite, assert_
class A(object):
@@ -26,17 +26,17 @@ class C0(B0):
class TestInherit(TestCase):
def test_init(self):
x = B(1.0)
- assert str(x) == '1.0'
+ assert_(str(x) == '1.0')
y = C(2.0)
- assert str(y) == '2.0'
+ assert_(str(y) == '2.0')
z = D(3.0)
- assert str(z) == '3.0'
+ assert_(str(z) == '3.0')
def test_init2(self):
x = B0(1.0)
- assert str(x) == '1.0'
+ assert_(str(x) == '1.0')
y = C0(2.0)
- assert str(y) == '2.0'
+ assert_(str(y) == '2.0')
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index cba083875..0d163c1dc 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -295,8 +295,8 @@ def test_stack():
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
- assert stack([[], [], []]).shape == (3, 0)
- assert stack([[], [], []], axis=1).shape == (0, 3)
+ assert_(stack([[], [], []]).shape == (3, 0))
+ assert_(stack([[], [], []], axis=1).shape == (0, 3))
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 934d91e7c..eb0985386 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -37,17 +37,17 @@ class TestUfuncKwargs(TestCase):
class TestUfunc(TestCase):
def test_pickle(self):
import pickle
- assert pickle.loads(pickle.dumps(np.sin)) is np.sin
+ assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin)
# Check that ufunc not defined in the top level numpy namespace such as
# numpy.core.test_rational.test_add can also be pickled
- assert pickle.loads(pickle.dumps(test_add)) is test_add
+ assert_(pickle.loads(pickle.dumps(test_add)) is test_add)
def test_pickle_withstring(self):
import pickle
astring = asbytes("cnumpy.core\n_ufunc_reconstruct\np0\n"
"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
- assert pickle.loads(astring) is np.cos
+ assert_(pickle.loads(astring) is np.cos)
def test_reduceat_shifting_sum(self):
L = 6
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index ad235ed19..2f2d63b59 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -1,23 +1,22 @@
from __future__ import division, absolute_import, print_function
-import re
import os
+import re
import sys
import types
from copy import copy
-
-from distutils.ccompiler import *
from distutils import ccompiler
+from distutils.ccompiler import *
from distutils.errors import DistutilsExecError, DistutilsModuleError, \
DistutilsPlatformError
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from numpy.distutils import log
+from numpy.distutils.compat import get_exception
from numpy.distutils.exec_command import exec_command
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
quote_args, get_num_build_jobs
-from numpy.distutils.compat import get_exception
def replace_method(klass, method_name, func):
@@ -634,7 +633,6 @@ ccompiler.gen_preprocess_options = gen_preprocess_options
# that removing this fix causes f2py problems on Windows XP (see ticket #723).
# Specifically, on WinXP when gfortran is installed in a directory path, which
# contains spaces, then f2py is unable to find it.
-import re
import string
_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 9261dba22..3298789ee 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -268,14 +268,14 @@ def histogram(a, bins=10, range=None, normed=False, weights=None,
large datasets respectively. Switchover point is usually x.size~1000.
'FD' (Freedman Diaconis Estimator)
- .. math:: h = 2 \\frac{IQR}{n^{-1/3}}
+ .. math:: h = 2 \\frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good
for large datasets. The IQR is very robust to outliers.
'Scott'
- .. math:: h = \\frac{3.5\\sigma}{n^{-1/3}}
+ .. math:: h = \\frac{3.5\\sigma}{n^{1/3}}
The binwidth is proportional to the standard deviation (sd) of the data
and inversely proportional to cube root of a.size. Can be too
conservative for small datasets, but is quite good for large datasets.
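
The docstring fix corrects the sign of the exponent: both estimators shrink
the bin width as the sample grows, i.e. ``h = 2 * IQR / n**(1/3)`` for
Freedman-Diaconis and ``h = 3.5 * sigma / n**(1/3)`` for Scott. A worked check
of the corrected Freedman-Diaconis formula::

    import numpy as np

    x = np.random.standard_normal(10000)
    q75, q25 = np.percentile(x, [75, 25])
    h = 2 * (q75 - q25) / len(x) ** (1 / 3)
    print(h)   # ~0.125: IQR of a standard normal is ~1.35, 10000**(1/3) ~21.5
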
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index af904e96a..bffc5c63e 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -1815,9 +1815,9 @@ M 33 21.99
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
- assert test.dtype['f0'] == np.float
- assert test.dtype['f1'] == np.int64
- assert test.dtype['f2'] == np.integer
+ assert_(test.dtype['f0'] == np.float)
+ assert_(test.dtype['f1'] == np.int64)
+ assert_(test.dtype['f2'] == np.integer)
assert_allclose(test['f0'], 73786976294838206464.)
assert_equal(test['f1'], 17179869184)
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index f418504c2..7a7b37b98 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -395,12 +395,12 @@ class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
def test_dtype_error(self):
for f in self.nanfuncs:
- for dtype in [np.bool_, np.int_, np.object]:
- assert_raises(TypeError, f, _ndat, axis=1, dtype=np.int)
+ for dtype in [np.bool_, np.int_, np.object_]:
+ assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
- for dtype in [np.bool_, np.int_, np.object]:
+ for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
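
The bug fixed here is that the loop ignored its own loop variable and always
passed ``np.int``; each dtype in the list is now actually exercised. The
behaviour under test: the nan-aggregates refuse bool, integer and object
dtypes when the input is inexact. For example::

    import numpy as np
    from numpy.testing import assert_raises

    data = np.array([[1.0, np.nan], [3.0, 4.0]])

    for dtype in (np.bool_, np.int_, np.object_):
        assert_raises(TypeError, np.nanmean, data, axis=1, dtype=dtype)
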
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index afa098f12..fc139be19 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -61,7 +61,7 @@ def get_rtol(dtype):
class LinalgCase(object):
def __init__(self, name, a, b, exception_cls=None):
- assert isinstance(name, str)
+ assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
@@ -267,7 +267,7 @@ def _stride_comb_iter(x):
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
- assert np.all(xi == x)
+ assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index cecdedf26..e0d9f072c 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -756,47 +756,47 @@ class TestMaskedArray(TestCase):
t_ma = masked_array(data = [([1, 2, 3],)],
mask = [([False, True, False],)],
fill_value = ([999999, 999999, 999999],),
- dtype = [('a', '<i8', (3,))])
- assert str(t_ma[0]) == "([1, --, 3],)"
- assert repr(t_ma[0]) == "([1, --, 3],)"
+ dtype = [('a', '<i4', (3,))])
+ assert_(str(t_ma[0]) == "([1, --, 3],)")
+ assert_(repr(t_ma[0]) == "([1, --, 3],)")
# additional tests with structured arrays
t_2d = masked_array(data = [([[1, 2], [3,4]],)],
mask = [([[False, True], [True, False]],)],
- dtype = [('a', '<i8', (2,2))])
- assert str(t_2d[0]) == "([[1, --], [--, 4]],)"
- assert repr(t_2d[0]) == "([[1, --], [--, 4]],)"
+ dtype = [('a', '<i4', (2,2))])
+ assert_(str(t_2d[0]) == "([[1, --], [--, 4]],)")
+ assert_(repr(t_2d[0]) == "([[1, --], [--, 4]],)")
t_0d = masked_array(data = [(1,2)],
mask = [(True,False)],
- dtype = [('a', '<i8'), ('b', '<i8')])
- assert str(t_0d[0]) == "(--, 2)"
- assert repr(t_0d[0]) == "(--, 2)"
+ dtype = [('a', '<i4'), ('b', '<i4')])
+ assert_(str(t_0d[0]) == "(--, 2)")
+ assert_(repr(t_0d[0]) == "(--, 2)")
t_2d = masked_array(data = [([[1, 2], [3,4]], 1)],
mask = [([[False, True], [True, False]], False)],
- dtype = [('a', '<i8', (2,2)), ('b', float)])
- assert str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
- assert repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)"
+ dtype = [('a', '<i4', (2,2)), ('b', float)])
+ assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
+ assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)")
t_ne = masked_array(data=[(1, (1, 1))],
mask=[(True, (True, False))],
- dtype = [('a', '<i8'), ('b', 'i4,i4')])
- assert str(t_ne[0]) == "(--, (--, 1))"
- assert repr(t_ne[0]) == "(--, (--, 1))"
+ dtype = [('a', '<i4'), ('b', 'i4,i4')])
+ assert_(str(t_ne[0]) == "(--, (--, 1))")
+ assert_(repr(t_ne[0]) == "(--, (--, 1))")
def test_object_with_array(self):
mx1 = masked_array([1.], mask=[True])
mx2 = masked_array([1., 2.])
mx = masked_array([mx1, mx2], mask=[False, True])
- assert mx[0] is mx1
- assert mx[1] is not mx2
- assert np.all(mx[1].data == mx2.data)
- assert np.all(mx[1].mask)
+ assert_(mx[0] is mx1)
+ assert_(mx[1] is not mx2)
+ assert_(np.all(mx[1].data == mx2.data))
+ assert_(np.all(mx[1].mask))
# check that we return a view.
mx[1].data[0] = 0.
- assert mx2[0] == 0.
+ assert_(mx2[0] == 0.)
class TestMaskedArrayArithmetic(TestCase):
@@ -4254,7 +4254,7 @@ def test_append_masked_array_along_axis():
def test_default_fill_value_complex():
# regression test for Python 3, where 'unicode' was not defined
- assert default_fill_value(1 + 1j) == 1.e20 + 0.0j
+ assert_(default_fill_value(1 + 1j) == 1.e20 + 0.0j)
###############################################################################
if __name__ == "__main__":
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index 39004178d..7c44088a7 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -188,7 +188,7 @@ double rk_beta(rk_state *state, double a, double b)
if ((a <= 1.0) && (b <= 1.0))
{
double U, V, X, Y;
- /* Use Jonk's algorithm */
+ /* Use Johnk's algorithm */
while (1)
{
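
The comment fix names Johnk's algorithm, the rejection method used for
``beta(a, b)`` when both shape parameters are at most 1: draw uniform U and V,
set X = U**(1/a) and Y = V**(1/b), and accept X / (X + Y) whenever
X + Y <= 1. A direct Python transcription, as a sketch rather than a
line-for-line mirror of the C implementation::

    import random

    def johnk_beta(a, b, rng=random):
        # valid for 0 < a <= 1 and 0 < b <= 1, the branch patched above
        while True:
            u, v = rng.random(), rng.random()
            x, y = u ** (1.0 / a), v ** (1.0 / b)
            if x + y <= 1.0:
                return x / (x + y)

    print([johnk_beta(0.5, 0.5) for _ in range(5)])   # Beta(0.5, 0.5) draws
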
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 080591e5e..d6ba58bb2 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -1280,7 +1280,7 @@ cdef class RandomState:
Random values in a given shape.
- Create an array of the given shape and propagate it with
+ Create an array of the given shape and populate it with
random samples from a uniform distribution
over ``[0, 1)``.
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index ab7f90d82..193844030 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -26,12 +26,12 @@ class TestSeed(TestCase):
assert_equal(s.randint(1000), 265)
def test_invalid_scalar(self):
- # seed must be a unsigned 32 bit integers
+ # seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, -0.5)
assert_raises(ValueError, np.random.RandomState, -1)
def test_invalid_array(self):
- # seed must be a unsigned 32 bit integers
+ # seed must be an unsigned 32 bit integer
assert_raises(TypeError, np.random.RandomState, [-0.5])
assert_raises(ValueError, np.random.RandomState, [-1])
assert_raises(ValueError, np.random.RandomState, [4294967296])
@@ -129,7 +129,7 @@ class TestSetState(TestCase):
self.prng.negative_binomial(0.5, 0.5)
class TestRandomDist(TestCase):
- # Make sure the random distrobution return the correct value for a
+ # Make sure the random distribution returns the correct value for a
# given seed
def setUp(self):
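
The reworded comments state the seeding contract: a ``RandomState`` seed must
be an unsigned 32-bit integer (or an array of them), so negatives raise
``ValueError`` and non-integers raise ``TypeError``. Checking the contract
directly, mirroring the tests above::

    import numpy as np
    from numpy.testing import assert_raises

    np.random.RandomState(4294967295)          # 2**32 - 1 is accepted
    assert_raises(ValueError, np.random.RandomState, -1)
    assert_raises(ValueError, np.random.RandomState, [4294967296])
    assert_raises(TypeError, np.random.RandomState, -0.5)
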
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 552383d77..74efd2650 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -64,11 +64,12 @@ def test_f2py():
if sys.platform == 'win32':
f2py_cmd = r"%s\Scripts\f2py.py" % dirname(sys.executable)
code, stdout, stderr = run_command([sys.executable, f2py_cmd, '-v'])
- assert_equal(stdout.strip(), asbytes('2'))
+ success = stdout.strip() == asbytes('2')
+ assert_(success, "Warning: f2py not found in path")
else:
# unclear what f2py cmd was installed as, check plain (f2py) and
# current python version specific one (f2py3.4)
- f2py_cmds = ['f2py', 'f2py' + basename(sys.executable)[6:]]
+ f2py_cmds = ('f2py', 'f2py' + basename(sys.executable)[6:])
success = False
for f2py_cmd in f2py_cmds:
try:
@@ -76,6 +77,6 @@ def test_f2py():
assert_equal(stdout.strip(), asbytes('2'))
success = True
break
- except FileNotFoundError:
+ except OSError:
pass
- assert_(success, "wasn't able to find f2py or %s on commandline" % f2py_cmds[1])
+ assert_(success, "Warning: neither %s nor %s found in path" % f2py_cmds)
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index 795915d0b..939594d8c 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -1,4 +1,5 @@
#!/bin/bash
+
set -ex
# Travis legacy boxes give you 1.5 CPUs, container-based boxes give you 2 CPUs
@@ -10,54 +11,94 @@ if [ -r /usr/lib/libeatmydata/libeatmydata.so ]; then
export LD_PRELOAD=/usr/lib/libeatmydata/libeatmydata.so
fi
+# travis venv tests override python
+PYTHON=${PYTHON:-python}
+PIP=${PIP:-pip}
+
+# explicit python version needed here
+if [ -n "$USE_DEBUG" ]; then
+ PYTHON="python3-dbg"
+fi
+
+if [ -n "$PYTHON_OO" ]; then
+ PYTHON="${PYTHON} -OO"
+fi
+
# make some warnings fatal, mostly to match windows compilers
werrors="-Werror=declaration-after-statement -Werror=vla -Werror=nonnull"
setup_base()
{
# We used to use 'setup.py install' here, but that has the terrible
- # behaviour that if a copy of the package is already installed in
- # the install location, then the new copy just gets dropped on top
- # of it. Travis typically has a stable numpy release pre-installed,
- # and if we don't remove it, then we can accidentally end up
- # e.g. running old test modules that were in the stable release but
- # have been removed from master. (See gh-2765, gh-2768.) Using 'pip
- # install' also has the advantage that it tests that numpy is 'pip
- # install' compatible, see e.g. gh-2766...
-if [ -z "$USE_DEBUG" ]; then
- if [ -z "$IN_CHROOT" ]; then
- $PIP install .
+ # behaviour that if a copy of the package is already installed in the
+ # install location, then the new copy just gets dropped on top of it.
+ # Travis typically has a stable numpy release pre-installed, and if we
+ # don't remove it, then we can accidentally end up e.g. running old
+ # test modules that were in the stable release but have been removed
+ # from master. (See gh-2765, gh-2768.) Using 'pip install' also has
+ # the advantage that it tests that numpy is 'pip install' compatible,
+ # see e.g. gh-2766...
+ if [ -z "$USE_DEBUG" ]; then
+ if [ -z "$IN_CHROOT" ]; then
+ $PIP install .
+ else
+ sysflags="$($PYTHON -c "from distutils import sysconfig; \
+ print (sysconfig.get_config_var('CFLAGS'))")"
+ CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install . 2>&1 | tee log
+ grep -v "_configtest" log \
+ | grep -vE "ld returned 1|no previously-included files matching" \
+ | grep -E "warning\>" \
+ | tee warnings
+ # Check for an acceptable number of warnings. Some warnings are out of
+ # our control, so adjust the number as needed.
+ [[ $(wc -l < warnings) -lt 1 ]]
+ fi
else
- sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")"
- CFLAGS="$sysflags $werrors -Wlogical-op" $PIP install . 2>&1 | tee log
- grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>";
- # accept a mysterious memset warning that shows with -flto
- test $(grep -v "_configtest" log | grep -vE "ld returned 1|no previously-included files matching" | grep -E "warning\>" -c) -lt 2;
+ sysflags="$($PYTHON -c "from distutils import sysconfig; \
+ print (sysconfig.get_config_var('CFLAGS'))")"
+ CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace
fi
-else
- sysflags="$($PYTHON -c "from distutils import sysconfig; print (sysconfig.get_config_var('CFLAGS'))")"
- CFLAGS="$sysflags $werrors" $PYTHON setup.py build_ext --inplace
-fi
}
setup_chroot()
{
# this can all be replaced with:
# apt-get install libpython2.7-dev:i386
- # CC="gcc -m32" LDSHARED="gcc -m32 -shared" LDFLAGS="-m32 -shared" linux32 python setup.py build
+ # CC="gcc -m32" LDSHARED="gcc -m32 -shared" LDFLAGS="-m32 -shared" \
+ # linux32 python setup.py build
# when travis updates to ubuntu 14.04
+ #
+ # Numpy may not distinguish between 64 and 32 bit atlas in the
+ # configuration stage.
DIR=$1
set -u
- sudo debootstrap --variant=buildd --include=fakeroot,build-essential --arch=$ARCH --foreign $DIST $DIR
+ sudo debootstrap --variant=buildd --include=fakeroot,build-essential \
+ --arch=$ARCH --foreign $DIST $DIR
sudo chroot $DIR ./debootstrap/debootstrap --second-stage
+
+ # put the numpy repo in the chroot directory
sudo rsync -a $TRAVIS_BUILD_DIR $DIR/
- echo deb http://archive.ubuntu.com/ubuntu/ $DIST main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list
- echo deb http://archive.ubuntu.com/ubuntu/ $DIST-updates main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list
- echo deb http://security.ubuntu.com/ubuntu $DIST-security main restricted universe multiverse | sudo tee -a $DIR/etc/apt/sources.list
+
+ # set up repos in the chroot directory for installing packages
+ echo deb http://archive.ubuntu.com/ubuntu/ \
+ $DIST main restricted universe multiverse \
+ | sudo tee -a $DIR/etc/apt/sources.list
+ echo deb http://archive.ubuntu.com/ubuntu/ \
+ $DIST-updates main restricted universe multiverse \
+ | sudo tee -a $DIR/etc/apt/sources.list
+ echo deb http://security.ubuntu.com/ubuntu \
+ $DIST-security main restricted universe multiverse \
+ | sudo tee -a $DIR/etc/apt/sources.list
+
+ # install needed packages
sudo chroot $DIR bash -c "apt-get update"
- sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes eatmydata"
- echo /usr/lib/libeatmydata/libeatmydata.so | sudo tee -a $DIR/etc/ld.so.preload
- sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes libatlas-dev libatlas-base-dev gfortran python3-dev python3-nose python3-pip cython3 cython"
+ sudo chroot $DIR bash -c "apt-get install -qq -y --force-yes \
+ eatmydata libatlas-dev libatlas-base-dev gfortran \
+ python-dev python-nose python-pip cython"
+
+ # faster operation with preloaded eatmydata
+ echo /usr/lib/libeatmydata/libeatmydata.so | \
+ sudo tee -a $DIR/etc/ld.so.preload
}
run_test()
@@ -70,49 +111,48 @@ run_test()
# of numpy in the source directory.
mkdir -p empty
cd empty
- INSTALLDIR=$($PYTHON -c "import os; import numpy; print(os.path.dirname(numpy.__file__))")
+ INSTALLDIR=$($PYTHON -c \
+ "import os; import numpy; print(os.path.dirname(numpy.__file__))")
export PYTHONWARNINGS=default
- $PYTHON ../tools/test-installed-numpy.py # --mode=full
- # - coverage run --source=$INSTALLDIR --rcfile=../.coveragerc $(which $PYTHON) ../tools/test-installed-numpy.py
- # - coverage report --rcfile=../.coveragerc --show-missing
+ $PYTHON ../tools/test-installed-numpy.py
+ if [ -n "$USE_ASV" ]; then
+ pushd ../benchmarks
+ $PYTHON `which asv` machine --machine travis
+ $PYTHON `which asv` dev 2>&1| tee asv-output.log
+ if grep -q Traceback asv-output.log; then
+ echo "Some benchmarks have errors!"
+ exit 1
+ fi
+ popd
+ fi
}
-# travis venv tests override python
-PYTHON=${PYTHON:-python}
-PIP=${PIP:-pip}
-
-if [ -n "$USE_DEBUG" ]; then
- PYTHON=python3-dbg
-fi
-
-if [ -n "$PYTHON_OO" ]; then
- PYTHON="$PYTHON -OO"
-fi
-
export PYTHON
export PIP
if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
# Build wheel
$PIP install wheel
+ # ensure that the pip / setuptools versions deployed inside
+ # the venv are recent enough
+ $PIP install -U virtualenv
$PYTHON setup.py bdist_wheel
# Make another virtualenv to install into
- virtualenv --python=python venv-for-wheel
+ virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
- $PIP install --pre --no-index --upgrade --find-links=. numpy
- $PIP install nose
+ pip install --pre --no-index --upgrade --find-links=. numpy
+ pip install nose
popd
run_test
-elif [ "$USE_CHROOT" != "1" ]; then
- setup_base
- run_test
elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then
DIR=/chroot
setup_chroot $DIR
# run again in chroot with this time testing
- sudo linux32 chroot $DIR bash -c "cd numpy && PYTHON=python3 PIP=pip3 IN_CHROOT=1 $0 test"
+ sudo linux32 chroot $DIR bash -c \
+ "cd numpy && PYTHON=python PIP=pip IN_CHROOT=1 $0 test"
else
+ setup_base
run_test
fi
diff --git a/tools/travis-upload-wheel.sh b/tools/travis-upload-wheel.sh
new file mode 100755
index 000000000..60b9aa7cb
--- /dev/null
+++ b/tools/travis-upload-wheel.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+set -ex
+
+export CLOUD_CONTAINER_NAME=travis-dev-wheels
+
+if [[ ( $USE_WHEEL == 1 ) && \
+ ( "$TRAVIS_BRANCH" == "master" ) && \
+ ( "$TRAVIS_PULL_REQUEST" == "false" ) ]]; then
+ pip install wheelhouse_uploader
+ python -m wheelhouse_uploader upload --local-folder $TRAVIS_BUILD_DIR/dist/ $CLOUD_CONTAINER_NAME
+fi