author     luzpaz <luzpaz@users.noreply.github.com>   2019-05-31 03:13:33 -0400
committer  Matti Picus <matti.picus@gmail.com>        2019-05-31 10:13:33 +0300
commit     0c70787c04d7b0febacb14edfe214cb68d87b6c3 (patch)
tree       a518387f3e9134d753ac4d09009ab4764a8dd4b2
parent     43465f725bc47a36f08c5e69a61c2702a97491b8 (diff)
download   numpy-0c70787c04d7b0febacb14edfe214cb68d87b6c3.tar.gz
MAINT: Misc. typo fixes (#13664)
* DOC, MAINT: Misc. typo fixes

  Found via `codespell`
-rw-r--r--  benchmarks/benchmarks/bench_function_base.py | 2
-rw-r--r--  doc/DISTUTILS.rst.txt | 2
-rw-r--r--  doc/Makefile | 2
-rw-r--r--  doc/source/reference/c-api.array.rst | 2
-rw-r--r--  doc/source/reference/c-api.coremath.rst | 8
-rw-r--r--  doc/source/reference/random/index.rst | 6
-rw-r--r--  doc/source/reference/random/new-or-different.rst | 2
-rw-r--r--  numpy/core/_dtype_ctypes.py | 2
-rw-r--r--  numpy/core/einsumfunc.py | 2
-rw-r--r--  numpy/core/numerictypes.py | 2
-rw-r--r--  numpy/core/shape_base.py | 8
-rw-r--r--  numpy/core/src/multiarray/arraytypes.c.src | 4
-rw-r--r--  numpy/core/src/multiarray/ctors.c | 2
-rw-r--r--  numpy/core/src/multiarray/methods.c | 2
-rw-r--r--  numpy/core/src/npymath/npy_math_complex.c.src | 2
-rw-r--r--  numpy/core/src/npysort/selection.c.src | 2
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c | 2
-rw-r--r--  numpy/core/tests/test_dtype.py | 2
-rw-r--r--  numpy/core/tests/test_half.py | 2
-rw-r--r--  numpy/core/tests/test_nditer.py | 2
-rw-r--r--  numpy/core/tests/test_scalar_methods.py | 2
-rw-r--r--  numpy/core/tests/test_scalarprint.py | 2
-rw-r--r--  numpy/distutils/fcompiler/__init__.py | 4
-rw-r--r--  numpy/doc/basics.py | 2
-rw-r--r--  numpy/doc/indexing.py | 5
-rw-r--r--  numpy/lib/function_base.py | 4
-rw-r--r--  numpy/lib/recfunctions.py | 2
-rw-r--r--  numpy/lib/tests/test_histograms.py | 2
-rw-r--r--  numpy/random/generator.pyx | 2
-rw-r--r--  numpy/random/mtrand.pyx | 2
-rw-r--r--  numpy/random/src/philox/philox-benchmark.c | 2
-rw-r--r--  numpy/random/src/philox/philox-test-data-gen.c | 2
-rw-r--r--  numpy/random/src/threefry/threefry-benchmark.c | 2
-rw-r--r--  numpy/random/src/threefry/threefry-test-data-gen.c | 2
-rw-r--r--  numpy/random/src/xoshiro256/xoshiro256-test-data-gen.c | 2
-rw-r--r--  numpy/random/src/xoshiro512/xoshiro512-test-data-gen.c | 2
-rw-r--r--  numpy/random/tests/test_randomstate.py | 2
-rw-r--r--  numpy/testing/_private/utils.py | 4
38 files changed, 52 insertions, 51 deletions
diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py
index f730bf6ba..2170c4fc4 100644
--- a/benchmarks/benchmarks/bench_function_base.py
+++ b/benchmarks/benchmarks/bench_function_base.py
@@ -269,7 +269,7 @@ class SortWorst(Benchmark):
def time_sort_worst(self):
np.sort(self.worst)
- # Retain old benchmark name for backward compatability
+ # Retain old benchmark name for backward compatibility
time_sort_worst.benchmark_name = "bench_function_base.Sort.time_sort_worst"
diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt
index 42aa9561d..a4909d66d 100644
--- a/doc/DISTUTILS.rst.txt
+++ b/doc/DISTUTILS.rst.txt
@@ -302,7 +302,7 @@ Template files
NumPy Distutils preprocesses C source files (extension: :file:`.c.src`) written
in a custom templating language to generate C code. The :c:data:`@` symbol is
-used to wrap macro-style variables to empower a string substitution mechansim
+used to wrap macro-style variables to empower a string substitution mechanism
that might describe (for instance) a set of data types.
As a more detailed scenario, a loop in the NumPy C source code may
diff --git a/doc/Makefile b/doc/Makefile
index 776f9b778..842d2ad13 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -5,7 +5,7 @@
# issues with the amendments to PYTHONPATH and install paths (see DIST_VARS).
# Use explicit "version_info" indexing since make cannot handle colon characters, and
-# evaluate it now to allow easier debugging when printing the varaible
+# evaluate it now to allow easier debugging when printing the variable
PYVER:=$(shell python3 -c 'from sys import version_info as v; print("{0}.{1}".format(v[0], v[1]))')
PYTHON = python$(PYVER)
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api.array.rst
index de412a5d2..d01d28f0e 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api.array.rst
@@ -219,7 +219,7 @@ From scratch
If *data* is ``NULL``, then new unitinialized memory will be allocated and
*flags* can be non-zero to indicate a Fortran-style contiguous array. Use
- :c:func:`PyArray_FILLWBYTE` to initialze the memory.
+ :c:func:`PyArray_FILLWBYTE` to initialize the memory.
If *data* is not ``NULL``, then it is assumed to point to the memory
to be used for the array and the *flags* argument is used as the
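The uninitialized-versus-filled distinction described above has a rough Python-level analogue in np.empty versus np.zeros; a small sketch of that analogue (Python API only, not the C calls themselves):

    import numpy as np

    # np.empty allocates without initializing, like passing data=NULL above;
    # the contents are whatever bytes happened to be in the buffer.
    a = np.empty((2, 3), dtype=np.float64)

    # Filling afterwards plays the role PyArray_FILLWBYTE plays in C
    # (here the fill value is 0).
    a.fill(0)

    # np.zeros allocates and zero-fills in one step.
    b = np.zeros((2, 3), dtype=np.float64)
    print(np.array_equal(a, b))   # True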
diff --git a/doc/source/reference/c-api.coremath.rst b/doc/source/reference/c-api.coremath.rst
index bb457eb0d..7e00322f9 100644
--- a/doc/source/reference/c-api.coremath.rst
+++ b/doc/source/reference/c-api.coremath.rst
@@ -185,7 +185,7 @@ Those can be useful for precise floating point comparison.
* NPY_FPE_INVALID
Note that :c:func:`npy_get_floatstatus_barrier` is preferable as it prevents
- agressive compiler optimizations reordering the call relative to
+ aggressive compiler optimizations reordering the call relative to
the code setting the status, which could lead to incorrect results.
.. versionadded:: 1.9.0
@@ -193,7 +193,7 @@ Those can be useful for precise floating point comparison.
.. c:function:: int npy_get_floatstatus_barrier(char*)
Get floating point status. A pointer to a local variable is passed in to
- prevent aggresive compiler optimizations from reodering this function call
+ prevent aggressive compiler optimizations from reodering this function call
relative to the code setting the status, which could lead to incorrect
results.
@@ -211,7 +211,7 @@ Those can be useful for precise floating point comparison.
Clears the floating point status. Returns the previous status mask.
Note that :c:func:`npy_clear_floatstatus_barrier` is preferable as it
- prevents agressive compiler optimizations reordering the call relative to
+ prevents aggressive compiler optimizations reordering the call relative to
the code setting the status, which could lead to incorrect results.
.. versionadded:: 1.9.0
@@ -219,7 +219,7 @@ Those can be useful for precise floating point comparison.
.. c:function:: int npy_clear_floatstatus_barrier(char*)
Clears the floating point status. A pointer to a local variable is passed in to
- prevent aggresive compiler optimizations from reodering this function call.
+ prevent aggressive compiler optimizations from reodering this function call.
Returns the previous status mask.
.. versionadded:: 1.15.0
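At the Python level, the same floating point status flags surface through np.errstate/np.geterr rather than the C barrier functions above; a minimal sketch of that Python-side counterpart (illustrative only, not part of this patch):

    import numpy as np

    # Array operations set the divide/over/under/invalid status flags;
    # np.errstate controls whether they warn, raise, or stay silent.
    with np.errstate(divide='raise'):
        try:
            np.array([1.0]) / np.array([0.0])
        except FloatingPointError as exc:
            print("caught:", exc)

    with np.errstate(invalid='ignore'):
        # 0/0 sets the invalid flag; 'ignore' lets it produce nan quietly.
        print(np.array([0.0]) / np.array([0.0]))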
diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst
index 0b8145735..3159f0e1c 100644
--- a/doc/source/reference/random/index.rst
+++ b/doc/source/reference/random/index.rst
@@ -5,7 +5,7 @@
numpy.random
============
-Numpy's random number routines produce psuedo random numbers using
+Numpy's random number routines produce pseudo random numbers using
combinations of a `BitGenerator` to create sequences and a `Generator`
to use those sequences to sample from different statistical distributions:
@@ -41,7 +41,7 @@ which will be faster than the legacy methods in `RandomState`
`Generator` can be used as a direct replacement for `~RandomState`, although
the random values are generated by `~xoshiro256.Xoshiro256`. The
-`Generator` holds an instance of a BitGenerator. It is accessable as
+`Generator` holds an instance of a BitGenerator. It is accessible as
``gen.bit_generator``.
.. code-block:: python
@@ -127,7 +127,7 @@ What's New or Different
:ref:`Cython <randomgen_cython>`.
* `~.Generator.integers` is now the canonical way to generate integer
random numbers from a discrete uniform distribution. The ``rand`` and
- ``randn`` methods are only availabe through the legacy `~.RandomState`.
+ ``randn`` methods are only available through the legacy `~.RandomState`.
The ``endpoint`` keyword can be used to specify open or closed intervals.
This replaces both ``randint`` and the deprecated ``random_integers``.
* `~.Generator.random` is now the canonical way to generate floating-point
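For context, the Generator/bit_generator/integers behaviour described in this hunk looks like the following with the np.random.default_rng entry point of released NumPy (which postdates the development-era spelling used above):

    import numpy as np

    # default_rng() returns a Generator wrapping a BitGenerator;
    # the bit generator is reachable as .bit_generator.
    rng = np.random.default_rng(12345)
    print(type(rng.bit_generator).__name__)

    # integers() replaces randint/random_integers; endpoint=True
    # makes the interval closed on the right.
    print(rng.integers(1, 6, size=5, endpoint=True))

    # random() is the canonical way to draw floats in [0, 1).
    print(rng.random(3))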
diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst
index 969a9372d..a6de9c8dc 100644
--- a/doc/source/reference/random/new-or-different.rst
+++ b/doc/source/reference/random/new-or-different.rst
@@ -54,7 +54,7 @@ And in more detail:
`~.Generator.standard_gamma`.
* `~.Generator.integers` is now the canonical way to generate integer
random numbers from a discrete uniform distribution. The ``rand`` and
- ``randn`` methods are only availabe through the legacy `~.RandomState`.
+ ``randn`` methods are only available through the legacy `~.RandomState`.
This replaces both ``randint`` and the deprecated ``random_integers``.
* The Box-Muller used to produce NumPy's normals is no longer available.
* All bit generators can produce doubles, uint64s and
diff --git a/numpy/core/_dtype_ctypes.py b/numpy/core/_dtype_ctypes.py
index 0852b1ef2..708241289 100644
--- a/numpy/core/_dtype_ctypes.py
+++ b/numpy/core/_dtype_ctypes.py
@@ -1,7 +1,7 @@
"""
Conversion from ctypes to dtype.
-In an ideal world, we could acheive this through the PEP3118 buffer protocol,
+In an ideal world, we could achieve this through the PEP3118 buffer protocol,
something like::
def dtype_from_ctypes_type(t):
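The conversion this docstring introduces is what lets np.dtype accept ctypes types directly; a short illustration of that public behaviour (output shown for a little-endian platform):

    import ctypes
    import numpy as np

    class Point(ctypes.Structure):
        # two 32-bit integer fields
        _fields_ = [("x", ctypes.c_int32), ("y", ctypes.c_int32)]

    # These conversions are implemented by numpy/core/_dtype_ctypes.py.
    print(np.dtype(ctypes.c_int32))   # int32
    print(np.dtype(Point))            # [('x', '<i4'), ('y', '<i4')]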
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 83b7d8287..3412c3fd5 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -287,7 +287,7 @@ def _update_other_results(results, best):
Returns
-------
mod_results : list
- The list of modifed results, updated with outcome of ``best`` contraction.
+ The list of modified results, updated with outcome of ``best`` contraction.
"""
best_con = best[1]
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 3ec8235db..ab1ff65a4 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -275,7 +275,7 @@ def obj2sctype(rep, default=None):
<class 'list'>
"""
- # prevent abtract classes being upcast
+ # prevent abstract classes being upcast
if isinstance(rep, type) and issubclass(rep, generic):
return rep
# extract dtype from arrays
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 45115adb6..ccec25a7a 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -555,10 +555,10 @@ def _concatenate_shapes(shapes, axis):
ret[(slice(None),) * axis + sl_c] == c
```
- Thses are called slice prefixes since they are used in the recursive
+ These are called slice prefixes since they are used in the recursive
blocking algorithm to compute the left-most slices during the
recursion. Therefore, they must be prepended to rest of the slice
- that was computed deeper in the recusion.
+ that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
to existing slice tuple without creating a new tuple everytime.
@@ -841,9 +841,9 @@ def block(arrays):
return _block_concatenate(arrays, list_ndim, result_ndim)
-# Theses helper functions are mostly used for testing.
+# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
-# or `_block_concatenate` without blocking large arrays to forse the wisdom
+# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
"""
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 5f7bcb8f7..3b986ed04 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -2260,7 +2260,7 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
char *dstptr, *srcptr;
/*
* In certain cases subarray copy can be optimized. This is when
- * swapping is unecessary and the subarrays data type can certainly
+ * swapping is unnecessary and the subarrays data type can certainly
* be simply copied (no object, fields, subarray, and not a user dtype).
*/
npy_bool can_optimize_subarray = (!swap &&
@@ -2347,7 +2347,7 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
int subitemsize;
/*
* In certain cases subarray copy can be optimized. This is when
- * swapping is unecessary and the subarrays data type can certainly
+ * swapping is unnecessary and the subarrays data type can certainly
* be simply copied (no object, fields, subarray, and not a user dtype).
*/
npy_bool can_optimize_subarray = (!swap &&
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 4b524c365..9dc904c08 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -3170,7 +3170,7 @@ PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order)
* Empty
*
* accepts NULL type
- * steals referenct to type
+ * steals a reference to type
*/
NPY_NO_EXPORT PyObject *
PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order)
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index 9254a7a70..b843c7983 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -1687,7 +1687,7 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
Notice because Python does not describe a mechanism to write
raw data to the pickle, this performs a copy to a string first
- This issue is now adressed in protocol 5, where a buffer is serialized
+ This issue is now addressed in protocol 5, where a buffer is serialized
instead of a string,
*/
diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src
index 7aa07f16d..dad381232 100644
--- a/numpy/core/src/npymath/npy_math_complex.c.src
+++ b/numpy/core/src/npymath/npy_math_complex.c.src
@@ -1246,7 +1246,7 @@ _clog_for_large_values@c@(@type@ x, @type@ y,
* Divide x and y by E, and then add 1 to the logarithm. This depends
* on E being larger than sqrt(2).
* Dividing by E causes an insignificant loss of accuracy; however
- * this method is still poor since it is uneccessarily slow.
+ * this method is still poor since it is unnecessarily slow.
*/
if (ax > @TMAX@ / 2) {
*rr = npy_log@c@(npy_hypot@c@(x / NPY_E@c@, y / NPY_E@c@)) + 1;
diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src
index 1e0934558..be645450f 100644
--- a/numpy/core/src/npysort/selection.c.src
+++ b/numpy/core/src/npysort/selection.c.src
@@ -40,7 +40,7 @@ static NPY_INLINE void store_pivot(npy_intp pivot, npy_intp kth,
}
/*
- * If pivot is the requested kth store it, overwritting other pivots if
+ * If pivot is the requested kth store it, overwriting other pivots if
* required. This must be done so iterative partition can work without
* manually shifting lower data offset by kth each time
*/
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index b2400a2a1..96591ba80 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -1737,7 +1737,7 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
}
/*
* For outputs, copy the dtype from op[0] if the type_num
- * matches, similarly to preserve metdata.
+ * matches, similarly to preserve metadata.
*/
else if (i >= nin && op[0] != NULL &&
PyArray_DESCR(op[0])->type_num == type_nums[i]) {
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index c554e9832..f4736d694 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -603,7 +603,7 @@ class TestStructuredDtypeSparseFields(object):
'offsets':[4]}, (2, 3))])
@pytest.mark.xfail(reason="inaccessible data is changed see gh-12686.")
- @pytest.mark.valgrind_error(reason="reads from unitialized buffers.")
+ @pytest.mark.valgrind_error(reason="reads from uninitialized buffers.")
def test_sparse_field_assignment(self):
arr = np.zeros(3, self.dtype)
sparse_arr = arr.view(self.sparse_dtype)
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index 770712501..1e1e6d7d9 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -104,7 +104,7 @@ class TestHalf(object):
# logic will be necessary, an arbitrarily small offset should cause
# normal up/down rounding always.
- # Calculate the expecte pattern:
+ # Calculate the expected pattern:
cmp_patterns = f16s_patterns[1:-1].copy()
if shift == "down" and offset != "up":
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 26fd9c346..9499bedec 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2292,7 +2292,7 @@ class TestIterNested(object):
assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
vals = None
- # writebackifcopy - using conext manager
+ # writebackifcopy - using context manager
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py
index 0e4ac5f39..93434dd1b 100644
--- a/numpy/core/tests/test_scalar_methods.py
+++ b/numpy/core/tests/test_scalar_methods.py
@@ -64,7 +64,7 @@ class TestAsIntegerRatio(object):
R(*np.double(2.1).as_integer_ratio()))
assert_equal(R(-4728779608739021, 2251799813685248),
R(*np.double(-2.1).as_integer_ratio()))
- # longdouble is platform depedent
+ # longdouble is platform dependent
@pytest.mark.parametrize("ftype, frac_vals, exp_vals", [
# dtype test cases generated using hypothesis
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index cde1355aa..86b0ca199 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -51,7 +51,7 @@ class TestRealScalars(object):
def test_py2_float_print(self):
# gh-10753
- # In python2, the python float type implements an obsolte method
+ # In python2, the python float type implements an obsolete method
# tp_print, which overrides tp_repr and tp_str when using "print" to
# output to a "real file" (ie, not a StringIO). Make sure we don't
# inherit it.
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 98c2840ab..3723470f3 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -484,11 +484,11 @@ class FCompiler(CCompiler):
# XXX Assuming that free format is default for f90 compiler.
fix = self.command_vars.compiler_fix
# NOTE: this and similar examples are probably just
- # exluding --coverage flag when F90 = gfortran --coverage
+ # excluding --coverage flag when F90 = gfortran --coverage
# instead of putting that flag somewhere more appropriate
# this and similar examples where a Fortran compiler
# environment variable has been customized by CI or a user
- # should perhaps eventually be more throughly tested and more
+ # should perhaps eventually be more thoroughly tested and more
# robustly handled
if fix:
fix = _shell_utils.NativeParser.split(fix)
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index 61f5bf4ef..7946c6432 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -276,7 +276,7 @@ but gives 1874919424 (incorrect) for a 32-bit integer.
The behaviour of NumPy and Python integer types differs significantly for
integer overflows and may confuse users expecting NumPy integers to behave
similar to Python's ``int``. Unlike NumPy, the size of Python's ``int`` is
-flexible. This means Python integers may expand to accomodate any integer and
+flexible. This means Python integers may expand to accommodate any integer and
will not overflow.
NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the
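The 1874919424 figure mentioned in the context above comes from 100**8 wrapping in 32 bits; for reference:

    import numpy as np

    # Python ints are arbitrary precision, so this never overflows.
    print(100 ** 8)                           # 10000000000000000

    # A 64-bit NumPy integer still holds the value ...
    print(np.power(100, 8, dtype=np.int64))   # 10000000000000000

    # ... but a 32-bit integer wraps around silently.
    print(np.power(100, 8, dtype=np.int32))   # 1874919424

    # iinfo/finfo report the representable range of a given type.
    print(np.iinfo(np.int32).max)             # 2147483647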
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
index b752582c2..676015668 100644
--- a/numpy/doc/indexing.py
+++ b/numpy/doc/indexing.py
@@ -1,4 +1,5 @@
-"""==============
+"""
+==============
Array indexing
==============
@@ -107,7 +108,7 @@ arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
-array aquires the shape needed for use in an expression or with a
+array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
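A one-liner makes the dimension-increasing indexing described above concrete (standard np.newaxis behaviour, not specific to this patch):

    import numpy as np

    x = np.arange(5)            # shape (5,)

    # np.newaxis (or None) inserts a length-1 axis, so the result
    # acquires the shape needed for broadcasting.
    col = x[:, np.newaxis]      # shape (5, 1)
    row = x[np.newaxis, :]      # shape (1, 5)

    print((col + row).shape)    # (5, 5)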
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 2a6d39abc..fabb87adc 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,7 +1,7 @@
from __future__ import division, absolute_import, print_function
try:
- # Accessing collections abstact classes from collections
+ # Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
@@ -4341,7 +4341,7 @@ def delete(arr, obj, axis=None):
else:
slobj[axis] = slice(None, start)
new[tuple(slobj)] = arr[tuple(slobj)]
- # copy end chunck
+ # copy end chunk
if stop == N:
pass
else:
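For reference, the leading and trailing chunks this branch copies correspond to what survives a sliced np.delete:

    import numpy as np

    a = np.arange(8)
    # Deleting the slice 2:5 keeps the leading chunk [0, 1] and the
    # trailing "end chunk" [5, 6, 7] that the comment above refers to.
    print(np.delete(a, slice(2, 5)))   # [0 1 5 6 7]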
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index 08a9cf09c..fabb509ab 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -146,7 +146,7 @@ def get_names(adtype):
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structure
- are flattend beforehand.
+ are flattened beforehand.
Parameters
----------
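The flattening described in this docstring, shown on a nested dtype (example adapted from the function's own docstring):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    dt = np.dtype([('a', int), ('b', [('ba', float), ('bb', int)])])

    # get_names() keeps the nesting; get_names_flat() flattens it.
    print(rfn.get_names(dt))        # ('a', ('b', ('ba', 'bb')))
    print(rfn.get_names_flat(dt))   # ('a', 'b', 'ba', 'bb')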
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index afaa526af..4895a722c 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -798,7 +798,7 @@ class TestHistogramdd(object):
hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
assert_equal(hist, relative_areas)
- # resulting histogram should be uniform, since counts and areas are propotional
+ # resulting histogram should be uniform, since counts and areas are proportional
hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
assert_equal(hist, 1 / (8*8))
diff --git a/numpy/random/generator.pyx b/numpy/random/generator.pyx
index fd93d5efe..cb3df2626 100644
--- a/numpy/random/generator.pyx
+++ b/numpy/random/generator.pyx
@@ -440,7 +440,7 @@ cdef class Generator:
'when required.')
# Implementation detail: the old API used a masked method to generate
- # bounded uniform integers. Lemire's method is preferrable since it is
+ # bounded uniform integers. Lemire's method is preferable since it is
# faster. randomgen allows a choice, we will always use the faster one.
cdef bint _masked = True
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 50c8a0b2f..48ff6b0a6 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -621,7 +621,7 @@ cdef class RandomState:
'ValueError', DeprecationWarning)
# Implementation detail: the use a masked method to generate
- # bounded uniform integers. Lemire's method is preferrable since it is
+ # bounded uniform integers. Lemire's method is preferable since it is
# faster. randomgen allows a choice, we will always use the slower but
# backward compatible one.
cdef bint _masked = True
diff --git a/numpy/random/src/philox/philox-benchmark.c b/numpy/random/src/philox/philox-benchmark.c
index 0cab04cf5..df5814d5f 100644
--- a/numpy/random/src/philox/philox-benchmark.c
+++ b/numpy/random/src/philox/philox-benchmark.c
@@ -5,7 +5,7 @@
*
* gcc philox-benchmark.c -O3 -o philox-benchmark
*
- * Requres the Random123 directory containing header files to be located in the
+ * Requires the Random123 directory containing header files to be located in the
* same directory (not included).
*/
#include "Random123/philox.h"
diff --git a/numpy/random/src/philox/philox-test-data-gen.c b/numpy/random/src/philox/philox-test-data-gen.c
index 442e18b55..a5fcaa690 100644
--- a/numpy/random/src/philox/philox-test-data-gen.c
+++ b/numpy/random/src/philox/philox-test-data-gen.c
@@ -7,7 +7,7 @@
* gcc philox-test-data-gen.c -o philox-test-data-gen
* ./philox-test-data-gen
*
- * Requres the Random123 directory containing header files to be located in the
+ * Requires the Random123 directory containing header files to be located in the
* same directory (not included).
*
*/
diff --git a/numpy/random/src/threefry/threefry-benchmark.c b/numpy/random/src/threefry/threefry-benchmark.c
index 6d6239cd3..5e2cfe844 100644
--- a/numpy/random/src/threefry/threefry-benchmark.c
+++ b/numpy/random/src/threefry/threefry-benchmark.c
@@ -5,7 +5,7 @@
*
* gcc threefry-benchmark.c -O3 -o threefry-benchmark
*
- * Requres the Random123 directory containing header files to be located in the
+ * Requires the Random123 directory containing header files to be located in the
* same directory (not included).
*/
#include "Random123/threefry.h"
diff --git a/numpy/random/src/threefry/threefry-test-data-gen.c b/numpy/random/src/threefry/threefry-test-data-gen.c
index 328eb2575..8514a227e 100644
--- a/numpy/random/src/threefry/threefry-test-data-gen.c
+++ b/numpy/random/src/threefry/threefry-test-data-gen.c
@@ -8,7 +8,7 @@
* threefry-test-data-gen
* ./threefry-test-data-gen
*
- * Requres the Random123 directory containing header files to be located in the
+ * Requires the Random123 directory containing header files to be located in the
* same directory (not included).
*
*/
diff --git a/numpy/random/src/xoshiro256/xoshiro256-test-data-gen.c b/numpy/random/src/xoshiro256/xoshiro256-test-data-gen.c
index 94eeb7346..b5351bf7a 100644
--- a/numpy/random/src/xoshiro256/xoshiro256-test-data-gen.c
+++ b/numpy/random/src/xoshiro256/xoshiro256-test-data-gen.c
@@ -9,7 +9,7 @@
* ../splitmix64/splitmix64.c -o xoshiro256-test-data-gen
* ./xoshiro256-test-data-gen
*
- * Requres the Random123 directory containing header files to be located in the
+ * Requires the Random123 directory containing header files to be located in the
* same directory (not included).
*
*/
diff --git a/numpy/random/src/xoshiro512/xoshiro512-test-data-gen.c b/numpy/random/src/xoshiro512/xoshiro512-test-data-gen.c
index 83e164a51..698923bda 100644
--- a/numpy/random/src/xoshiro512/xoshiro512-test-data-gen.c
+++ b/numpy/random/src/xoshiro512/xoshiro512-test-data-gen.c
@@ -9,7 +9,7 @@
* ../splitmix64/splitmix64.c -o xoshiro512-test-data-gen
* ./xoshiro512-test-data-gen
*
- * Requres the Random123 directory containing header files to be located in the
+ * Requires the Random123 directory containing header files to be located in the
* same directory (not included).
*
*/
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index 5e2b93f52..d8a07e8b2 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -1903,7 +1903,7 @@ class TestSingleEltArrayInput(object):
assert_equal(out.shape, self.tgtShape)
-# Ensure returned array dtype is corect for platform
+# Ensure returned array dtype is correct for platform
def test_integer_dtype(int_func):
random.seed(123456789)
fname, args, md5 = int_func
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 53181bc49..ead5d264d 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -708,7 +708,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
- # original array for output formating
+ # original array for output formatting
ox, oy = x, y
def isnumber(x):
@@ -733,7 +733,7 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
# (2) __eq__ on some ndarray subclasses returns Python booleans
# instead of element-wise comparisons, so we cast to bool_() and
# use isinstance(..., bool) checks
- # (3) subclasses with bare-bones __array_function__ implemenations may
+ # (3) subclasses with bare-bones __array_function__ implementations may
# not implement np.all(), so favor using the .all() method
# We are not committed to supporting such subclasses, but it's nice to
# support them if possible.