author     Sebastian Berg <sebastian@sipsolutions.net>  2021-09-21 15:54:11 -0700
committer  GitHub <noreply@github.com>  2021-09-21 15:54:11 -0700
commit     66d33faabf1f19d4b192c38e927dfa727b0ed61f (patch)
tree       f657cee2e071c1394f993abf9f1b3a18504414c6 /numpy
parent     a838abe68ce6bf8164c31cc35a2ac8d5485754ec (diff)
parent     83960267dc097742cb67ef575504afa56f82b102 (diff)
Merge pull request #19911 from DimitriPapadopoulos/codespell
DOC: Typos found by codespell
Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.pyi | 2
-rw-r--r--  numpy/conftest.py | 2
-rw-r--r--  numpy/core/arrayprint.pyi | 4
-rw-r--r--  numpy/core/einsumfunc.pyi | 2
-rw-r--r--  numpy/core/machar.py | 2
-rw-r--r--  numpy/core/records.py | 2
-rw-r--r--  numpy/core/src/_simd/_simd_inc.h.src | 2
-rw-r--r--  numpy/core/src/common/npy_cpu_dispatch.h | 2
-rw-r--r--  numpy/core/src/common/npy_cpu_features.c.src | 2
-rw-r--r--  numpy/core/src/common/npy_cpuinfo_parser.h | 4
-rw-r--r--  numpy/core/src/common/simd/emulate_maskop.h | 2
-rw-r--r--  numpy/core/src/common/simd/intdiv.h | 2
-rw-r--r--  numpy/core/src/common/simd/neon/math.h | 2
-rw-r--r--  numpy/core/src/common/simd/vsx/operators.h | 2
-rw-r--r--  numpy/core/src/multiarray/abstractdtypes.c | 2
-rw-r--r--  numpy/core/src/multiarray/array_coercion.c | 4
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c | 4
-rw-r--r--  numpy/core/src/multiarray/convert_datatype.c | 4
-rw-r--r--  numpy/core/src/multiarray/datetime.c | 2
-rw-r--r--  numpy/core/src/multiarray/dtypemeta.c | 2
-rw-r--r--  numpy/core/src/multiarray/einsum_sumprod.c.src | 18
-rw-r--r--  numpy/core/src/multiarray/lowlevel_strided_loops.c.src | 2
-rw-r--r--  numpy/core/src/multiarray/nditer_api.c | 6
-rw-r--r--  numpy/core/src/multiarray/nditer_constr.c | 2
-rw-r--r--  numpy/core/src/multiarray/nditer_templ.c.src | 2
-rw-r--r--  numpy/core/src/umath/_scaled_float_dtype.c | 6
-rw-r--r--  numpy/core/src/umath/_umath_tests.c.src | 2
-rw-r--r--  numpy/core/src/umath/_umath_tests.dispatch.c | 2
-rw-r--r--  numpy/core/src/umath/loops_utils.h.src | 2
-rw-r--r--  numpy/core/tests/test__exceptions.py | 2
-rw-r--r--  numpy/core/tests/test_array_coercion.py | 4
-rw-r--r--  numpy/core/tests/test_casting_unittests.py | 2
-rw-r--r--  numpy/core/tests/test_cpu_dispatcher.py | 4
-rw-r--r--  numpy/core/tests/test_deprecations.py | 2
-rw-r--r--  numpy/core/tests/test_einsum.py | 2
-rw-r--r--  numpy/core/tests/test_nditer.py | 4
-rw-r--r--  numpy/distutils/command/build_ext.py | 2
-rw-r--r--  numpy/distutils/fcompiler/gnu.py | 2
-rw-r--r--  numpy/distutils/misc_util.py | 4
-rw-r--r--  numpy/distutils/system_info.py | 3
-rw-r--r--  numpy/f2py/cfuncs.py | 2
-rw-r--r--  numpy/f2py/tests/test_return_character.py | 4
-rw-r--r--  numpy/lib/format.py | 2
-rw-r--r--  numpy/lib/nanfunctions.py | 2
-rw-r--r--  numpy/lib/npyio.py | 2
-rw-r--r--  numpy/lib/twodim_base.py | 4
-rw-r--r--  numpy/linalg/linalg.py | 4
-rw-r--r--  numpy/linalg/linalg.pyi | 2
-rw-r--r--  numpy/linalg/tests/test_linalg.py | 8
-rw-r--r--  numpy/ma/mrecords.py | 8
-rw-r--r--  numpy/ma/mrecords.pyi | 2
-rw-r--r--  numpy/ma/tests/test_core.py | 12
-rw-r--r--  numpy/ma/tests/test_mrecords.py | 2
-rw-r--r--  numpy/polynomial/__init__.py | 2
-rw-r--r--  numpy/polynomial/_polybase.py | 2
-rw-r--r--  numpy/polynomial/chebyshev.py | 8
-rw-r--r--  numpy/polynomial/laguerre.py | 2
-rw-r--r--  numpy/random/_generator.pyx | 2
-rw-r--r--  numpy/typing/__init__.py | 2
-rw-r--r--  numpy/typing/_generic_alias.py | 2
-rw-r--r--  numpy/typing/tests/data/fail/bitwise_ops.py | 2
-rw-r--r--  numpy/typing/tests/data/fail/numerictypes.py | 2
-rw-r--r--  numpy/typing/tests/data/pass/simple.py | 2
-rw-r--r--  numpy/typing/tests/test_typing.py | 6
64 files changed, 105 insertions, 104 deletions
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 4949df318..0313530dc 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1990,7 +1990,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
# The last overload is for catching recursive objects whose
# nesting is too deep.
# The first overload is for catching `bytes` (as they are a subtype of
- # `Sequence[int]`) and `str`. As `str` is a recusive sequence of
+ # `Sequence[int]`) and `str`. As `str` is a recursive sequence of
# strings, it will pass through the final overload otherwise
@overload
diff --git a/numpy/conftest.py b/numpy/conftest.py
index e15ee0845..fd5fdd77d 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -33,7 +33,7 @@ hypothesis.settings.register_profile(
suppress_health_check=hypothesis.HealthCheck.all(),
)
# Note that the default profile is chosen based on the presence
-# of pytest.ini, but can be overriden by passing the
+# of pytest.ini, but can be overridden by passing the
# --hypothesis-profile=NAME argument to pytest.
_pytest_ini = os.path.join(os.path.dirname(__file__), "..", "pytest.ini")
hypothesis.settings.load_profile(
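
The corrected comment above concerns hypothesis test profiles; for context only (not part of this commit), a minimal sketch of how a profile is registered and then selected, with an illustrative profile name and settings:

    import hypothesis

    # Register a named profile; the name and values here are made up.
    hypothesis.settings.register_profile("slow", max_examples=500, deadline=None)
    # Load it explicitly, or pick it per run with `--hypothesis-profile=slow`
    # on the pytest command line, as the comment above describes.
    hypothesis.settings.load_profile("slow")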
diff --git a/numpy/core/arrayprint.pyi b/numpy/core/arrayprint.pyi
index df22efed6..3731e6578 100644
--- a/numpy/core/arrayprint.pyi
+++ b/numpy/core/arrayprint.pyi
@@ -1,8 +1,8 @@
from types import TracebackType
from typing import Any, Optional, Callable, Union, Type, Literal, TypedDict, SupportsIndex
-# Using a private class is by no means ideal, but it is simply a consquence
-# of a `contextlib.context` returning an instance of aformentioned class
+# Using a private class is by no means ideal, but it is simply a consequence
+# of a `contextlib.context` returning an instance of aforementioned class
from contextlib import _GeneratorContextManager
from numpy import (
diff --git a/numpy/core/einsumfunc.pyi b/numpy/core/einsumfunc.pyi
index 52025d502..aabb04c47 100644
--- a/numpy/core/einsumfunc.pyi
+++ b/numpy/core/einsumfunc.pyi
@@ -41,7 +41,7 @@ __all__: List[str]
# TODO: Properly handle the `casting`-based combinatorics
# TODO: We need to evaluate the content `__subscripts` in order
# to identify whether or an array or scalar is returned. At a cursory
-# glance this seems like something that can quite easilly be done with
+# glance this seems like something that can quite easily be done with
# a mypy plugin.
# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
@overload
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index 04dad4d77..c77be793f 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -1,5 +1,5 @@
"""
-Machine arithmetics - determine the parameters of the
+Machine arithmetic - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
diff --git a/numpy/core/records.py b/numpy/core/records.py
index fd5f1ab39..ce206daa1 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -68,7 +68,7 @@ _byteorderconv = {'b':'>',
'i':'|'}
# formats regular expression
-# allows multidimension spec with a tuple syntax in front
+# allows multidimensional spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
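
The comment fixed above describes format strings such as '(2,3)f4', where a tuple prefix gives a field a sub-array shape; a small illustration using plain NumPy dtypes (not part of this commit):

    import numpy as np

    # The tuple in front of the letter code becomes the field's sub-array shape.
    dt = np.dtype([('a', '(2,3)f4'), ('b', 'i4')])
    print(dt['a'].shape)  # (2, 3)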
diff --git a/numpy/core/src/_simd/_simd_inc.h.src b/numpy/core/src/_simd/_simd_inc.h.src
index 9858fc0dc..fbdf982c2 100644
--- a/numpy/core/src/_simd/_simd_inc.h.src
+++ b/numpy/core/src/_simd/_simd_inc.h.src
@@ -113,7 +113,7 @@ typedef struct
int is_scalar:1;
// returns '1' if the type represent a vector
int is_vector:1;
- // returns the len of multi-vector if the type reprsent x2 or x3 vector
+ // returns the len of multi-vector if the type represent x2 or x3 vector
// otherwise returns 0, e.g. returns 2 if data type is simd_data_vu8x2
int is_vectorx;
// returns the equivalent scalar data type e.g. simd_data_vu8 -> simd_data_u8
diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h
index 8c2b40c27..e814cd425 100644
--- a/numpy/core/src/common/npy_cpu_dispatch.h
+++ b/numpy/core/src/common/npy_cpu_dispatch.h
@@ -196,7 +196,7 @@
* Example:
* Assume we have a dispatch-able source exporting the following function:
*
- * @targets baseline avx2 avx512_skx // configration statements
+ * @targets baseline avx2 avx512_skx // configuration statements
*
* void NPY_CPU_DISPATCH_CURFX(dispatch_me)(const int *src, int *dst)
* {
diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src
index 1e0f4a571..a2383c45f 100644
--- a/numpy/core/src/common/npy_cpu_features.c.src
+++ b/numpy/core/src/common/npy_cpu_features.c.src
@@ -230,7 +230,7 @@ npy__cpu_try_disable_env(void)
notsupp_cur[flen] = ' '; notsupp_cur += flen + 1;
goto next;
}
- // Finaly we can disable it
+ // Finally we can disable it
npy__cpu_have[feature_id] = 0;
next:
feature = strtok(NULL, delim);
diff --git a/numpy/core/src/common/npy_cpuinfo_parser.h b/numpy/core/src/common/npy_cpuinfo_parser.h
index 9e85e3a2f..364873a23 100644
--- a/numpy/core/src/common/npy_cpuinfo_parser.h
+++ b/numpy/core/src/common/npy_cpuinfo_parser.h
@@ -123,7 +123,7 @@ read_file(const char* pathname, char* buffer, size_t buffsize)
}
/*
- * Extract the content of a the first occurence of a given field in
+ * Extract the content of a the first occurrence of a given field in
* the content of /proc/cpuinfo and return it as a heap-allocated
* string that must be freed by the caller.
*
@@ -138,7 +138,7 @@ extract_cpuinfo_field(const char* buffer, int buflen, const char* field)
int len;
const char *p, *q;
- /* Look for first field occurence, and ensures it starts the line. */
+ /* Look for first field occurrence, and ensures it starts the line. */
p = buffer;
for (;;) {
p = memmem(p, bufend-p, field, fieldlen);
diff --git a/numpy/core/src/common/simd/emulate_maskop.h b/numpy/core/src/common/simd/emulate_maskop.h
index 7e7446bc5..41e397c2d 100644
--- a/numpy/core/src/common/simd/emulate_maskop.h
+++ b/numpy/core/src/common/simd/emulate_maskop.h
@@ -1,5 +1,5 @@
/**
- * This header is used internaly by all current supported SIMD extention,
+ * This header is used internally by all current supported SIMD extensions,
* execpt for AVX512.
*/
#ifndef NPY_SIMD
diff --git a/numpy/core/src/common/simd/intdiv.h b/numpy/core/src/common/simd/intdiv.h
index f6ea9abf2..5d2ab2906 100644
--- a/numpy/core/src/common/simd/intdiv.h
+++ b/numpy/core/src/common/simd/intdiv.h
@@ -39,7 +39,7 @@
* for (; len >= vstep; src += vstep, dst += vstep, len -= vstep) {
* npyv_s32 a = npyv_load_s32(*src); // load s32 vector from memory
* a = npyv_divc_s32(a, divisor); // divide all elements by x
- * npyv_store_s32(dst, a); // store s32 vector into memroy
+ * npyv_store_s32(dst, a); // store s32 vector into memory
* }
*
** NOTES:
diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h
index ced82d1de..19ea6f22f 100644
--- a/numpy/core/src/common/simd/neon/math.h
+++ b/numpy/core/src/common/simd/neon/math.h
@@ -31,7 +31,7 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a)
const npyv_f32 zero = vdupq_n_f32(0.0f);
const npyv_u32 pinf = vdupq_n_u32(0x7f800000);
npyv_u32 is_zero = vceqq_f32(a, zero), is_inf = vceqq_u32(vreinterpretq_u32_f32(a), pinf);
- // guard agianst floating-point division-by-zero error
+ // guard against floating-point division-by-zero error
npyv_f32 guard_byz = vbslq_f32(is_zero, vreinterpretq_f32_u32(pinf), a);
// estimate to (1/√a)
npyv_f32 rsqrte = vrsqrteq_f32(guard_byz);
diff --git a/numpy/core/src/common/simd/vsx/operators.h b/numpy/core/src/common/simd/vsx/operators.h
index 23c5d0dbe..d34057ff3 100644
--- a/numpy/core/src/common/simd/vsx/operators.h
+++ b/numpy/core/src/common/simd/vsx/operators.h
@@ -103,7 +103,7 @@ NPYV_IMPL_VSX_BIN_B64(or)
NPYV_IMPL_VSX_BIN_B64(xor)
// NOT
-// note: we implement npyv_not_b*(boolen types) for internal use*/
+// note: we implement npyv_not_b*(boolean types) for internal use*/
#define NPYV_IMPL_VSX_NOT_INT(VEC_LEN) \
NPY_FINLINE npyv_u##VEC_LEN npyv_not_u##VEC_LEN(npyv_u##VEC_LEN a) \
{ return vec_nor(a, a); } \
diff --git a/numpy/core/src/multiarray/abstractdtypes.c b/numpy/core/src/multiarray/abstractdtypes.c
index 99573f089..cc1d7fad8 100644
--- a/numpy/core/src/multiarray/abstractdtypes.c
+++ b/numpy/core/src/multiarray/abstractdtypes.c
@@ -157,7 +157,7 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other)
}
else if (PyTypeNum_ISNUMBER(other->type_num) ||
other->type_num == NPY_TIMEDELTA) {
- /* All other numeric types (ant timdelta) are preserved: */
+ /* All other numeric types (ant timedelta) are preserved: */
Py_INCREF(other);
return other;
}
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index 90b50097a..847bdafc3 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -136,7 +136,7 @@ _prime_global_pytype_to_type_dict(void)
*
* This assumes that the DType class is guaranteed to hold on the
* python type (this assumption is guaranteed).
- * This functionality supercedes ``_typenum_fromtypeobj``.
+ * This functionality supersedes ``_typenum_fromtypeobj``.
*
* @param DType DType to map the python type to
* @param pytype Python type to map from
@@ -1400,7 +1400,7 @@ PyArray_DiscoverDTypeAndShape(
* These should be largely deprecated, and represent only the DType class
* for most `dtype` parameters.
*
- * TODO: This function should eventually recieve a deprecation warning and
+ * TODO: This function should eventually receive a deprecation warning and
* be removed.
*
* @param descr
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index d653bfc22..9b9df08f2 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -858,7 +858,7 @@ _uni_release(char *ptr, int nc)
relfunc(aptr, N1); \
return -1; \
} \
- val = compfunc(aptr, bptr, N1, N2); \
+ val = compfunc(aptr, bptr, N1, N2); \
*dptr = (val CMP 0); \
PyArray_ITER_NEXT(iself); \
PyArray_ITER_NEXT(iother); \
@@ -870,7 +870,7 @@ _uni_release(char *ptr, int nc)
#define _reg_loop(CMP) { \
while(size--) { \
- val = compfunc((void *)iself->dataptr, \
+ val = compfunc((void *)iself->dataptr, \
(void *)iother->dataptr, \
N1, N2); \
*dptr = (val CMP 0); \
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index b6755e91d..12dd99504 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -1649,7 +1649,7 @@ PyArray_ResultType(
}
Py_INCREF(all_DTypes[i_all]);
/*
- * Leave the decriptor empty, if we need it, we will have to go
+ * Leave the descriptor empty, if we need it, we will have to go
* to more extreme lengths unfortunately.
*/
all_descriptors[i_all] = NULL;
@@ -2243,7 +2243,7 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth)
* Add a new casting implementation using a PyArrayMethod_Spec.
*
* @param spec
- * @param private If private, allow slots not publically exposed.
+ * @param private If private, allow slots not publicly exposed.
* @return 0 on success -1 on failure
*/
NPY_NO_EXPORT int
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 093090b4c..11a941e72 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -427,7 +427,7 @@ PyArray_DatetimeStructToDatetime(
}
/*NUMPY_API
- * Create a timdelta value from a filled timedelta struct and resolution unit.
+ * Create a timedelta value from a filled timedelta struct and resolution unit.
*
* TO BE REMOVED - NOT USED INTERNALLY.
*/
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 059ec201e..cbde91b76 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -101,7 +101,7 @@ static PyObject *
legacy_dtype_default_new(PyArray_DTypeMeta *self,
PyObject *args, PyObject *kwargs)
{
- /* TODO: This should allow endianess and possibly metadata */
+ /* TODO: This should allow endianness and possibly metadata */
if (NPY_DT_is_parametric(self)) {
/* reject parametric ones since we would need to get unit, etc. info */
PyErr_Format(PyExc_TypeError,
diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src
index 333b8e188..29ceabd71 100644
--- a/numpy/core/src/multiarray/einsum_sumprod.c.src
+++ b/numpy/core/src/multiarray/einsum_sumprod.c.src
@@ -80,7 +80,7 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count)
/* Use aligned instructions if possible */
const int is_aligned = EINSUM_IS_ALIGNED(data);
const int vstep = npyv_nlanes_@sfx@;
- npyv_@sfx@ vaccum = npyv_zero_@sfx@();
+ npyv_@sfx@ v_accum = npyv_zero_@sfx@();
const npy_intp vstepx4 = vstep * 4;
/**begin repeat1
@@ -98,15 +98,15 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count)
npyv_@sfx@ a01 = npyv_add_@sfx@(a0, a1);
npyv_@sfx@ a23 = npyv_add_@sfx@(a2, a3);
npyv_@sfx@ a0123 = npyv_add_@sfx@(a01, a23);
- vaccum = npyv_add_@sfx@(a0123, vaccum);
+ v_accum = npyv_add_@sfx@(a0123, v_accum);
}
}
/**end repeat1**/
for (; count > 0; count -= vstep, data += vstep) {
npyv_@sfx@ a = npyv_load_tillz_@sfx@(data, count);
- vaccum = npyv_add_@sfx@(a, vaccum);
+ v_accum = npyv_add_@sfx@(a, v_accum);
}
- accum = npyv_sum_@sfx@(vaccum);
+ accum = npyv_sum_@sfx@(v_accum);
npyv_cleanup();
#else
#ifndef NPY_DISABLE_OPTIMIZATION
@@ -485,7 +485,7 @@ static NPY_GCC_OPT_3 void
/* Use aligned instructions if possible */
const int is_aligned = EINSUM_IS_ALIGNED(data0) && EINSUM_IS_ALIGNED(data1);
const int vstep = npyv_nlanes_@sfx@;
- npyv_@sfx@ vaccum = npyv_zero_@sfx@();
+ npyv_@sfx@ v_accum = npyv_zero_@sfx@();
/**begin repeat2
* #cond = if(is_aligned), else#
@@ -501,19 +501,19 @@ static NPY_GCC_OPT_3 void
npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(data0 + vstep * @i@);
npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data1 + vstep * @i@);
/**end repeat3**/
- npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, vaccum);
+ npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, v_accum);
npyv_@sfx@ ab2 = npyv_muladd_@sfx@(a2, b2, ab3);
npyv_@sfx@ ab1 = npyv_muladd_@sfx@(a1, b1, ab2);
- vaccum = npyv_muladd_@sfx@(a0, b0, ab1);
+ v_accum = npyv_muladd_@sfx@(a0, b0, ab1);
}
}
/**end repeat2**/
for (; count > 0; count -= vstep, data0 += vstep, data1 += vstep) {
npyv_@sfx@ a = npyv_load_tillz_@sfx@(data0, count);
npyv_@sfx@ b = npyv_load_tillz_@sfx@(data1, count);
- vaccum = npyv_muladd_@sfx@(a, b, vaccum);
+ v_accum = npyv_muladd_@sfx@(a, b, v_accum);
}
- accum = npyv_sum_@sfx@(vaccum);
+ accum = npyv_sum_@sfx@(v_accum);
npyv_cleanup();
#else
#ifndef NPY_DISABLE_OPTIMIZATION
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index b32664cc9..e313d2447 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -1849,7 +1849,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
return -1;
}
#else
- /* The operand order is reveresed here */
+ /* The operand order is reversed here */
char *args[2] = {subspace_ptrs[1], subspace_ptrs[0]};
npy_intp strides[2] = {subspace_strides[1], subspace_strides[0]};
if (NPY_UNLIKELY(cast_info.func(&cast_info.context,
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index 811eece7d..0f0a79ddf 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -2130,7 +2130,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
/*
* Try to do make the outersize as big as possible. This allows
* it to shrink when processing the last bit of the outer reduce loop,
- * then grow again at the beginnning of the next outer reduce loop.
+ * then grow again at the beginning of the next outer reduce loop.
*/
NBF_REDUCE_OUTERSIZE(bufferdata) = (NAD_SHAPE(reduce_outeraxisdata)-
NAD_INDEX(reduce_outeraxisdata));
@@ -2804,9 +2804,9 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
if (coord != 0) {
/*
* In this case, it is only safe to reuse the buffer if the amount
- * of data copied is not more then the current axes, as is the
+ * of data copied is not more than the current axes, as is the
* case when reuse_reduce_loops was active already.
- * It should be in principle OK when the idim loop returns immidiatly.
+ * It should be in principle OK when the idim loop returns immediately.
*/
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
}
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 57dbb3a94..bf32e1f6b 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1405,7 +1405,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop)
/*
* Check whether a reduction is OK based on the flags and the operand being
* readwrite. This path is deprecated, since usually only specific axes
- * should be reduced. If axes are specified explicitely, the flag is
+ * should be reduced. If axes are specified explicitly, the flag is
* unnecessary.
*/
static int
diff --git a/numpy/core/src/multiarray/nditer_templ.c.src b/numpy/core/src/multiarray/nditer_templ.c.src
index 05ce6ae75..3f91a482b 100644
--- a/numpy/core/src/multiarray/nditer_templ.c.src
+++ b/numpy/core/src/multiarray/nditer_templ.c.src
@@ -132,7 +132,7 @@ npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@(
/* Reset the 1st and 2nd indices to 0 */
NAD_INDEX(axisdata0) = 0;
NAD_INDEX(axisdata1) = 0;
- /* Reset the 1st and 2nd pointers to the value of the 3nd */
+ /* Reset the 1st and 2nd pointers to the value of the 3rd */
for (istrides = 0; istrides < nstrides; ++istrides) {
NAD_PTRS(axisdata0)[istrides] = NAD_PTRS(axisdata2)[istrides];
NAD_PTRS(axisdata1)[istrides] = NAD_PTRS(axisdata2)[istrides];
diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c
index 866f636a0..eeef33a3d 100644
--- a/numpy/core/src/umath/_scaled_float_dtype.c
+++ b/numpy/core/src/umath/_scaled_float_dtype.c
@@ -733,9 +733,9 @@ NPY_NO_EXPORT PyObject *
get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
{
/* Allow calling the function multiple times. */
- static npy_bool initalized = NPY_FALSE;
+ static npy_bool initialized = NPY_FALSE;
- if (initalized) {
+ if (initialized) {
Py_INCREF(&PyArray_SFloatDType);
return (PyObject *)&PyArray_SFloatDType;
}
@@ -764,6 +764,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
return NULL;
}
- initalized = NPY_TRUE;
+ initialized = NPY_TRUE;
return (PyObject *)&PyArray_SFloatDType;
}
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index 0cd673831..ed4c617a4 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -586,7 +586,7 @@ fail:
return NULL;
}
-// Testing the utilites of the CPU dispatcher
+// Testing the utilities of the CPU dispatcher
#ifndef NPY_DISABLE_OPTIMIZATION
#include "_umath_tests.dispatch.h"
#endif
diff --git a/numpy/core/src/umath/_umath_tests.dispatch.c b/numpy/core/src/umath/_umath_tests.dispatch.c
index 66058550e..9d8df4c86 100644
--- a/numpy/core/src/umath/_umath_tests.dispatch.c
+++ b/numpy/core/src/umath/_umath_tests.dispatch.c
@@ -1,5 +1,5 @@
/**
- * Testing the utilites of the CPU dispatcher
+ * Testing the utilities of the CPU dispatcher
*
* @targets $werror baseline
* SSE2 SSE41 AVX2
diff --git a/numpy/core/src/umath/loops_utils.h.src b/numpy/core/src/umath/loops_utils.h.src
index 1a2a5a32b..762e9ee59 100644
--- a/numpy/core/src/umath/loops_utils.h.src
+++ b/numpy/core/src/umath/loops_utils.h.src
@@ -6,7 +6,7 @@
/**
* Old versions of MSVC causes ambiguous link errors when we deal with large SIMD kernels
- * which lead to break the build, probably releated to the following bug:
+ * which lead to break the build, probably related to the following bug:
* https://developercommunity.visualstudio.com/content/problem/415095/internal-compiler-error-with-perfectly-forwarded-r.html
*/
#if defined(_MSC_VER) && _MSC_VER < 1916
diff --git a/numpy/core/tests/test__exceptions.py b/numpy/core/tests/test__exceptions.py
index c87412aa4..10b87e052 100644
--- a/numpy/core/tests/test__exceptions.py
+++ b/numpy/core/tests/test__exceptions.py
@@ -40,7 +40,7 @@ class TestArrayMemoryError:
# 1023.9999 Mib should round to 1 GiB
assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
- # larger than sys.maxsize, adding larger prefices isn't going to help
+ # larger than sys.maxsize, adding larger prefixes isn't going to help
# anyway.
assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
diff --git a/numpy/core/tests/test_array_coercion.py b/numpy/core/tests/test_array_coercion.py
index 076d8e43f..293f5a68f 100644
--- a/numpy/core/tests/test_array_coercion.py
+++ b/numpy/core/tests/test_array_coercion.py
@@ -376,7 +376,7 @@ class TestScalarDiscovery:
def test_scalar_to_int_coerce_does_not_cast(self, dtype):
"""
Signed integers are currently different in that they do not cast other
- NumPy scalar, but instead use scalar.__int__(). The harcoded
+ NumPy scalar, but instead use scalar.__int__(). The hardcoded
exception to this rule is `np.array(scalar, dtype=integer)`.
"""
dtype = np.dtype(dtype)
@@ -444,7 +444,7 @@ class TestTimeScalars:
# never use casting. This is because casting will error in this
# case, and traditionally in most cases the behaviour is maintained
# like this. (`np.array(scalar, dtype="U6")` would have failed before)
- # TODO: This discrepency _should_ be resolved, either by relaxing the
+ # TODO: This discrepancy _should_ be resolved, either by relaxing the
# cast, or by deprecating the first part.
scalar = np.datetime64(val, unit)
dtype = np.dtype(dtype)
diff --git a/numpy/core/tests/test_casting_unittests.py b/numpy/core/tests/test_casting_unittests.py
index a13e807e2..d41d6dcc0 100644
--- a/numpy/core/tests/test_casting_unittests.py
+++ b/numpy/core/tests/test_casting_unittests.py
@@ -127,7 +127,7 @@ CAST_TABLE = _get_cancast_table()
class TestChanges:
"""
- These test cases excercise some behaviour changes
+ These test cases exercise some behaviour changes
"""
@pytest.mark.parametrize("string", ["S", "U"])
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
diff --git a/numpy/core/tests/test_cpu_dispatcher.py b/numpy/core/tests/test_cpu_dispatcher.py
index 8712dee1a..2f7eac7e8 100644
--- a/numpy/core/tests/test_cpu_dispatcher.py
+++ b/numpy/core/tests/test_cpu_dispatcher.py
@@ -4,7 +4,7 @@ from numpy.testing import assert_equal
def test_dispatcher():
"""
- Testing the utilites of the CPU dispatcher
+ Testing the utilities of the CPU dispatcher
"""
targets = (
"SSE2", "SSE41", "AVX2",
@@ -16,7 +16,7 @@ def test_dispatcher():
for feature in reversed(targets):
# skip baseline features, by the default `CCompilerOpt` do not generate separated objects
# for the baseline, just one object combined all of them via 'baseline' option
- # within the configuration statments.
+ # within the configuration statements.
if feature in __cpu_baseline__:
continue
# check compiler and running machine support
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 44c76e0b8..1d0c5dfac 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -791,7 +791,7 @@ class TestFutureWarningArrayLikeNotIterable(_DeprecationTestCase):
*not* define the sequence protocol.
NOTE: Tests for the versions including __len__ and __getitem__ exist
- in `test_array_coercion.py` and they can be modified or ammended
+ in `test_array_coercion.py` and they can be modified or amended
when this deprecation expired.
"""
blueprint = np.arange(10)
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index c697d0c2d..78c5e527b 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -1025,7 +1025,7 @@ class TestEinsumPath:
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
def test_path_type_input(self):
- # Test explicit path handeling
+ # Test explicit path handling
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
path, path_str = np.einsum_path(*path_test, optimize=False)
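
The renamed test above exercises explicit path handling in np.einsum_path; a short usage sketch with made-up operands (illustrative only, not part of this commit):

    import numpy as np

    a = np.random.rand(10, 5)
    b = np.random.rand(5, 20)
    c = np.random.rand(20, 4)
    # Compute a contraction order once, then reuse it as an explicit path.
    path, info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
    result = np.einsum('ij,jk,kl->il', a, b, c, optimize=path)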
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 6b743ab27..fbf6da0e1 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2819,7 +2819,7 @@ def test_iter_writemasked_decref():
for buf, mask_buf in it:
buf[...] = (3, singleton)
- del buf, mask_buf, it # delete everything to ensure corrrect cleanup
+ del buf, mask_buf, it # delete everything to ensure correct cleanup
if HAS_REFCOUNT:
# The buffer would have included additional items, they must be
@@ -3202,7 +3202,7 @@ def test_debug_print(capfd):
Currently uses a subprocess to avoid dealing with the C level `printf`s.
"""
# the expected output with all addresses and sizes stripped (they vary
- # and/or are platform dependend).
+ # and/or are platform dependent).
expected = """
------ BEGIN ITERATOR DUMP ------
| Iterator Address:
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index 84ec8aa2c..b8378d473 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -602,7 +602,7 @@ class build_ext (old_build_ext):
# Expand possible fake static libraries to objects;
# make sure to iterate over a copy of the list as
# "fake" libraries will be removed as they are
- # enountered
+ # encountered
for lib in libraries[:]:
for libdir in library_dirs:
fake_lib = os.path.join(libdir, lib + '.fobjects')
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index 02372f5e6..f9891e93b 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -113,7 +113,7 @@ class GnuFCompiler(FCompiler):
# If MACOSX_DEPLOYMENT_TARGET is set, we simply trust the value
# and leave it alone. But, distutils will complain if the
# environment's value is different from the one in the Python
- # Makefile used to build Python. We let disutils handle this
+ # Makefile used to build Python. We let distutils handle this
# error checking.
if not target:
# If MACOSX_DEPLOYMENT_TARGET is not set in the environment,
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index a903f3ea3..c9e051237 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -128,8 +128,8 @@ def quote_args(args):
def allpath(name):
"Convert a /-separated pathname to one using the OS's path separator."
- splitted = name.split('/')
- return os.path.join(*splitted)
+ split = name.split('/')
+ return os.path.join(*split)
def rel_path(path, parent_path):
"""Return path relative to parent_path."""
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 8467e1c19..c0404b0e8 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -414,7 +414,8 @@ def get_standard_file(fname):
def _parse_env_order(base_order, env):
""" Parse an environment variable `env` by splitting with "," and only returning elements from `base_order`
- This method will sequence the environment variable and check for their invidual elements in `base_order`.
+ This method will sequence the environment variable and check for their
+ individual elements in `base_order`.
The items in the environment variable may be negated via '^item' or '!itema,itemb'.
It must start with ^/! to negate all options.
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index 8f42b4029..fb1688744 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -487,7 +487,7 @@ STRINGPADN replaces null values with padding values from the right.
`to` must have size of at least N bytes.
If the `to[N-1]` has null value, then replace it and all the
-preceeding nulls with the given padding.
+preceding nulls with the given padding.
STRINGPADN(to, N, PADDING, NULLVALUE) is an inverse operation.
*/
diff --git a/numpy/f2py/tests/test_return_character.py b/numpy/f2py/tests/test_return_character.py
index 7d4ced914..2c999ed0b 100644
--- a/numpy/f2py/tests/test_return_character.py
+++ b/numpy/f2py/tests/test_return_character.py
@@ -80,7 +80,7 @@ cf2py intent(out) ts
end
"""
- @pytest.mark.xfail(IS_S390X, reason="calback returns ' '")
+ @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
@pytest.mark.parametrize('name', 't0,t1,t5,s0,s1,s5,ss'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module, name), name)
@@ -139,7 +139,7 @@ module f90_return_char
end module f90_return_char
"""
- @pytest.mark.xfail(IS_S390X, reason="calback returns ' '")
+ @pytest.mark.xfail(IS_S390X, reason="callback returns ' '")
@pytest.mark.parametrize('name', 't0,t1,t5,ts,s0,s1,s5,ss'.split(','))
def test_all(self, name):
self.check_function(getattr(self.module.f90_return_char, name), name)
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index e566e253d..3967b43ee 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -291,7 +291,7 @@ def descr_to_dtype(descr):
Parameters
----------
descr : object
- The object retreived by dtype.descr. Can be passed to
+ The object retrieved by dtype.descr. Can be passed to
`numpy.dtype()` in order to replicate the input dtype.
Returns
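
descr_to_dtype(), whose docstring is corrected above, is the inverse of dtype.descr; a short round-trip sketch for context (not part of this commit):

    import numpy as np
    from numpy.lib.format import descr_to_dtype

    dt = np.dtype([('x', '<f8'), ('y', '<i4')])
    descr = dt.descr                   # e.g. [('x', '<f8'), ('y', '<i4')]
    assert descr_to_dtype(descr) == dt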
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 658ec5255..4e77f0d92 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -975,7 +975,7 @@ def _nanmedian1d(arr1d, overwrite_input=False):
)
if arr1d_parsed.size == 0:
- # Ensure that a nan-esque scalar of the appropiate type (and unit)
+ # Ensure that a nan-esque scalar of the appropriate type (and unit)
# is returned for `timedelta64` and `complexfloating`
return arr1d[-1]
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index b91bf440f..a40b1ca66 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -1634,7 +1634,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first line after
- the first `skip_header` lines. This line can optionally be preceeded
+ the first `skip_header` lines. This line can optionally be preceded
by a comment delimiter. If `names` is a sequence or a single-string of
comma-separated names, the names will be used to define the field names
in a structured dtype. If `names` is None, the names of the dtype
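
The genfromtxt docstring above describes names=True reading field names from a header line that may be preceded by a comment delimiter; a minimal sketch of that behaviour with illustrative data (not part of this commit):

    from io import StringIO
    import numpy as np

    txt = StringIO("# a b c\n1 2 3\n4 5 6")
    arr = np.genfromtxt(txt, names=True)  # names read despite the leading '#'
    print(arr['a'])                       # [1. 4.]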
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 83c028061..811faff79 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -67,7 +67,7 @@ def fliplr(m):
See Also
--------
flipud : Flip array in the up/down direction.
- flip : Flip array in one or more dimesions.
+ flip : Flip array in one or more dimensions.
rot90 : Rotate array counterclockwise.
Notes
@@ -120,7 +120,7 @@ def flipud(m):
See Also
--------
fliplr : Flip array in the left/right direction.
- flip : Flip array in one or more dimesions.
+ flip : Flip array in one or more dimensions.
rot90 : Rotate array counterclockwise.
Notes
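
The See Also entries corrected above point at np.flip; for reference, a tiny sketch of the three flips (not part of this commit):

    import numpy as np

    m = np.arange(6).reshape(2, 3)
    np.fliplr(m)             # reverse the left/right (column) direction
    np.flipud(m)             # reverse the up/down (row) direction
    np.flip(m, axis=(0, 1))  # flip in one or more dimensions at once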
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 2b686839a..95780d19d 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -1864,7 +1864,7 @@ def matrix_rank(A, tol=None, hermitian=False):
References
----------
- .. [1] MATLAB reference documention, "Rank"
+ .. [1] MATLAB reference documentation, "Rank"
https://www.mathworks.com/help/techdoc/ref/rank.html
.. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
"Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
@@ -2159,7 +2159,7 @@ def lstsq(a, b, rcond="warn"):
r"""
Return the least-squares solution to a linear matrix equation.
- Computes the vector `x` that approximatively solves the equation
+ Computes the vector `x` that approximately solves the equation
``a @ x = b``. The equation may be under-, well-, or over-determined
(i.e., the number of linearly independent rows of `a` can be less than,
equal to, or greater than its number of linearly independent columns).
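
The lstsq docstring above describes the least-squares sense in which ``a @ x = b`` is solved; a small worked example with illustrative data (not part of this commit):

    import numpy as np

    # Fit y = c0 + c1 * t through three points (overdetermined system).
    A = np.array([[1., 0.], [1., 1.], [1., 2.]])
    b = np.array([0.1, 0.9, 2.1])
    x, residuals, rank, sv = np.linalg.lstsq(A, b, rcond=None)
    # x approximately solves A @ x = b by minimising ||A @ x - b||.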
diff --git a/numpy/linalg/linalg.pyi b/numpy/linalg/linalg.pyi
index a35207d32..a60b9539e 100644
--- a/numpy/linalg/linalg.pyi
+++ b/numpy/linalg/linalg.pyi
@@ -97,7 +97,7 @@ def inv(a: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
@overload
def inv(a: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
-# TODO: The supported input and output dtypes are dependant on the value of `n`.
+# TODO: The supported input and output dtypes are dependent on the value of `n`.
# For example: `n < 0` always casts integer types to float64
def matrix_power(
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
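
The TODO above notes that matrix_power's result dtype depends on the value of `n`; a small sketch of the `n < 0` case it mentions (not part of this commit):

    import numpy as np

    a = np.array([[2, 0], [0, 2]])
    np.linalg.matrix_power(a, 3)    # stays integer: [[8, 0], [0, 8]]
    np.linalg.matrix_power(a, -1)   # needs the inverse, so the result is float64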
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index dd059fb63..a45323bb3 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -1956,8 +1956,8 @@ class TestMultiDot:
assert_almost_equal(multi_dot([A, B]), A.dot(B))
assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
- def test_basic_function_with_dynamic_programing_optimization(self):
- # multi_dot with four or more arguments uses the dynamic programing
+ def test_basic_function_with_dynamic_programming_optimization(self):
+ # multi_dot with four or more arguments uses the dynamic programming
# optimization and therefore deserve a separate
A = np.random.random((6, 2))
B = np.random.random((2, 6))
@@ -2018,8 +2018,8 @@ class TestMultiDot:
assert_almost_equal(out, A.dot(B))
assert_almost_equal(out, np.dot(A, B))
- def test_dynamic_programing_optimization_and_out(self):
- # multi_dot with four or more arguments uses the dynamic programing
+ def test_dynamic_programming_optimization_and_out(self):
+ # multi_dot with four or more arguments uses the dynamic programming
# optimization and therefore deserve a separate test
A = np.random.random((6, 2))
B = np.random.random((2, 6))
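
The renamed tests above cover multi_dot's dynamic-programming path; a short sketch of the four-operand case they refer to, with illustrative shapes (not part of this commit):

    import numpy as np
    from numpy.linalg import multi_dot

    A = np.random.random((6, 2))
    B = np.random.random((2, 6))
    C = np.random.random((6, 2))
    D = np.random.random((2, 6))
    # With four or more operands the multiplication order is chosen by
    # dynamic programming before the chain is evaluated.
    out = multi_dot([A, B, C, D])   # shape (6, 6)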
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index 10b1b209c..bdce8b3bd 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -667,7 +667,7 @@ def openfile(fname):
raise NotImplementedError("Wow, binary file")
-def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
+def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='',
varnames=None, vartypes=None):
"""
Creates a mrecarray from data stored in the file `filename`.
@@ -676,7 +676,7 @@ def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
----------
fname : {file name/handle}
Handle of an opened file.
- delimitor : {None, string}, optional
+ delimiter : {None, string}, optional
Alphanumeric character used to separate columns in the file.
If None, any (group of) white spacestring(s) will be used.
commentchar : {'#', string}, optional
@@ -699,14 +699,14 @@ def fromtextfile(fname, delimitor=None, commentchar='#', missingchar='',
while True:
line = ftext.readline()
firstline = line[:line.find(commentchar)].strip()
- _varnames = firstline.split(delimitor)
+ _varnames = firstline.split(delimiter)
if len(_varnames) > 1:
break
if varnames is None:
varnames = _varnames
# Get the data.
- _variables = masked_array([line.strip().split(delimitor) for line in ftext
+ _variables = masked_array([line.strip().split(delimiter) for line in ftext
if line[0] != commentchar and len(line) > 1])
(_, nfields) = _variables.shape
ftext.close()
diff --git a/numpy/ma/mrecords.pyi b/numpy/ma/mrecords.pyi
index 92d5afb89..cdd5347d6 100644
--- a/numpy/ma/mrecords.pyi
+++ b/numpy/ma/mrecords.pyi
@@ -78,7 +78,7 @@ def fromrecords(
def fromtextfile(
fname,
- delimitor=...,
+ delimiter=...,
commentchar=...,
missingchar=...,
varnames=...,
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 2fd353d23..7e9522b3a 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -1071,7 +1071,7 @@ class TestMaskedArrayArithmetic:
assert_equal(z.mask, [[1, 1, 1], [0, 0, 0]])
def test_mixed_arithmetic(self):
- # Tests mixed arithmetics.
+ # Tests mixed arithmetic.
na = np.array([1])
ma = array([1])
assert_(isinstance(na + ma, MaskedArray))
@@ -1084,7 +1084,7 @@ class TestMaskedArrayArithmetic:
assert_equal(getmaskarray(2 / a), [1, 0, 1])
def test_masked_singleton_arithmetic(self):
- # Tests some scalar arithmetics on MaskedArrays.
+ # Tests some scalar arithmetic on MaskedArrays.
# Masked singleton should remain masked no matter what
xm = array(0, mask=1)
assert_((1 / array(0)).mask)
@@ -1804,7 +1804,7 @@ class TestMaskedArrayArithmetic:
assert_equal(test.mask, [[False, True],
[False, True]])
- def test_numpyarithmetics(self):
+ def test_numpyarithmetic(self):
# Check that the mask is not back-propagated when using numpy functions
a = masked_array([-1, 0, 1, 2, 3], mask=[0, 0, 0, 0, 1])
control = masked_array([np.nan, np.nan, 0, np.log(2), -1],
@@ -2479,8 +2479,8 @@ class TestUfuncs:
# also check that allclose uses ma ufuncs, to avoid warning
allclose(m, 0.5)
-class TestMaskedArrayInPlaceArithmetics:
- # Test MaskedArray Arithmetics
+class TestMaskedArrayInPlaceArithmetic:
+ # Test MaskedArray Arithmetic
def setup(self):
x = arange(10)
@@ -3464,7 +3464,7 @@ class TestMaskedArrayMethods:
# Test sort on dtype with subarray (gh-8069)
# Just check that the sort does not error, structured array subarrays
# are treated as byte strings and that leads to differing behavior
- # depending on endianess and `endwith`.
+ # depending on endianness and `endwith`.
dt = np.dtype([('v', int, 2)])
a = a.view(dt)
test = sort(a)
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index 27df519d2..4b2c01df9 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -468,7 +468,7 @@ class TestMRecordsImport:
with temppath() as path:
with open(path, 'w') as f:
f.write(fcontent)
- mrectxt = fromtextfile(path, delimitor=',', varnames='ABCDEFG')
+ mrectxt = fromtextfile(path, delimiter=',', varnames='ABCDEFG')
assert_(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
diff --git a/numpy/polynomial/__init__.py b/numpy/polynomial/__init__.py
index 4b4361163..5a3addf4c 100644
--- a/numpy/polynomial/__init__.py
+++ b/numpy/polynomial/__init__.py
@@ -164,7 +164,7 @@ def set_default_printstyle(style):
1.0 + 2.0 x**1 + 3.0 x**2
>>> print(c)
1.0 + 2.0 T_1(x) + 3.0 T_2(x)
- >>> # Formatting supercedes all class/package-level defaults
+ >>> # Formatting supersedes all class/package-level defaults
>>> print(f"{p:unicode}")
1.0 + 2.0·x¹ + 3.0·x²
"""
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index 5525b232b..4b9f7c661 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -344,7 +344,7 @@ class ABCPolyBase(abc.ABC):
# Polynomial coefficient
# The coefficient array can be an object array with elements that
# will raise a TypeError with >= 0 (e.g. strings or Python
- # complex). In this case, represent the coeficient as-is.
+ # complex). In this case, represent the coefficient as-is.
try:
if coef >= 0:
next_term = f"+ {coef}"
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index 210000ec4..8288c6120 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -131,9 +131,9 @@ chebtrim = pu.trimcoef
#
def _cseries_to_zseries(c):
- """Covert Chebyshev series to z-series.
+ """Convert Chebyshev series to z-series.
- Covert a Chebyshev series to the equivalent z-series. The result is
+ Convert a Chebyshev series to the equivalent z-series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
@@ -156,9 +156,9 @@ def _cseries_to_zseries(c):
def _zseries_to_cseries(zs):
- """Covert z-series to a Chebyshev series.
+ """Convert z-series to a Chebyshev series.
- Covert a z series to the equivalent Chebyshev series. The result is
+ Convert a z series to the equivalent Chebyshev series. The result is
never an empty array. The dtype of the return is the same as that of
the input. No checks are run on the arguments as this routine is for
internal use.
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index d3b6432dc..72d068e31 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -1030,7 +1030,7 @@ def lagval3d(x, y, z, c):
Returns
-------
values : ndarray, compatible object
- The values of the multidimension polynomial on points formed with
+ The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index 60b6bfc72..8db1f0269 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -3624,7 +3624,7 @@ cdef class Generator:
from numpy.linalg import cholesky
l = cholesky(cov)
- # make sure check_valid is ignored whe method == 'cholesky'
+ # make sure check_valid is ignored when method == 'cholesky'
# since the decomposition will have failed if cov is not valid.
if check_valid != 'ignore' and method != 'cholesky':
if check_valid != 'warn' and check_valid != 'raise':
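
The comment fixed above concerns Generator.multivariate_normal with method='cholesky'; a minimal usage sketch with illustrative parameters (not part of this commit):

    import numpy as np

    rng = np.random.default_rng(0)
    mean = np.zeros(2)
    cov = np.eye(2)
    # With method='cholesky' the check_valid/tol validation is skipped,
    # since the decomposition itself fails for an invalid covariance.
    samples = rng.multivariate_normal(mean, cov, size=5, method='cholesky')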
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py
index 2bea3be86..81ab1afad 100644
--- a/numpy/typing/__init__.py
+++ b/numpy/typing/__init__.py
@@ -154,7 +154,7 @@ else:
__path__: List[str]
-@final # Dissallow the creation of arbitrary `NBitBase` subclasses
+@final # Disallow the creation of arbitrary `NBitBase` subclasses
class NBitBase:
"""
An object representing `numpy.number` precision during static type checking.
diff --git a/numpy/typing/_generic_alias.py b/numpy/typing/_generic_alias.py
index 8d65ef855..5ad5e580c 100644
--- a/numpy/typing/_generic_alias.py
+++ b/numpy/typing/_generic_alias.py
@@ -51,7 +51,7 @@ def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:
def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:
- """Recursivelly replace all typevars with those from `parameters`.
+ """Recursively replace all typevars with those from `parameters`.
Helper function for `_GenericAlias.__getitem__`.
diff --git a/numpy/typing/tests/data/fail/bitwise_ops.py b/numpy/typing/tests/data/fail/bitwise_ops.py
index 8a8f89755..ee9090007 100644
--- a/numpy/typing/tests/data/fail/bitwise_ops.py
+++ b/numpy/typing/tests/data/fail/bitwise_ops.py
@@ -16,5 +16,5 @@ u8 & f8 # E: No overload variant
~f8 # E: Unsupported operand type
# mypys' error message for `NoReturn` is unfortunately pretty bad
-# TODO: Reenable this once we add support for numerical precision for `number`s
+# TODO: Re-enable this once we add support for numerical precision for `number`s
# a = u8 | 0 # E: Need type annotation
diff --git a/numpy/typing/tests/data/fail/numerictypes.py b/numpy/typing/tests/data/fail/numerictypes.py
index 9a81cd9dc..a5c2814ef 100644
--- a/numpy/typing/tests/data/fail/numerictypes.py
+++ b/numpy/typing/tests/data/fail/numerictypes.py
@@ -1,6 +1,6 @@
import numpy as np
-# Techincally this works, but probably shouldn't. See
+# Technically this works, but probably shouldn't. See
#
# https://github.com/numpy/numpy/issues/16366
#
diff --git a/numpy/typing/tests/data/pass/simple.py b/numpy/typing/tests/data/pass/simple.py
index 243caf229..85965e0de 100644
--- a/numpy/typing/tests/data/pass/simple.py
+++ b/numpy/typing/tests/data/pass/simple.py
@@ -47,7 +47,7 @@ np.dtype(object_dtype)
np.dtype((np.int32, (np.int8, 4)))
-# Dtype comparision
+# Dtype comparison
np.dtype(float) == float
np.dtype(float) != np.float64
np.dtype(float) < None
diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py
index 35558c880..81863c780 100644
--- a/numpy/typing/tests/test_typing.py
+++ b/numpy/typing/tests/test_typing.py
@@ -36,7 +36,7 @@ OUTPUT_MYPY: Dict[str, List[str]] = {}
def _key_func(key: str) -> str:
- """Split at the first occurance of the ``:`` character.
+ """Split at the first occurrence of the ``:`` character.
Windows drive-letters (*e.g.* ``C:``) are ignored herein.
"""
@@ -246,8 +246,8 @@ def _parse_reveals(file: IO[str]) -> List[str]:
comments_array = np.char.partition(string.split("\n"), sep=" # E: ")[:, 2]
comments = "/n".join(comments_array)
- # Only search for the `{*}` pattern within comments,
- # otherwise there is the risk of accidently grabbing dictionaries and sets
+ # Only search for the `{*}` pattern within comments, otherwise
+ # there is the risk of accidentally grabbing dictionaries and sets
key_set = set(re.findall(r"\{(.*?)\}", comments))
kwargs = {
k: FORMAT_DICT.get(k, f"<UNRECOGNIZED FORMAT KEY {k!r}>") for k in key_set