summary refs log tree commit diff
path: root/numpy/core/src
diff options
context:
space:
mode:
Diffstat (limited to 'numpy/core/src')
-rw-r--r--numpy/core/src/_simd/_simd_inc.h.src2
-rw-r--r--numpy/core/src/common/npy_cpu_dispatch.h2
-rw-r--r--numpy/core/src/common/npy_cpu_features.c.src2
-rw-r--r--numpy/core/src/common/npy_cpuinfo_parser.h4
-rw-r--r--numpy/core/src/common/simd/emulate_maskop.h2
-rw-r--r--numpy/core/src/common/simd/intdiv.h2
-rw-r--r--numpy/core/src/common/simd/neon/math.h2
-rw-r--r--numpy/core/src/common/simd/vsx/operators.h2
-rw-r--r--numpy/core/src/multiarray/abstractdtypes.c2
-rw-r--r--numpy/core/src/multiarray/array_coercion.c4
-rw-r--r--numpy/core/src/multiarray/arrayobject.c4
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c4
-rw-r--r--numpy/core/src/multiarray/datetime.c2
-rw-r--r--numpy/core/src/multiarray/dtypemeta.c2
-rw-r--r--numpy/core/src/multiarray/einsum_sumprod.c.src18
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src2
-rw-r--r--numpy/core/src/multiarray/nditer_api.c6
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c2
-rw-r--r--numpy/core/src/multiarray/nditer_templ.c.src2
-rw-r--r--numpy/core/src/umath/_scaled_float_dtype.c6
-rw-r--r--numpy/core/src/umath/_umath_tests.c.src2
-rw-r--r--numpy/core/src/umath/_umath_tests.dispatch.c2
-rw-r--r--numpy/core/src/umath/loops_utils.h.src2
23 files changed, 39 insertions, 39 deletions
diff --git a/numpy/core/src/_simd/_simd_inc.h.src b/numpy/core/src/_simd/_simd_inc.h.src
index 9858fc0dc..fbdf982c2 100644
--- a/numpy/core/src/_simd/_simd_inc.h.src
+++ b/numpy/core/src/_simd/_simd_inc.h.src
@@ -113,7 +113,7 @@ typedef struct
int is_scalar:1;
// returns '1' if the type represent a vector
int is_vector:1;
- // returns the len of multi-vector if the type reprsent x2 or x3 vector
+ // returns the len of multi-vector if the type represent x2 or x3 vector
// otherwise returns 0, e.g. returns 2 if data type is simd_data_vu8x2
int is_vectorx;
// returns the equivalent scalar data type e.g. simd_data_vu8 -> simd_data_u8
diff --git a/numpy/core/src/common/npy_cpu_dispatch.h b/numpy/core/src/common/npy_cpu_dispatch.h
index 8c2b40c27..e814cd425 100644
--- a/numpy/core/src/common/npy_cpu_dispatch.h
+++ b/numpy/core/src/common/npy_cpu_dispatch.h
@@ -196,7 +196,7 @@
* Example:
* Assume we have a dispatch-able source exporting the following function:
*
- * @targets baseline avx2 avx512_skx // configration statements
+ * @targets baseline avx2 avx512_skx // configuration statements
*
* void NPY_CPU_DISPATCH_CURFX(dispatch_me)(const int *src, int *dst)
* {
diff --git a/numpy/core/src/common/npy_cpu_features.c.src b/numpy/core/src/common/npy_cpu_features.c.src
index 1e0f4a571..a2383c45f 100644
--- a/numpy/core/src/common/npy_cpu_features.c.src
+++ b/numpy/core/src/common/npy_cpu_features.c.src
@@ -230,7 +230,7 @@ npy__cpu_try_disable_env(void)
notsupp_cur[flen] = ' '; notsupp_cur += flen + 1;
goto next;
}
- // Finaly we can disable it
+ // Finally we can disable it
npy__cpu_have[feature_id] = 0;
next:
feature = strtok(NULL, delim);
diff --git a/numpy/core/src/common/npy_cpuinfo_parser.h b/numpy/core/src/common/npy_cpuinfo_parser.h
index 9e85e3a2f..364873a23 100644
--- a/numpy/core/src/common/npy_cpuinfo_parser.h
+++ b/numpy/core/src/common/npy_cpuinfo_parser.h
@@ -123,7 +123,7 @@ read_file(const char* pathname, char* buffer, size_t buffsize)
}
/*
- * Extract the content of a the first occurence of a given field in
+ * Extract the content of a the first occurrence of a given field in
* the content of /proc/cpuinfo and return it as a heap-allocated
* string that must be freed by the caller.
*
@@ -138,7 +138,7 @@ extract_cpuinfo_field(const char* buffer, int buflen, const char* field)
int len;
const char *p, *q;
- /* Look for first field occurence, and ensures it starts the line. */
+ /* Look for first field occurrence, and ensures it starts the line. */
p = buffer;
for (;;) {
p = memmem(p, bufend-p, field, fieldlen);
diff --git a/numpy/core/src/common/simd/emulate_maskop.h b/numpy/core/src/common/simd/emulate_maskop.h
index 7e7446bc5..41e397c2d 100644
--- a/numpy/core/src/common/simd/emulate_maskop.h
+++ b/numpy/core/src/common/simd/emulate_maskop.h
@@ -1,5 +1,5 @@
/**
- * This header is used internaly by all current supported SIMD extention,
+ * This header is used internally by all current supported SIMD extensions,
* execpt for AVX512.
*/
#ifndef NPY_SIMD
diff --git a/numpy/core/src/common/simd/intdiv.h b/numpy/core/src/common/simd/intdiv.h
index f6ea9abf2..5d2ab2906 100644
--- a/numpy/core/src/common/simd/intdiv.h
+++ b/numpy/core/src/common/simd/intdiv.h
@@ -39,7 +39,7 @@
* for (; len >= vstep; src += vstep, dst += vstep, len -= vstep) {
* npyv_s32 a = npyv_load_s32(*src); // load s32 vector from memory
* a = npyv_divc_s32(a, divisor); // divide all elements by x
- * npyv_store_s32(dst, a); // store s32 vector into memroy
+ * npyv_store_s32(dst, a); // store s32 vector into memory
* }
*
** NOTES:
diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h
index ced82d1de..19ea6f22f 100644
--- a/numpy/core/src/common/simd/neon/math.h
+++ b/numpy/core/src/common/simd/neon/math.h
@@ -31,7 +31,7 @@ NPY_FINLINE npyv_f32 npyv_square_f32(npyv_f32 a)
const npyv_f32 zero = vdupq_n_f32(0.0f);
const npyv_u32 pinf = vdupq_n_u32(0x7f800000);
npyv_u32 is_zero = vceqq_f32(a, zero), is_inf = vceqq_u32(vreinterpretq_u32_f32(a), pinf);
- // guard agianst floating-point division-by-zero error
+ // guard against floating-point division-by-zero error
npyv_f32 guard_byz = vbslq_f32(is_zero, vreinterpretq_f32_u32(pinf), a);
// estimate to (1/√a)
npyv_f32 rsqrte = vrsqrteq_f32(guard_byz);
diff --git a/numpy/core/src/common/simd/vsx/operators.h b/numpy/core/src/common/simd/vsx/operators.h
index 23c5d0dbe..d34057ff3 100644
--- a/numpy/core/src/common/simd/vsx/operators.h
+++ b/numpy/core/src/common/simd/vsx/operators.h
@@ -103,7 +103,7 @@ NPYV_IMPL_VSX_BIN_B64(or)
NPYV_IMPL_VSX_BIN_B64(xor)
// NOT
-// note: we implement npyv_not_b*(boolen types) for internal use*/
+// note: we implement npyv_not_b*(boolean types) for internal use*/
#define NPYV_IMPL_VSX_NOT_INT(VEC_LEN) \
NPY_FINLINE npyv_u##VEC_LEN npyv_not_u##VEC_LEN(npyv_u##VEC_LEN a) \
{ return vec_nor(a, a); } \
diff --git a/numpy/core/src/multiarray/abstractdtypes.c b/numpy/core/src/multiarray/abstractdtypes.c
index 99573f089..cc1d7fad8 100644
--- a/numpy/core/src/multiarray/abstractdtypes.c
+++ b/numpy/core/src/multiarray/abstractdtypes.c
@@ -157,7 +157,7 @@ int_common_dtype(PyArray_DTypeMeta *NPY_UNUSED(cls), PyArray_DTypeMeta *other)
}
else if (PyTypeNum_ISNUMBER(other->type_num) ||
other->type_num == NPY_TIMEDELTA) {
- /* All other numeric types (ant timdelta) are preserved: */
+ /* All other numeric types (ant timedelta) are preserved: */
Py_INCREF(other);
return other;
}
diff --git a/numpy/core/src/multiarray/array_coercion.c b/numpy/core/src/multiarray/array_coercion.c
index 90b50097a..847bdafc3 100644
--- a/numpy/core/src/multiarray/array_coercion.c
+++ b/numpy/core/src/multiarray/array_coercion.c
@@ -136,7 +136,7 @@ _prime_global_pytype_to_type_dict(void)
*
* This assumes that the DType class is guaranteed to hold on the
* python type (this assumption is guaranteed).
- * This functionality supercedes ``_typenum_fromtypeobj``.
+ * This functionality supersedes ``_typenum_fromtypeobj``.
*
* @param DType DType to map the python type to
* @param pytype Python type to map from
@@ -1400,7 +1400,7 @@ PyArray_DiscoverDTypeAndShape(
* These should be largely deprecated, and represent only the DType class
* for most `dtype` parameters.
*
- * TODO: This function should eventually recieve a deprecation warning and
+ * TODO: This function should eventually receive a deprecation warning and
* be removed.
*
* @param descr
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index d653bfc22..9b9df08f2 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -858,7 +858,7 @@ _uni_release(char *ptr, int nc)
relfunc(aptr, N1); \
return -1; \
} \
- val = compfunc(aptr, bptr, N1, N2); \
+ val = compfunc(aptr, bptr, N1, N2); \
*dptr = (val CMP 0); \
PyArray_ITER_NEXT(iself); \
PyArray_ITER_NEXT(iother); \
@@ -870,7 +870,7 @@ _uni_release(char *ptr, int nc)
#define _reg_loop(CMP) { \
while(size--) { \
- val = compfunc((void *)iself->dataptr, \
+ val = compfunc((void *)iself->dataptr, \
(void *)iother->dataptr, \
N1, N2); \
*dptr = (val CMP 0); \
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index b6755e91d..12dd99504 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -1649,7 +1649,7 @@ PyArray_ResultType(
}
Py_INCREF(all_DTypes[i_all]);
/*
- * Leave the decriptor empty, if we need it, we will have to go
+ * Leave the descriptor empty, if we need it, we will have to go
* to more extreme lengths unfortunately.
*/
all_descriptors[i_all] = NULL;
@@ -2243,7 +2243,7 @@ PyArray_AddCastingImplementation(PyBoundArrayMethodObject *meth)
* Add a new casting implementation using a PyArrayMethod_Spec.
*
* @param spec
- * @param private If private, allow slots not publically exposed.
+ * @param private If private, allow slots not publicly exposed.
* @return 0 on success -1 on failure
*/
NPY_NO_EXPORT int
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 093090b4c..11a941e72 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -427,7 +427,7 @@ PyArray_DatetimeStructToDatetime(
}
/*NUMPY_API
- * Create a timdelta value from a filled timedelta struct and resolution unit.
+ * Create a timedelta value from a filled timedelta struct and resolution unit.
*
* TO BE REMOVED - NOT USED INTERNALLY.
*/
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index 059ec201e..cbde91b76 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -101,7 +101,7 @@ static PyObject *
legacy_dtype_default_new(PyArray_DTypeMeta *self,
PyObject *args, PyObject *kwargs)
{
- /* TODO: This should allow endianess and possibly metadata */
+ /* TODO: This should allow endianness and possibly metadata */
if (NPY_DT_is_parametric(self)) {
/* reject parametric ones since we would need to get unit, etc. info */
PyErr_Format(PyExc_TypeError,
diff --git a/numpy/core/src/multiarray/einsum_sumprod.c.src b/numpy/core/src/multiarray/einsum_sumprod.c.src
index 333b8e188..29ceabd71 100644
--- a/numpy/core/src/multiarray/einsum_sumprod.c.src
+++ b/numpy/core/src/multiarray/einsum_sumprod.c.src
@@ -80,7 +80,7 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count)
/* Use aligned instructions if possible */
const int is_aligned = EINSUM_IS_ALIGNED(data);
const int vstep = npyv_nlanes_@sfx@;
- npyv_@sfx@ vaccum = npyv_zero_@sfx@();
+ npyv_@sfx@ v_accum = npyv_zero_@sfx@();
const npy_intp vstepx4 = vstep * 4;
/**begin repeat1
@@ -98,15 +98,15 @@ static NPY_GCC_OPT_3 @temptype@ @name@_sum_of_arr(@type@ *data, npy_intp count)
npyv_@sfx@ a01 = npyv_add_@sfx@(a0, a1);
npyv_@sfx@ a23 = npyv_add_@sfx@(a2, a3);
npyv_@sfx@ a0123 = npyv_add_@sfx@(a01, a23);
- vaccum = npyv_add_@sfx@(a0123, vaccum);
+ v_accum = npyv_add_@sfx@(a0123, v_accum);
}
}
/**end repeat1**/
for (; count > 0; count -= vstep, data += vstep) {
npyv_@sfx@ a = npyv_load_tillz_@sfx@(data, count);
- vaccum = npyv_add_@sfx@(a, vaccum);
+ v_accum = npyv_add_@sfx@(a, v_accum);
}
- accum = npyv_sum_@sfx@(vaccum);
+ accum = npyv_sum_@sfx@(v_accum);
npyv_cleanup();
#else
#ifndef NPY_DISABLE_OPTIMIZATION
@@ -485,7 +485,7 @@ static NPY_GCC_OPT_3 void
/* Use aligned instructions if possible */
const int is_aligned = EINSUM_IS_ALIGNED(data0) && EINSUM_IS_ALIGNED(data1);
const int vstep = npyv_nlanes_@sfx@;
- npyv_@sfx@ vaccum = npyv_zero_@sfx@();
+ npyv_@sfx@ v_accum = npyv_zero_@sfx@();
/**begin repeat2
* #cond = if(is_aligned), else#
@@ -501,19 +501,19 @@ static NPY_GCC_OPT_3 void
npyv_@sfx@ a@i@ = npyv_@ld@_@sfx@(data0 + vstep * @i@);
npyv_@sfx@ b@i@ = npyv_@ld@_@sfx@(data1 + vstep * @i@);
/**end repeat3**/
- npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, vaccum);
+ npyv_@sfx@ ab3 = npyv_muladd_@sfx@(a3, b3, v_accum);
npyv_@sfx@ ab2 = npyv_muladd_@sfx@(a2, b2, ab3);
npyv_@sfx@ ab1 = npyv_muladd_@sfx@(a1, b1, ab2);
- vaccum = npyv_muladd_@sfx@(a0, b0, ab1);
+ v_accum = npyv_muladd_@sfx@(a0, b0, ab1);
}
}
/**end repeat2**/
for (; count > 0; count -= vstep, data0 += vstep, data1 += vstep) {
npyv_@sfx@ a = npyv_load_tillz_@sfx@(data0, count);
npyv_@sfx@ b = npyv_load_tillz_@sfx@(data1, count);
- vaccum = npyv_muladd_@sfx@(a, b, vaccum);
+ v_accum = npyv_muladd_@sfx@(a, b, v_accum);
}
- accum = npyv_sum_@sfx@(vaccum);
+ accum = npyv_sum_@sfx@(v_accum);
npyv_cleanup();
#else
#ifndef NPY_DISABLE_OPTIMIZATION
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index b32664cc9..e313d2447 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -1849,7 +1849,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
return -1;
}
#else
- /* The operand order is reveresed here */
+ /* The operand order is reversed here */
char *args[2] = {subspace_ptrs[1], subspace_ptrs[0]};
npy_intp strides[2] = {subspace_strides[1], subspace_strides[0]};
if (NPY_UNLIKELY(cast_info.func(&cast_info.context,
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index 811eece7d..0f0a79ddf 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -2130,7 +2130,7 @@ npyiter_copy_to_buffers(NpyIter *iter, char **prev_dataptrs)
/*
* Try to do make the outersize as big as possible. This allows
* it to shrink when processing the last bit of the outer reduce loop,
- * then grow again at the beginnning of the next outer reduce loop.
+ * then grow again at the beginning of the next outer reduce loop.
*/
NBF_REDUCE_OUTERSIZE(bufferdata) = (NAD_SHAPE(reduce_outeraxisdata)-
NAD_INDEX(reduce_outeraxisdata));
@@ -2804,9 +2804,9 @@ npyiter_checkreducesize(NpyIter *iter, npy_intp count,
if (coord != 0) {
/*
* In this case, it is only safe to reuse the buffer if the amount
- * of data copied is not more then the current axes, as is the
+ * of data copied is not more than the current axes, as is the
* case when reuse_reduce_loops was active already.
- * It should be in principle OK when the idim loop returns immidiatly.
+ * It should be in principle OK when the idim loop returns immediately.
*/
NIT_ITFLAGS(iter) &= ~NPY_ITFLAG_REUSE_REDUCE_LOOPS;
}
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index 57dbb3a94..bf32e1f6b 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -1405,7 +1405,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop)
/*
* Check whether a reduction is OK based on the flags and the operand being
* readwrite. This path is deprecated, since usually only specific axes
- * should be reduced. If axes are specified explicitely, the flag is
+ * should be reduced. If axes are specified explicitly, the flag is
* unnecessary.
*/
static int
diff --git a/numpy/core/src/multiarray/nditer_templ.c.src b/numpy/core/src/multiarray/nditer_templ.c.src
index 05ce6ae75..3f91a482b 100644
--- a/numpy/core/src/multiarray/nditer_templ.c.src
+++ b/numpy/core/src/multiarray/nditer_templ.c.src
@@ -132,7 +132,7 @@ npyiter_iternext_itflags@tag_itflags@_dims@tag_ndim@_iters@tag_nop@(
/* Reset the 1st and 2nd indices to 0 */
NAD_INDEX(axisdata0) = 0;
NAD_INDEX(axisdata1) = 0;
- /* Reset the 1st and 2nd pointers to the value of the 3nd */
+ /* Reset the 1st and 2nd pointers to the value of the 3rd */
for (istrides = 0; istrides < nstrides; ++istrides) {
NAD_PTRS(axisdata0)[istrides] = NAD_PTRS(axisdata2)[istrides];
NAD_PTRS(axisdata1)[istrides] = NAD_PTRS(axisdata2)[istrides];
diff --git a/numpy/core/src/umath/_scaled_float_dtype.c b/numpy/core/src/umath/_scaled_float_dtype.c
index 866f636a0..eeef33a3d 100644
--- a/numpy/core/src/umath/_scaled_float_dtype.c
+++ b/numpy/core/src/umath/_scaled_float_dtype.c
@@ -733,9 +733,9 @@ NPY_NO_EXPORT PyObject *
get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
{
/* Allow calling the function multiple times. */
- static npy_bool initalized = NPY_FALSE;
+ static npy_bool initialized = NPY_FALSE;
- if (initalized) {
+ if (initialized) {
Py_INCREF(&PyArray_SFloatDType);
return (PyObject *)&PyArray_SFloatDType;
}
@@ -764,6 +764,6 @@ get_sfloat_dtype(PyObject *NPY_UNUSED(mod), PyObject *NPY_UNUSED(args))
return NULL;
}
- initalized = NPY_TRUE;
+ initialized = NPY_TRUE;
return (PyObject *)&PyArray_SFloatDType;
}
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index 0cd673831..ed4c617a4 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -586,7 +586,7 @@ fail:
return NULL;
}
-// Testing the utilites of the CPU dispatcher
+// Testing the utilities of the CPU dispatcher
#ifndef NPY_DISABLE_OPTIMIZATION
#include "_umath_tests.dispatch.h"
#endif
diff --git a/numpy/core/src/umath/_umath_tests.dispatch.c b/numpy/core/src/umath/_umath_tests.dispatch.c
index 66058550e..9d8df4c86 100644
--- a/numpy/core/src/umath/_umath_tests.dispatch.c
+++ b/numpy/core/src/umath/_umath_tests.dispatch.c
@@ -1,5 +1,5 @@
/**
- * Testing the utilites of the CPU dispatcher
+ * Testing the utilities of the CPU dispatcher
*
* @targets $werror baseline
* SSE2 SSE41 AVX2
diff --git a/numpy/core/src/umath/loops_utils.h.src b/numpy/core/src/umath/loops_utils.h.src
index 1a2a5a32b..762e9ee59 100644
--- a/numpy/core/src/umath/loops_utils.h.src
+++ b/numpy/core/src/umath/loops_utils.h.src
@@ -6,7 +6,7 @@
/**
* Old versions of MSVC causes ambiguous link errors when we deal with large SIMD kernels
- * which lead to break the build, probably releated to the following bug:
+ * which lead to break the build, probably related to the following bug:
* https://developercommunity.visualstudio.com/content/problem/415095/internal-compiler-error-with-perfectly-forwarded-r.html
*/
#if defined(_MSC_VER) && _MSC_VER < 1916