author    Matti Picus <matti.picus@gmail.com>  2023-02-12 07:41:54 +0200
committer GitHub <noreply@github.com>          2023-02-12 07:41:54 +0200
commit    9dde0a930aa568bbdd3ba176b12d1d46c72d3d4e
tree      b11aaf190594f6fa3eec452ea6ef3e095d1f7f5a
parent    671de61aeaf2b4ec25f058f60f629f412d7df653
parent    e1e487acf1d820cbab8a6f97986bf2fb451dfa8e
Merge pull request #23200 from DimitriPapadopoulos/codespell
Fix typos found by codespell
-rw-r--r--  doc/release/upcoming_changes/23041.expired.rst       |  4
-rw-r--r--  numpy/__init__.py                                    |  2
-rw-r--r--  numpy/__init__.pyi                                   |  2
-rw-r--r--  numpy/core/_add_newdocs_scalars.py                   |  2
-rw-r--r--  numpy/core/src/common/dlpack/dlpack.h                |  4
-rw-r--r--  numpy/core/src/common/simd/avx2/operators.h          |  2
-rw-r--r--  numpy/core/src/common/simd/neon/math.h               |  2
-rw-r--r--  numpy/core/src/common/simd/sse/math.h                | 10
-rw-r--r--  numpy/core/src/multiarray/arrayobject.c              |  2
-rw-r--r--  numpy/core/src/multiarray/dtypemeta.c                |  2
-rw-r--r--  numpy/core/src/npysort/heapsort.cpp                  |  2
-rw-r--r--  numpy/core/src/npysort/mergesort.cpp                 |  2
-rw-r--r--  numpy/core/src/npysort/timsort.cpp                   |  2
-rw-r--r--  numpy/core/src/umath/legacy_array_method.c           |  2
-rw-r--r--  numpy/core/src/umath/loops_arithm_fp.dispatch.c.src  |  8
-rw-r--r--  numpy/core/src/umath/scalarmath.c.src                |  2
-rw-r--r--  numpy/core/src/umath/ufunc_type_resolution.c         |  2
-rw-r--r--  numpy/core/tests/test_einsum.py                      |  2
-rw-r--r--  numpy/core/tests/test_multiarray.py                  |  2
-rw-r--r--  numpy/core/tests/test_nep50_promotions.py            |  2
-rw-r--r--  numpy/core/tests/test_ufunc.py                       |  2
-rw-r--r--  numpy/core/tests/test_umath.py                       |  2
-rw-r--r--  numpy/distutils/ccompiler_opt.py                     |  4
-rw-r--r--  numpy/f2py/symbolic.py                               |  4
-rw-r--r--  numpy/lib/polynomial.py                              |  2
25 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/doc/release/upcoming_changes/23041.expired.rst b/doc/release/upcoming_changes/23041.expired.rst
index 6270083f0..9049ea70f 100644
--- a/doc/release/upcoming_changes/23041.expired.rst
+++ b/doc/release/upcoming_changes/23041.expired.rst
@@ -1,5 +1,5 @@
-Nose suppport has been removed
-------------------------------
+Nose support has been removed
+-----------------------------
NumPy switched to using pytest in 2018 and nose has been unmaintained for many
years. We have kept NumPy's nose support to avoid breaking downstream projects
who might have been using it and not yet switched to pytest or some other
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 052ba7327..9c50e2493 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -197,7 +197,7 @@ else:
"`np.{n}` is a deprecated alias for `{an}`. (Deprecated NumPy 1.24)")
# Some of these are awkward (since `np.str` may be preferable in the long
- # term), but overall the names ending in 0 seem undesireable
+ # term), but overall the names ending in 0 seem undesirable
_type_info = [
("bool8", bool_, "np.bool_"),
("int0", intp, "np.intp"),
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 5bc8f57f4..fb041ddc4 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -1534,7 +1534,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeType, _DType_co]):
# NOTE: In practice any object is accepted by `obj`, but as `__array_finalize__`
# is a pseudo-abstract method the type has been narrowed down in order to
- # grant subclasses a bit more flexiblity
+ # grant subclasses a bit more flexibility
def __array_finalize__(self, obj: None | NDArray[Any], /) -> None: ...
def __array_wrap__(
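The flexibility the comment refers to, sketched outside the patch: obj is None for explicit construction and an ndarray for view casting or new-from-template, so a subclass can propagate attributes either way.

import numpy as np

class Tagged(np.ndarray):
    # Pseudo-abstract hook: receives None or the source ndarray.
    def __array_finalize__(self, obj):
        self.tag = getattr(obj, "tag", "untagged")

t = np.arange(3).view(Tagged)  # view casting: obj is the base array
print(t.tag)      # 'untagged'
print(t[:2].tag)  # 'untagged' (new-from-template inherits via obj)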
diff --git a/numpy/core/_add_newdocs_scalars.py b/numpy/core/_add_newdocs_scalars.py
index 15d37522a..86fe5583c 100644
--- a/numpy/core/_add_newdocs_scalars.py
+++ b/numpy/core/_add_newdocs_scalars.py
@@ -255,7 +255,7 @@ add_newdoc_for_scalar_type('void', [],
``\0`` bytes. The 5 can be a Python or NumPy integer.
2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
The dtype itemsize will match the byte string length, here ``"V10"``.
- 3. When a ``dtype=`` is passed the call is rougly the same as an
+ 3. When a ``dtype=`` is passed the call is roughly the same as an
array creation. However, a void scalar rather than array is returned.
Please see the examples which show all three different conventions.
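For reference, the three conventions rendered as a minimal sketch (values chosen here purely for illustration):

import numpy as np

v1 = np.void(5)                # five \0 bytes, dtype "V5"
v2 = np.void(b"bytes-like")    # itemsize matches the ten input bytes, "V10"
v3 = np.void((1, 2.0), dtype="i4,f8")  # roughly array creation, but a scalar
print(v1.dtype, v2.dtype, v3.dtype)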
diff --git a/numpy/core/src/common/dlpack/dlpack.h b/numpy/core/src/common/dlpack/dlpack.h
index a516572b8..f0cbf6136 100644
--- a/numpy/core/src/common/dlpack/dlpack.h
+++ b/numpy/core/src/common/dlpack/dlpack.h
@@ -197,7 +197,7 @@ typedef struct {
* `byte_offset` field should be used to point to the beginning of the data.
*
* Note that as of Nov 2021, multiply libraries (CuPy, PyTorch, TensorFlow,
- * TVM, perhaps others) do not adhere to this 256 byte aligment requirement
+ * TVM, perhaps others) do not adhere to this 256 byte alignment requirement
* on CPU/CUDA/ROCm, and always use `byte_offset=0`. This must be fixed
* (after which this note will be updated); at the moment it is recommended
* to not rely on the data pointer being correctly aligned.
@@ -317,4 +317,4 @@ struct DLManagedTensorVersioned {
#ifdef __cplusplus
} // DLPACK_EXTERN_C
#endif
-#endif // DLPACK_DLPACK_H_
\ No newline at end of file
+#endif // DLPACK_DLPACK_H_
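A hedged illustration of the alignment note above: even NumPy's own allocations are not guaranteed 256-byte aligned, which is exactly why consumers should not rely on the data pointer meeting the requirement.

import numpy as np

a = np.arange(10.0)
# Often non-zero: the buffer is typically 16- or 64-byte aligned,
# not the 256 bytes the DLPack spec nominally requires.
print(a.ctypes.data % 256)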
diff --git a/numpy/core/src/common/simd/avx2/operators.h b/numpy/core/src/common/simd/avx2/operators.h
index 86e0038d9..7b9b6a344 100644
--- a/numpy/core/src/common/simd/avx2/operators.h
+++ b/numpy/core/src/common/simd/avx2/operators.h
@@ -205,7 +205,7 @@ NPY_FINLINE __m256i npyv_cmpge_u32(__m256i a, __m256i b)
#define npyv_cmple_u64(A, B) npyv_cmpge_u64(B, A)
#define npyv_cmple_s64(A, B) npyv_cmpge_s64(B, A)
-// precision comparison (orderd)
+// precision comparison (ordered)
#define npyv_cmpeq_f32(A, B) _mm256_castps_si256(_mm256_cmp_ps(A, B, _CMP_EQ_OQ))
#define npyv_cmpeq_f64(A, B) _mm256_castpd_si256(_mm256_cmp_pd(A, B, _CMP_EQ_OQ))
#define npyv_cmpneq_f32(A, B) _mm256_castps_si256(_mm256_cmp_ps(A, B, _CMP_NEQ_UQ))
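The ordered/unordered split above (_CMP_EQ_OQ vs _CMP_NEQ_UQ) mirrors IEEE semantics, sketched here in NumPy terms: equality is False against NaN, inequality is True.

import numpy as np

x = np.array([1.0, np.nan])
print(x == x)  # [ True False]  ordered equal: NaN compares unequal
print(x != x)  # [False  True]  unordered not-equal: NaN compares true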
diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h
index 01344a41f..58d14809f 100644
--- a/numpy/core/src/common/simd/neon/math.h
+++ b/numpy/core/src/common/simd/neon/math.h
@@ -352,7 +352,7 @@ NPY_FINLINE npyv_f32 npyv_rint_f32(npyv_f32 a)
npyv_u32 nfinite_mask = vshlq_n_u32(vreinterpretq_u32_f32(a), 1);
nfinite_mask = vandq_u32(nfinite_mask, exp_mask);
nfinite_mask = vceqq_u32(nfinite_mask, exp_mask);
- // elminate nans/inf to avoid invalid fp errors
+ // eliminate nans/inf to avoid invalid fp errors
npyv_f32 x = vreinterpretq_f32_u32(
veorq_u32(nfinite_mask, vreinterpretq_u32_f32(a)));
/**
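How the XOR above eliminates NaNs, sketched with NumPy bit views (illustrative only): the unordered self-comparison yields an all-ones mask exactly in the NaN lanes, and flipping every bit of a NaN clears its exponent, leaving a harmless finite value, while zero-mask lanes pass through unchanged.

import numpy as np

a = np.array([1.5, np.nan])
bits = a.view(np.uint64)
mask = np.where(a != a, np.uint64(0xFFFFFFFFFFFFFFFF), np.uint64(0))
cleaned = (bits ^ mask).view(np.float64)
print(np.isfinite(cleaned))  # [ True  True]: the NaN lane is now finite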
diff --git a/numpy/core/src/common/simd/sse/math.h b/numpy/core/src/common/simd/sse/math.h
index 6b42d8ba3..b51c935af 100644
--- a/numpy/core/src/common/simd/sse/math.h
+++ b/numpy/core/src/common/simd/sse/math.h
@@ -298,7 +298,7 @@ NPY_FINLINE npyv_f64 npyv_rint_f64(npyv_f64 a)
const __m128d szero = _mm_set1_pd(-0.0);
const __m128d two_power_52 = _mm_set1_pd(0x10000000000000);
__m128d nan_mask = _mm_cmpunord_pd(a, a);
- // elminate nans to avoid invalid fp errors within cmpge
+ // eliminate nans to avoid invalid fp errors within cmpge
__m128d abs_x = npyv_abs_f64(_mm_xor_pd(nan_mask, a));
// round by add magic number 2^52
// assuming that MXCSR register is set to rounding
@@ -344,7 +344,7 @@ NPY_FINLINE npyv_f64 npyv_rint_f64(npyv_f64 a)
const __m128d szero = _mm_set1_pd(-0.0);
const __m128d two_power_52 = _mm_set1_pd(0x10000000000000);
__m128d nan_mask = _mm_cmpunord_pd(a, a);
- // elminate nans to avoid invalid fp errors within cmpge
+ // eliminate nans to avoid invalid fp errors within cmpge
__m128d x = _mm_xor_pd(nan_mask, a);
__m128d abs_x = npyv_abs_f64(x);
__m128d sign_x = _mm_and_pd(x, szero);
@@ -377,7 +377,7 @@ NPY_FINLINE npyv_f64 npyv_rint_f64(npyv_f64 a)
nfinite_mask = _mm_and_si128(nfinite_mask, exp_mask);
nfinite_mask = _mm_cmpeq_epi32(nfinite_mask, exp_mask);
- // elminate nans/inf to avoid invalid fp errors
+ // eliminate nans/inf to avoid invalid fp errors
__m128 x = _mm_xor_ps(a, _mm_castsi128_ps(nfinite_mask));
__m128i trunci = _mm_cvttps_epi32(x);
__m128 trunc = _mm_cvtepi32_ps(trunci);
@@ -394,7 +394,7 @@ NPY_FINLINE npyv_f64 npyv_rint_f64(npyv_f64 a)
const __m128d szero = _mm_set1_pd(-0.0);
const __m128d two_power_52 = _mm_set1_pd(0x10000000000000);
__m128d nan_mask = _mm_cmpunord_pd(a, a);
- // elminate nans to avoid invalid fp errors within cmpge
+ // eliminate nans to avoid invalid fp errors within cmpge
__m128d abs_x = npyv_abs_f64(_mm_xor_pd(nan_mask, a));
// round by add magic number 2^52
// assuming that MXCSR register is set to rounding
@@ -443,7 +443,7 @@ NPY_FINLINE npyv_f64 npyv_rint_f64(npyv_f64 a)
const __m128d szero = _mm_set1_pd(-0.0f);
const __m128d two_power_52 = _mm_set1_pd(0x10000000000000);
__m128d nan_mask = _mm_cmpunord_pd(a, a);
- // elminate nans to avoid invalid fp errors within cmpge
+ // eliminate nans to avoid invalid fp errors within cmpge
__m128d x = _mm_xor_pd(nan_mask, a);
__m128d abs_x = npyv_abs_f64(x);
__m128d sign_x = _mm_and_pd(x, szero);
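The "magic number" rounding used throughout this file, sketched in plain NumPy: adding and then subtracting 2**52 pushes a float64 into the range where fractional bits are rounded away by the FPU under the current rounding mode (round-to-nearest-even by default).

import numpy as np

x = np.float64(2.5)
two_pow_52 = np.float64(2.0 ** 52)
rounded = (x + two_pow_52) - two_pow_52
print(rounded)  # 2.0: ties round to even, matching np.rint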
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 08e2cc683..a4e49ce89 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -994,7 +994,7 @@ array_richcompare(PyArrayObject *self, PyObject *other, int cmp_op)
* TODO: If/once we correctly push structured comparisons into the ufunc
* we could consider pushing this path into the ufunc itself as a
* fallback loop (which ignores the input arrays).
- * This would have the advantage that subclasses implemementing
+ * This would have the advantage that subclasses implementing
* `__array_ufunc__` do not explicitly need `__eq__` and `__ne__`.
*/
if (result == NULL
diff --git a/numpy/core/src/multiarray/dtypemeta.c b/numpy/core/src/multiarray/dtypemeta.c
index c3b612894..437319b3b 100644
--- a/numpy/core/src/multiarray/dtypemeta.c
+++ b/numpy/core/src/multiarray/dtypemeta.c
@@ -664,7 +664,7 @@ static PyArray_DTypeMeta *
datetime_common_dtype(PyArray_DTypeMeta *cls, PyArray_DTypeMeta *other)
{
/*
- * Timedelta/datetime shouldn't actuall promote at all. That they
+ * Timedelta/datetime shouldn't actually promote at all. That they
* currently do means that we need additional hacks in the comparison
* type resolver. For comparisons we have to make sure we reject it
* nicely in order to return an array of True/False values.
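The behaviour being protected, sketched from the Python side: mixed datetime/timedelta comparison degrades to an all-False (or all-True for !=) result rather than raising through the cast machinery.

import numpy as np

m = np.array(["2023-02-12"], dtype="M8[D]")
t = np.array([1], dtype="m8[D]")
print(m == t)  # [False]
print(m != t)  # [ True]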
diff --git a/numpy/core/src/npysort/heapsort.cpp b/numpy/core/src/npysort/heapsort.cpp
index de39367c2..3956de51f 100644
--- a/numpy/core/src/npysort/heapsort.cpp
+++ b/numpy/core/src/npysort/heapsort.cpp
@@ -21,7 +21,7 @@
*
* The merge sort is *stable*, meaning that equal components
* are unmoved from their entry versions, so it can be used to
- * implement lexigraphic sorting on multiple keys.
+ * implement lexicographic sorting on multiple keys.
*
* The heap sort is included for completeness.
*/
diff --git a/numpy/core/src/npysort/mergesort.cpp b/numpy/core/src/npysort/mergesort.cpp
index f892dd185..60d89ddb7 100644
--- a/numpy/core/src/npysort/mergesort.cpp
+++ b/numpy/core/src/npysort/mergesort.cpp
@@ -21,7 +21,7 @@
*
* The merge sort is *stable*, meaning that equal components
* are unmoved from their entry versions, so it can be used to
- * implement lexigraphic sorting on multiple keys.
+ * implement lexicographic sorting on multiple keys.
*
* The heap sort is included for completeness.
*/
diff --git a/numpy/core/src/npysort/timsort.cpp b/numpy/core/src/npysort/timsort.cpp
index 27294af0c..9438fb293 100644
--- a/numpy/core/src/npysort/timsort.cpp
+++ b/numpy/core/src/npysort/timsort.cpp
@@ -21,7 +21,7 @@
*
* The merge sort is *stable*, meaning that equal components
* are unmoved from their entry versions, so it can be used to
- * implement lexigraphic sorting on multiple keys.
+ * implement lexicographic sorting on multiple keys.
*
* The heap sort is included for completeness.
*/
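What the stability guarantee above buys (the same note appears in heapsort.cpp, mergesort.cpp, and here), sketched at the Python level: a stable sort on the primary key after sorting the secondary key yields a lexicographic order, which is also how np.lexsort is usually explained.

import numpy as np

names = np.array(["b", "a", "b", "a"])
ages = np.array([2, 1, 1, 2])
order = np.argsort(ages, kind="stable")                 # secondary key
order = order[np.argsort(names[order], kind="stable")]  # primary key
print(names[order], ages[order])  # ['a' 'a' 'b' 'b'] [1 2 1 2]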
diff --git a/numpy/core/src/umath/legacy_array_method.c b/numpy/core/src/umath/legacy_array_method.c
index 39b66a0ec..dd56e13a1 100644
--- a/numpy/core/src/umath/legacy_array_method.c
+++ b/numpy/core/src/umath/legacy_array_method.c
@@ -259,7 +259,7 @@ copy_cached_initial(
*
* For internal number dtypes, we can easily cache it, so do so after the
* first call by overriding the function with `copy_cache_initial`.
- * This path is not publically available. That could be added, and for a
+ * This path is not publicly available. That could be added, and for a
* custom initial getter it should be static/compile time data anyway.
*/
static int
diff --git a/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src b/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src
index 4aea88c02..3ab5a968d 100644
--- a/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src
+++ b/numpy/core/src/umath/loops_arithm_fp.dispatch.c.src
@@ -466,7 +466,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@)
const int loadable0 = npyv_loadable_stride_s64(ssrc0);
const int loadable1 = npyv_loadable_stride_s64(ssrc1);
- const int storeable = npyv_storable_stride_s64(sdst);
+ const int storable = npyv_storable_stride_s64(sdst);
// lots**lots of specializations, to squeeze out max performance
// contig
@@ -512,7 +512,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@)
}
}
// non-contig
- else if (loadable1 && storeable) {
+ else if (loadable1 && storable) {
for (; len >= vstep; len -= vstep, src1 += ssrc1*vstep, dst += sdst*vstep) {
npyv_@sfx@ b0 = npyv_loadn2_@sfx@(src1, ssrc1);
npyv_@sfx@ b1 = npyv_loadn2_@sfx@(src1 + ssrc1*hstep, ssrc1);
@@ -558,7 +558,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@)
}
}
// non-contig
- else if (loadable0 && storeable) {
+ else if (loadable0 && storable) {
for (; len >= vstep; len -= vstep, src0 += ssrc0*vstep, dst += sdst*vstep) {
npyv_@sfx@ a0 = npyv_loadn2_@sfx@(src0, ssrc0);
npyv_@sfx@ a1 = npyv_loadn2_@sfx@(src0 + ssrc0*hstep, ssrc0);
@@ -583,7 +583,7 @@ NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@)
}
#if @is_mul@
// non-contig
- else if (loadable0 && loadable1 && storeable) {
+ else if (loadable0 && loadable1 && storable) {
for (; len >= vstep; len -= vstep, src0 += ssrc0*vstep,
src1 += ssrc1*vstep, dst += sdst*vstep
) {
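These specializations are selected by operand contiguity; assuming (as the paired loadn2/store loads suggest) these are the complex loops, a Python-level illustration of inputs that would land on the non-contiguous branches guarded by the loadable/storable checks:

import numpy as np

a = (np.arange(16) + 1j * np.arange(16)).astype(np.complex64)
out = np.empty(16, dtype=np.complex64)
# Strided source and destination views exercise the strided load/store paths.
np.multiply(a[::2], a[1::2], out=out[::2])
print(out[::2])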
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index 47d42b899..a159fdc12 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -806,7 +806,7 @@ typedef enum {
*/
CONVERT_PYSCALAR,
/*
- * Other object is an unkown scalar or array-like, we (typically) use
+ * Other object is an unknown scalar or array-like, we (typically) use
* the generic path, which normally ends up in the ufunc machinery.
*/
OTHER_IS_UNKNOWN_OBJECT,
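The OTHER_IS_UNKNOWN_OBJECT path in spirit, as a one-line sketch: an array-like "other" makes the scalar defer to the generic route, which ends in the ufunc machinery.

import numpy as np

print(np.float64(1.0) + [1.0, 2.0])  # array([2., 3.]) via np.add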
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index a0a16a0f9..12187d059 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -362,7 +362,7 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
&& PyArray_ISDATETIME(operands[1])
&& type_num1 != type_num2) {
/*
- * Reject mixed datetime and timedelta explictly, this was always
+ * Reject mixed datetime and timedelta explicitly, this was always
* implicitly rejected because casting fails (except with
* `casting="unsafe"` admittedly).
* This is required to ensure that `==` and `!=` can correctly
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 7c0e8d97c..043785782 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -755,7 +755,7 @@ class TestEinsum:
# Test originally added to cover broken float16 path: gh-20305
# Likely most are covered elsewhere, at least partially.
dtype = np.dtype(dtype)
- # Simple test, designed to excersize most specialized code paths,
+ # Simple test, designed to exercise most specialized code paths,
# note the +0.5 for floats. This makes sure we use a float value
# where the results must be exact.
arr = (np.arange(7) + 0.5).astype(dtype)
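Why the +0.5 keeps results exact even in float16, checked directly (illustrative, outside the test): small halves fit comfortably in float16's 10-bit mantissa, so the cast loses nothing.

import numpy as np

arr = (np.arange(7) + 0.5).astype(np.float16)
print((arr.astype(np.float64) == np.arange(7) + 0.5).all())  # True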
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index b32239f9b..a7b72d54f 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -9534,7 +9534,7 @@ def test_equal_override():
@pytest.mark.parametrize("op", [operator.eq, operator.ne])
@pytest.mark.parametrize(["dt1", "dt2"], [
- ([("f", "i")], [("f", "i")]), # structured comparison (successfull)
+ ([("f", "i")], [("f", "i")]), # structured comparison (successful)
("M8", "d"), # impossible comparison: result is all True or False
("d", "d"), # valid comparison
])
diff --git a/numpy/core/tests/test_nep50_promotions.py b/numpy/core/tests/test_nep50_promotions.py
index 3c0316960..10d91aa31 100644
--- a/numpy/core/tests/test_nep50_promotions.py
+++ b/numpy/core/tests/test_nep50_promotions.py
@@ -131,7 +131,7 @@ def test_nep50_weak_integers_with_inexact(dtype):
@pytest.mark.parametrize("op", [operator.add, operator.pow, operator.eq])
def test_weak_promotion_scalar_path(op):
- # Some additional paths excercising the weak scalars.
+ # Some additional paths exercising the weak scalars.
np._set_promotion_state("weak")
# Integer path:
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 27bb12377..498a654c8 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -1987,7 +1987,7 @@ class TestUfunc:
# second operand cannot be converted to an array
np.add.at(a, [2, 5, 3], [[1, 2], 1])
- # ufuncs with indexed loops for perfomance in ufunc.at
+ # ufuncs with indexed loops for performance in ufunc.at
indexed_ufuncs = [np.add, np.subtract, np.multiply, np.floor_divide,
np.maximum, np.minimum, np.fmax, np.fmin]
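What the indexed loops accelerate, in a minimal sketch: ufunc.at performs unbuffered in-place updates, so repeated indices accumulate, unlike buffered fancy-index assignment.

import numpy as np

a = np.zeros(4)
np.add.at(a, [0, 0, 2], 1.0)
print(a)  # [2. 0. 1. 0.]
a[[0, 0, 2]] += 1.0  # buffered: the duplicate index counts only once
print(a)  # [3. 0. 2. 0.]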
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index eb5cecbe4..e504ddd6e 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -424,7 +424,7 @@ class TestDivision:
def test_division_int_boundary(self, dtype, ex_val):
fo = np.iinfo(dtype)
neg = -1 if fo.min < 0 else 1
- # Large enough to test SIMD loops and remaind elements
+ # Large enough to test SIMD loops and remainder elements
lsize = 512 + 7
a, b, divisors = eval(ex_val)
a_lst, b_lst = a.tolist(), b.tolist()
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index da550722c..4bb0dd008 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -1167,7 +1167,7 @@ class _CCompiler:
continue
lower_flags = flags[:-(i+1)]
upper_flags = flags[-i:]
- filterd = list(filter(
+ filtered = list(filter(
self._cc_normalize_unix_frgx.search, lower_flags
))
# gather subflags
@@ -1179,7 +1179,7 @@ class _CCompiler:
subflags = xsubflags + subflags
cur_flag = arch + '+' + '+'.join(subflags)
- flags = filterd + [cur_flag]
+ flags = filtered + [cur_flag]
if i > 0:
flags += upper_flags
break
diff --git a/numpy/f2py/symbolic.py b/numpy/f2py/symbolic.py
index c2ab0f140..b1b9f5b6a 100644
--- a/numpy/f2py/symbolic.py
+++ b/numpy/f2py/symbolic.py
@@ -801,7 +801,7 @@ def normalize(obj):
else:
_pairs_add(d, t, c)
if len(d) == 0:
- # TODO: deterimine correct kind
+ # TODO: determine correct kind
return as_number(0)
elif len(d) == 1:
(t, c), = d.items()
@@ -836,7 +836,7 @@ def normalize(obj):
else:
_pairs_add(d, b, e)
if len(d) == 0 or coeff == 0:
- # TODO: deterimine correct kind
+ # TODO: determine correct kind
assert isinstance(coeff, number_types)
return as_number(coeff)
elif len(d) == 1:
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index fb036108a..3b8db2a95 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -104,7 +104,7 @@ def poly(seq_of_zeros):
References
----------
- .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"