author    Charles Harris <charlesr.harris@gmail.com>  2021-05-10 12:24:28 -0600
committer GitHub <noreply@github.com>  2021-05-10 12:24:28 -0600
commit    7c4b2482a9ae8e1587871665fe8c0ea622b5f360 (patch)
tree      7432d6046d271d17f23fb5a484fe918d8249ce1e
parent    c0f858fb934adbb30fd07b5984e185a236f58b19 (diff)
parent    e75537773bd3af1b4f2f4846c8e6cb4b80ff20ea (diff)
download  numpy-7c4b2482a9ae8e1587871665fe8c0ea622b5f360.tar.gz
Merge pull request #18889 from HowJMay/simd-min-max-integer
ENH: Add SIMD operations for min and max value comparison
-rw-r--r--  numpy/core/src/_simd/_simd.dispatch.c.src  38
-rw-r--r--  numpy/core/src/common/simd/avx2/math.h     30
-rw-r--r--  numpy/core/src/common/simd/avx512/math.h   32
-rw-r--r--  numpy/core/src/common/simd/neon/math.h     30
-rw-r--r--  numpy/core/src/common/simd/sse/math.h      68
-rw-r--r--  numpy/core/src/common/simd/vsx/math.h      18
-rw-r--r--  numpy/core/tests/test_simd.py              45
7 files changed, 238 insertions, 23 deletions
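
Note: this merge adds integer npyv_max_*/npyv_min_* intrinsics to NumPy's universal SIMD (npyv) layer, with per-ISA backends in the math.h headers below. A minimal usage sketch, assuming a translation unit inside NumPy's source tree built with the universal intrinsics enabled (max_i32 is an illustrative name; npyv_load_s32, npyv_store_s32, and npyv_nlanes_s32 are the layer's existing load/store and lane-count names):

    #include "simd/simd.h"   /* NumPy-internal universal intrinsics header */

    #if NPY_SIMD
    /* Element-wise int32 max, one vector per iteration. Sketch only:
       assumes len is a multiple of the lane count npyv_nlanes_s32. */
    static void max_i32(npy_int32 *dst, const npy_int32 *a,
                        const npy_int32 *b, npy_intp len)
    {
        for (npy_intp i = 0; i < len; i += npyv_nlanes_s32) {
            npyv_s32 va = npyv_load_s32(a + i);
            npyv_s32 vb = npyv_load_s32(b + i);
            npyv_store_s32(dst + i, npyv_max_s32(va, vb));
        }
    }
    #endif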
diff --git a/numpy/core/src/_simd/_simd.dispatch.c.src b/numpy/core/src/_simd/_simd.dispatch.c.src
index c51e4ce4e..54770959c 100644
--- a/numpy/core/src/_simd/_simd.dispatch.c.src
+++ b/numpy/core/src/_simd/_simd.dispatch.c.src
@@ -309,7 +309,7 @@ SIMD_IMPL_INTRIN_2IMM(shri_@sfx@, v@sfx@, v@sfx@, @shr_imm@)
#endif // shl_imm
/**begin repeat1
- * #intrin = and, or, xor#
+ * #intrin = and, or, xor#
*/
SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@)
/**end repeat1**/
@@ -317,7 +317,7 @@ SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@)
SIMD_IMPL_INTRIN_1(not_@sfx@, v@sfx@, v@sfx@)
/**begin repeat1
- * #intrin = cmpeq, cmpneq, cmpgt, cmpge, cmplt, cmple#
+ * #intrin = cmpeq, cmpneq, cmpgt, cmpge, cmplt, cmple#
*/
SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@bsfx@, v@sfx@, v@sfx@)
/**end repeat1**/
@@ -334,14 +334,14 @@ SIMD_IMPL_INTRIN_1(expand_@esfx@_@sfx@, v@esfx@x2, v@sfx@)
* Arithmetic
***************************/
/**begin repeat1
- * #intrin = add, sub#
+ * #intrin = add, sub#
*/
SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@)
/**end repeat1**/
#if @sat_sup@
/**begin repeat1
- * #intrin = adds, subs#
+ * #intrin = adds, subs#
*/
SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@)
/**end repeat1**/
@@ -362,7 +362,7 @@ SIMD_IMPL_INTRIN_2(divc_@sfx@, v@sfx@, v@sfx@, v@sfx@x3)
#if @fused_sup@
/**begin repeat1
- * #intrin = muladd, mulsub, nmuladd, nmulsub#
+ * #intrin = muladd, mulsub, nmuladd, nmulsub#
*/
SIMD_IMPL_INTRIN_3(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@, v@sfx@)
/**end repeat1**/
@@ -381,15 +381,21 @@ SIMD_IMPL_INTRIN_1(sumup_@sfx@, @esfx@, v@sfx@)
***************************/
#if @fp_only@
/**begin repeat1
- * #intrin = sqrt, recip, abs, square#
+ * #intrin = sqrt, recip, abs, square#
*/
SIMD_IMPL_INTRIN_1(@intrin@_@sfx@, v@sfx@, v@sfx@)
/**end repeat1**/
#endif
+/**begin repeat1
+ * #intrin = max, min#
+ */
+SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@)
+/**end repeat1**/
+
#if @fp_only@
/**begin repeat1
- * #intrin = max, maxp, min, minp#
+ * #intrin = maxp, minp#
*/
SIMD_IMPL_INTRIN_2(@intrin@_@sfx@, v@sfx@, v@sfx@, v@sfx@)
/**end repeat1**/
@@ -546,7 +552,7 @@ SIMD_INTRIN_DEF(@intrin@_@sfx@)
#endif // shl_imm
/**begin repeat1
- * #intrin = and, or, xor, not, cmpeq, cmpneq, cmpgt, cmpge, cmplt, cmple#
+ * #intrin = and, or, xor, not, cmpeq, cmpneq, cmpgt, cmpge, cmplt, cmple#
*/
SIMD_INTRIN_DEF(@intrin@_@sfx@)
/**end repeat1**/
@@ -563,14 +569,14 @@ SIMD_INTRIN_DEF(expand_@esfx@_@sfx@)
* Arithmetic
***************************/
/**begin repeat1
- * #intrin = add, sub#
+ * #intrin = add, sub#
*/
SIMD_INTRIN_DEF(@intrin@_@sfx@)
/**end repeat1**/
#if @sat_sup@
/**begin repeat1
- * #intrin = adds, subs#
+ * #intrin = adds, subs#
*/
SIMD_INTRIN_DEF(@intrin@_@sfx@)
/**end repeat1**/
@@ -591,7 +597,7 @@ SIMD_INTRIN_DEF(divc_@sfx@)
#if @fused_sup@
/**begin repeat1
- * #intrin = muladd, mulsub, nmuladd, nmulsub#
+ * #intrin = muladd, mulsub, nmuladd, nmulsub#
*/
SIMD_INTRIN_DEF(@intrin@_@sfx@)
/**end repeat1**/
@@ -609,15 +615,21 @@ SIMD_INTRIN_DEF(sumup_@sfx@)
***************************/
#if @fp_only@
/**begin repeat1
- * #intrin = sqrt, recip, abs, square#
+ * #intrin = sqrt, recip, abs, square#
*/
SIMD_INTRIN_DEF(@intrin@_@sfx@)
/**end repeat1**/
#endif
+/**begin repeat1
+ * #intrin = max, min#
+ */
+SIMD_INTRIN_DEF(@intrin@_@sfx@)
+/**end repeat1**/
+
#if @fp_only@
/**begin repeat1
- * #intrin = max, maxp, min, minp#
+ * #intrin = maxp, minp#
*/
SIMD_INTRIN_DEF(@intrin@_@sfx@)
/**end repeat1**/
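
Note: the .c.src templating expands each /**begin repeat1**/ ... /**end repeat1**/ block once per value of @intrin@, for every lane suffix @sfx@ of the enclosing outer repeat. The key change above is that max/min moved out of the #if @fp_only@ guard, so wrappers are now emitted for the integer suffixes as well, while the NaN-aware maxp/minp stay float-only. Conceptually, for @sfx@ == s32 the new block expands to:

    SIMD_IMPL_INTRIN_2(max_s32, vs32, vs32, vs32)
    SIMD_IMPL_INTRIN_2(min_s32, vs32, vs32, vs32)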
diff --git a/numpy/core/src/common/simd/avx2/math.h b/numpy/core/src/common/simd/avx2/math.h
index 19e770ebf..22659e21b 100644
--- a/numpy/core/src/common/simd/avx2/math.h
+++ b/numpy/core/src/common/simd/avx2/math.h
@@ -55,6 +55,21 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b)
__m256d max = _mm256_max_pd(a, b);
return _mm256_blendv_pd(a, max, nn);
}
+// Maximum, integer operations
+#define npyv_max_u8 _mm256_max_epu8
+#define npyv_max_s8 _mm256_max_epi8
+#define npyv_max_u16 _mm256_max_epu16
+#define npyv_max_s16 _mm256_max_epi16
+#define npyv_max_u32 _mm256_max_epu32
+#define npyv_max_s32 _mm256_max_epi32
+NPY_FINLINE npyv_u64 npyv_max_u64(npyv_u64 a, npyv_u64 b)
+{
+ return _mm256_blendv_epi8(b, a, npyv_cmpgt_u64(a, b));
+}
+NPY_FINLINE npyv_s64 npyv_max_s64(npyv_s64 a, npyv_s64 b)
+{
+ return _mm256_blendv_epi8(b, a, _mm256_cmpgt_epi64(a, b));
+}
// Minimum, natively mapping with no guarantees to handle NaN.
#define npyv_min_f32 _mm256_min_ps
@@ -74,5 +89,20 @@ NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b)
__m256d min = _mm256_min_pd(a, b);
return _mm256_blendv_pd(a, min, nn);
}
+// Minimum, integer operations
+#define npyv_min_u8 _mm256_min_epu8
+#define npyv_min_s8 _mm256_min_epi8
+#define npyv_min_u16 _mm256_min_epu16
+#define npyv_min_s16 _mm256_min_epi16
+#define npyv_min_u32 _mm256_min_epu32
+#define npyv_min_s32 _mm256_min_epi32
+NPY_FINLINE npyv_u64 npyv_min_u64(npyv_u64 a, npyv_u64 b)
+{
+ return _mm256_blendv_epi8(b, a, npyv_cmplt_u64(a, b));
+}
+NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b)
+{
+ return _mm256_blendv_epi8(a, b, _mm256_cmpgt_epi64(a, b));
+}
#endif
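
Note: AVX2 has native integer max/min down to 32-bit lanes but no 64-bit forms, so the 64-bit paths use a compare-then-select idiom. The comparison yields all-ones or all-zeros per 64-bit lane, and _mm256_blendv_epi8 takes each byte from its second operand where the mask byte's high bit is set, which is lane-safe for full-lane masks; npyv_min_s64 simply reuses the a > b comparison with the blend operands swapped. A scalar model of the idiom (a sketch, not NumPy code):

    #include <stdint.h>

    /* mask = cmpgt(a, b); result = blendv(b, a, mask): a where mask set. */
    static inline uint64_t max_u64_model(uint64_t a, uint64_t b)
    {
        uint64_t mask = (a > b) ? UINT64_MAX : 0;  /* npyv_cmpgt_u64 */
        return (a & mask) | (b & ~mask);
    }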
diff --git a/numpy/core/src/common/simd/avx512/math.h b/numpy/core/src/common/simd/avx512/math.h
index da94faaeb..b75651962 100644
--- a/numpy/core/src/common/simd/avx512/math.h
+++ b/numpy/core/src/common/simd/avx512/math.h
@@ -62,6 +62,22 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b)
__mmask8 nn = _mm512_cmp_pd_mask(b, b, _CMP_ORD_Q);
return _mm512_mask_max_pd(a, nn, a, b);
}
+// Maximum, integer operations
+#ifdef NPY_HAVE_AVX512BW
+ #define npyv_max_u8 _mm512_max_epu8
+ #define npyv_max_s8 _mm512_max_epi8
+ #define npyv_max_u16 _mm512_max_epu16
+ #define npyv_max_s16 _mm512_max_epi16
+#else
+ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_max_u8, _mm256_max_epu8)
+ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_max_s8, _mm256_max_epi8)
+ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_max_u16, _mm256_max_epu16)
+ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_max_s16, _mm256_max_epi16)
+#endif
+#define npyv_max_u32 _mm512_max_epu32
+#define npyv_max_s32 _mm512_max_epi32
+#define npyv_max_u64 _mm512_max_epu64
+#define npyv_max_s64 _mm512_max_epi64
// Minimum, natively mapping with no guarantees to handle NaN.
#define npyv_min_f32 _mm512_min_ps
@@ -79,5 +95,21 @@ NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b)
__mmask8 nn = _mm512_cmp_pd_mask(b, b, _CMP_ORD_Q);
return _mm512_mask_min_pd(a, nn, a, b);
}
+// Minimum, integer operations
+#ifdef NPY_HAVE_AVX512BW
+ #define npyv_min_u8 _mm512_min_epu8
+ #define npyv_min_s8 _mm512_min_epi8
+ #define npyv_min_u16 _mm512_min_epu16
+ #define npyv_min_s16 _mm512_min_epi16
+#else
+ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_min_u8, _mm256_min_epu8)
+ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_min_s8, _mm256_min_epi8)
+ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_min_u16, _mm256_min_epu16)
+ NPYV_IMPL_AVX512_FROM_AVX2_2ARG(npyv_min_s16, _mm256_min_epi16)
+#endif
+#define npyv_min_u32 _mm512_min_epu32
+#define npyv_min_s32 _mm512_min_epi32
+#define npyv_min_u64 _mm512_min_epu64
+#define npyv_min_s64 _mm512_min_epi64
#endif
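
Note: base AVX512F covers the 32/64-bit integer max/min, but the 8/16-bit forms need the AVX512BW extension; without it, NPYV_IMPL_AVX512_FROM_AVX2_2ARG falls back to running the AVX2 instruction on each 256-bit half. A hedged sketch of that halving pattern (illustrative names, not the macro's literal expansion):

    #include <immintrin.h>

    static inline __m512i max_u8_via_avx2(__m512i a, __m512i b)
    {
        __m256i lo = _mm256_max_epu8(_mm512_castsi512_si256(a),
                                     _mm512_castsi512_si256(b));
        __m256i hi = _mm256_max_epu8(_mm512_extracti64x4_epi64(a, 1),
                                     _mm512_extracti64x4_epi64(b, 1));
        return _mm512_inserti64x4(_mm512_castsi256_si512(lo), hi, 1);
    }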
diff --git a/numpy/core/src/common/simd/neon/math.h b/numpy/core/src/common/simd/neon/math.h
index c99ef3299..a5508c96b 100644
--- a/numpy/core/src/common/simd/neon/math.h
+++ b/numpy/core/src/common/simd/neon/math.h
@@ -102,6 +102,21 @@ NPY_FINLINE npyv_f32 npyv_recip_f32(npyv_f32 a)
#if NPY_SIMD_F64
#define npyv_maxp_f64 vmaxnmq_f64
#endif // NPY_SIMD_F64
+// Maximum, integer operations
+#define npyv_max_u8 vmaxq_u8
+#define npyv_max_s8 vmaxq_s8
+#define npyv_max_u16 vmaxq_u16
+#define npyv_max_s16 vmaxq_s16
+#define npyv_max_u32 vmaxq_u32
+#define npyv_max_s32 vmaxq_s32
+NPY_FINLINE npyv_u64 npyv_max_u64(npyv_u64 a, npyv_u64 b)
+{
+ return vbslq_u64(npyv_cmpgt_u64(a, b), a, b);
+}
+NPY_FINLINE npyv_s64 npyv_max_s64(npyv_s64 a, npyv_s64 b)
+{
+ return vbslq_s64(npyv_cmpgt_s64(a, b), a, b);
+}
// Minimum, natively mapping with no guarantees to handle NaN.
#define npyv_min_f32 vminq_f32
@@ -122,5 +137,20 @@ NPY_FINLINE npyv_f32 npyv_recip_f32(npyv_f32 a)
#if NPY_SIMD_F64
#define npyv_minp_f64 vminnmq_f64
#endif // NPY_SIMD_F64
+// Minimum, integer operations
+#define npyv_min_u8 vminq_u8
+#define npyv_min_s8 vminq_s8
+#define npyv_min_u16 vminq_u16
+#define npyv_min_s16 vminq_s16
+#define npyv_min_u32 vminq_u32
+#define npyv_min_s32 vminq_s32
+NPY_FINLINE npyv_u64 npyv_min_u64(npyv_u64 a, npyv_u64 b)
+{
+ return vbslq_u64(npyv_cmplt_u64(a, b), a, b);
+}
+NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b)
+{
+ return vbslq_s64(npyv_cmplt_s64(a, b), a, b);
+}
#endif // _NPY_SIMD_NEON_MATH_H
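
Note: NEON provides vmaxq_*/vminq_* only up to 32-bit lanes, so the 64-bit variants are built from a comparison plus vbslq, the bitwise select: vbslq_u64(m, a, b) == (m & a) | (~m & b). On AArch64 the comparison maps directly to vcgtq_u64; a sketch of the same construction with raw NEON intrinsics:

    #include <arm_neon.h>

    static inline uint64x2_t max_u64_sketch(uint64x2_t a, uint64x2_t b)
    {
        uint64x2_t gt = vcgtq_u64(a, b);  /* all-ones lanes where a > b */
        return vbslq_u64(gt, a, b);       /* keep a where the mask is set */
    }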
diff --git a/numpy/core/src/common/simd/sse/math.h b/numpy/core/src/common/simd/sse/math.h
index e43c41167..1f82b546f 100644
--- a/numpy/core/src/common/simd/sse/math.h
+++ b/numpy/core/src/common/simd/sse/math.h
@@ -55,6 +55,40 @@ NPY_FINLINE npyv_f64 npyv_maxp_f64(npyv_f64 a, npyv_f64 b)
__m128d max = _mm_max_pd(a, b);
return npyv_select_f64(_mm_castpd_si128(nn), max, a);
}
+// Maximum, integer operations
+#ifdef NPY_HAVE_SSE41
+ #define npyv_max_s8 _mm_max_epi8
+ #define npyv_max_u16 _mm_max_epu16
+ #define npyv_max_u32 _mm_max_epu32
+ #define npyv_max_s32 _mm_max_epi32
+#else
+ NPY_FINLINE npyv_s8 npyv_max_s8(npyv_s8 a, npyv_s8 b)
+ {
+ return npyv_select_s8(npyv_cmpgt_s8(a, b), a, b);
+ }
+ NPY_FINLINE npyv_u16 npyv_max_u16(npyv_u16 a, npyv_u16 b)
+ {
+ return npyv_select_u16(npyv_cmpgt_u16(a, b), a, b);
+ }
+ NPY_FINLINE npyv_u32 npyv_max_u32(npyv_u32 a, npyv_u32 b)
+ {
+ return npyv_select_u32(npyv_cmpgt_u32(a, b), a, b);
+ }
+ NPY_FINLINE npyv_s32 npyv_max_s32(npyv_s32 a, npyv_s32 b)
+ {
+ return npyv_select_s32(npyv_cmpgt_s32(a, b), a, b);
+ }
+#endif
+#define npyv_max_u8 _mm_max_epu8
+#define npyv_max_s16 _mm_max_epi16
+NPY_FINLINE npyv_u64 npyv_max_u64(npyv_u64 a, npyv_u64 b)
+{
+ return npyv_select_u64(npyv_cmpgt_u64(a, b), a, b);
+}
+NPY_FINLINE npyv_s64 npyv_max_s64(npyv_s64 a, npyv_s64 b)
+{
+ return npyv_select_s64(npyv_cmpgt_s64(a, b), a, b);
+}
// Minimum, natively mapping with no guarantees to handle NaN.
#define npyv_min_f32 _mm_min_ps
@@ -74,5 +108,39 @@ NPY_FINLINE npyv_f64 npyv_minp_f64(npyv_f64 a, npyv_f64 b)
__m128d min = _mm_min_pd(a, b);
return npyv_select_f64(_mm_castpd_si128(nn), min, a);
}
+// Minimum, integer operations
+#ifdef NPY_HAVE_SSE41
+ #define npyv_min_s8 _mm_min_epi8
+ #define npyv_min_u16 _mm_min_epu16
+ #define npyv_min_u32 _mm_min_epu32
+ #define npyv_min_s32 _mm_min_epi32
+#else
+ NPY_FINLINE npyv_s8 npyv_min_s8(npyv_s8 a, npyv_s8 b)
+ {
+ return npyv_select_s8(npyv_cmplt_s8(a, b), a, b);
+ }
+ NPY_FINLINE npyv_u16 npyv_min_u16(npyv_u16 a, npyv_u16 b)
+ {
+ return npyv_select_u16(npyv_cmplt_u16(a, b), a, b);
+ }
+ NPY_FINLINE npyv_u32 npyv_min_u32(npyv_u32 a, npyv_u32 b)
+ {
+ return npyv_select_u32(npyv_cmplt_u32(a, b), a, b);
+ }
+ NPY_FINLINE npyv_s32 npyv_min_s32(npyv_s32 a, npyv_s32 b)
+ {
+ return npyv_select_s32(npyv_cmplt_s32(a, b), a, b);
+ }
+#endif
+#define npyv_min_u8 _mm_min_epu8
+#define npyv_min_s16 _mm_min_epi16
+NPY_FINLINE npyv_u64 npyv_min_u64(npyv_u64 a, npyv_u64 b)
+{
+ return npyv_select_u64(npyv_cmplt_u64(a, b), a, b);
+}
+NPY_FINLINE npyv_s64 npyv_min_s64(npyv_s64 a, npyv_s64 b)
+{
+ return npyv_select_s64(npyv_cmplt_s64(a, b), a, b);
+}
#endif
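
Note: the SSE split mirrors instruction-set history. SSE2 only has _mm_max_epu8 and _mm_max_epi16 (and their min counterparts), which is why those two are defined unconditionally; the s8/u16/u32/s32 forms arrived with SSE4.1 and otherwise fall back to compare-plus-select. The unsigned comparisons the fallback leans on have no SSE2 instruction either; the usual emulation flips the sign bit and reuses the signed compare. A hedged sketch of that idiom (not necessarily NumPy's exact helper):

    #include <emmintrin.h>

    static inline __m128i cmpgt_u32_sse2(__m128i a, __m128i b)
    {
        const __m128i sbit = _mm_set1_epi32((int)0x80000000);
        return _mm_cmpgt_epi32(_mm_xor_si128(a, sbit),
                               _mm_xor_si128(b, sbit));
    }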
diff --git a/numpy/core/src/common/simd/vsx/math.h b/numpy/core/src/common/simd/vsx/math.h
index 7c5301032..b2e393c7c 100644
--- a/numpy/core/src/common/simd/vsx/math.h
+++ b/numpy/core/src/common/simd/vsx/math.h
@@ -41,6 +41,15 @@ NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a)
// - Only if both corresponded elements are NaN, NaN is set.
#define npyv_maxp_f32 vec_max
#define npyv_maxp_f64 vec_max
+// Maximum, integer operations
+#define npyv_max_u8 vec_max
+#define npyv_max_s8 vec_max
+#define npyv_max_u16 vec_max
+#define npyv_max_s16 vec_max
+#define npyv_max_u32 vec_max
+#define npyv_max_s32 vec_max
+#define npyv_max_u64 vec_max
+#define npyv_max_s64 vec_max
// Minimum, natively mapping with no guarantees to handle NaN.
#define npyv_min_f32 vec_min
@@ -50,5 +59,14 @@ NPY_FINLINE npyv_f64 npyv_square_f64(npyv_f64 a)
// - Only if both corresponded elements are NaN, NaN is set.
#define npyv_minp_f32 vec_min
#define npyv_minp_f64 vec_min
+// Minimum, integer operations
+#define npyv_min_u8 vec_min
+#define npyv_min_s8 vec_min
+#define npyv_min_u16 vec_min
+#define npyv_min_s16 vec_min
+#define npyv_min_u32 vec_min
+#define npyv_min_s32 vec_min
+#define npyv_min_u64 vec_min
+#define npyv_min_s64 vec_min
#endif // _NPY_SIMD_VSX_MATH_H
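
Note: on VSX nothing needs emulating, because vec_max/vec_min are type-generic AltiVec builtins: one name covers every lane width, including 64-bit integers on POWER8 and later. A sketch (assuming <altivec.h> and a 64-bit-capable -mcpu):

    #include <altivec.h>

    static inline vector unsigned char
    max_u8(vector unsigned char a, vector unsigned char b)
    { return vec_max(a, b); }

    static inline vector unsigned long long
    max_u64(vector unsigned long long a, vector unsigned long long b)
    { return vec_max(a, b); }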
diff --git a/numpy/core/tests/test_simd.py b/numpy/core/tests/test_simd.py
index 6c1575971..3be28c3bb 100644
--- a/numpy/core/tests/test_simd.py
+++ b/numpy/core/tests/test_simd.py
@@ -208,6 +208,19 @@ class _SIMD_INT(_Test_Utility):
subs = self.subs(vdata_a, vdata_b)
assert subs == data_subs
+ def test_math_max_min(self):
+ data_a = self._data()
+ data_b = self._data(self.nlanes)
+ vdata_a, vdata_b = self.load(data_a), self.load(data_b)
+
+ data_max = [max(a, b) for a, b in zip(data_a, data_b)]
+ simd_max = self.max(vdata_a, vdata_b)
+ assert simd_max == data_max
+
+ data_min = [min(a, b) for a, b in zip(data_a, data_b)]
+ simd_min = self.min(vdata_a, vdata_b)
+ assert simd_min == data_min
+
class _SIMD_FP32(_Test_Utility):
"""
To only test single precision
@@ -215,7 +228,7 @@ class _SIMD_FP32(_Test_Utility):
def test_conversions(self):
"""
Round to nearest even integer, assume CPU control register is set to rounding.
- Test intrinics:
+ Test intrinsics:
npyv_round_s32_##SFX
"""
features = self._cpu_features()
@@ -238,7 +251,7 @@ class _SIMD_FP64(_Test_Utility):
def test_conversions(self):
"""
Round to nearest even integer, assume CPU control register is set to rounding.
- Test intrinics:
+ Test intrinsics:
npyv_round_s32_##SFX
"""
vdata_a = self.load(self._data())
@@ -317,6 +330,11 @@ class _SIMD_FP(_Test_Utility):
assert square == data_square
def test_max(self):
+ """
+ Test intrinsics:
+ npyv_max_##SFX
+ npyv_maxp_##SFX
+ """
data_a = self._data()
data_b = self._data(self.nlanes)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
@@ -329,7 +347,8 @@ class _SIMD_FP(_Test_Utility):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
max_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10),
(pinf, pinf, pinf), (pinf, 10, pinf), (10, pinf, pinf),
- (ninf, ninf, ninf), (ninf, 10, 10), (10, ninf, 10))
+ (ninf, ninf, ninf), (ninf, 10, 10), (10, ninf, 10),
+ (10, 0, 10), (10, -10, 10))
for case_operand1, case_operand2, desired in max_cases:
data_max = [desired]*self.nlanes
vdata_a = self.setall(case_operand1)
@@ -342,6 +361,11 @@ class _SIMD_FP(_Test_Utility):
assert _max == data_max
def test_min(self):
+ """
+ Test intrinsics:
+ npyv_min_##SFX
+ npyv_minp_##SFX
+ """
data_a = self._data()
data_b = self._data(self.nlanes)
vdata_a, vdata_b = self.load(data_a), self.load(data_b)
@@ -354,7 +378,8 @@ class _SIMD_FP(_Test_Utility):
pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
min_cases = ((nan, nan, nan), (nan, 10, 10), (10, nan, 10),
(pinf, pinf, pinf), (pinf, 10, 10), (10, pinf, 10),
- (ninf, ninf, ninf), (ninf, 10, ninf), (10, ninf, ninf))
+ (ninf, ninf, ninf), (ninf, 10, ninf), (10, ninf, ninf),
+ (10, 0, 0), (10, -10, -10))
for case_operand1, case_operand2, desired in min_cases:
data_min = [desired]*self.nlanes
vdata_a = self.setall(case_operand1)
@@ -383,7 +408,7 @@ class _SIMD_FP(_Test_Utility):
def test_special_cases(self):
"""
- Compare Not NaN. Test intrinics:
+ Compare Not NaN. Test intrinsics:
npyv_notnan_##SFX
"""
nnan = self.notnan(self.setall(self._nan()))
@@ -722,7 +747,7 @@ class _SIMD_ALL(_Test_Utility):
def test_conversion_expand(self):
"""
- Test expand intrinics:
+ Test expand intrinsics:
npyv_expand_u16_u8
npyv_expand_u32_u16
"""
@@ -785,7 +810,7 @@ class _SIMD_ALL(_Test_Utility):
def test_arithmetic_intdiv(self):
"""
- Test integer division intrinics:
+ Test integer division intrinsics:
npyv_divisor_##sfx
npyv_divc_##sfx
"""
@@ -855,7 +880,7 @@ class _SIMD_ALL(_Test_Utility):
def test_arithmetic_reduce_sum(self):
"""
- Test reduce sum intrinics:
+ Test reduce sum intrinsics:
npyv_sum_##sfx
"""
if self.sfx not in ("u32", "u64", "f32", "f64"):
@@ -870,7 +895,7 @@ class _SIMD_ALL(_Test_Utility):
def test_arithmetic_reduce_sumup(self):
"""
- Test extend reduce sum intrinics:
+ Test extend reduce sum intrinsics:
npyv_sumup_##sfx
"""
if self.sfx not in ("u8", "u16"):
@@ -886,7 +911,7 @@ class _SIMD_ALL(_Test_Utility):
def test_mask_conditional(self):
"""
Conditional addition and subtraction for all supported data types.
- Test intrinics:
+ Test intrinsics:
npyv_ifadd_##SFX, npyv_ifsub_##SFX
"""
vdata_a = self.load(self._data())
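
Note: the new test_math_max_min lives in _SIMD_INT, so it runs once per integer suffix and per enabled SIMD extension, checking npyv_max_*/npyv_min_* lane-by-lane against Python's scalar max/min; the float tests gain docstrings and two finite cases each, while keeping the separate NaN-propagation checks for maxp/minp. These tests can typically be exercised with NumPy's dev helper, e.g. python runtests.py -t numpy/core/tests/test_simd.py (assuming the standard in-tree dev setup).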