author    Raghuveer Devulapalli <raghuveer.devulapalli@intel.com>  2022-02-02 13:45:47 -0800
committer Raghuveer Devulapalli <raghuveer.devulapalli@intel.com>  2022-02-02 13:52:34 -0800
commit    0f1397f6de03e070de8359c2f1d2b81c423d0abe (patch)
tree      96e073ebdacc4649ebf8beead318b0b3b4348b14 /numpy
parent    f312f000fa683b7dbd0b6544cabf4323a6fdd818 (diff)
download  numpy-0f1397f6de03e070de8359c2f1d2b81c423d0abe.tar.gz
SIMD: Add intrinsics emulation for reduce_min and reduce_max instructions
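
The _mm512_reduce_min/max intrinsics used by the AVX-512 qsort code are
sequence intrinsics that not every supported compiler provides, so this
patch adds emulated fallbacks behind the existing preprocessor guard.
Semantically, each reduction collapses the 16 lanes of a 512-bit vector
into one scalar. A minimal scalar sketch of what npyv_reduce_max_u32
must return (a reference model for illustration, not part of the patch):

    #include <stdint.h>

    /* Horizontal maximum of 16 uint32 lanes, written as a plain loop. */
    static uint32_t reduce_max_u32_ref(const uint32_t lanes[16])
    {
        uint32_t r = lanes[0];
        for (int i = 1; i < 16; i++) {
            if (lanes[i] > r)
                r = lanes[i];
        }
        return r;
    }

The vector emulation below reaches the same result in log2(16) = 4
min/max steps instead of 15 scalar comparisons.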
Diffstat (limited to 'numpy')
-rw-r--r--  numpy/core/src/common/simd/avx512/arithmetic.h    79
-rw-r--r--  numpy/core/src/npysort/x86-qsort.dispatch.c.src   10
2 files changed, 85 insertions, 4 deletions
diff --git a/numpy/core/src/common/simd/avx512/arithmetic.h b/numpy/core/src/common/simd/avx512/arithmetic.h
index f8632e701..e930da1b2 100644
--- a/numpy/core/src/common/simd/avx512/arithmetic.h
+++ b/numpy/core/src/common/simd/avx512/arithmetic.h
@@ -371,7 +371,86 @@ NPY_FINLINE npyv_s64 npyv_divc_s64(npyv_s64 a, const npyv_s64x3 divisor)
#define npyv_sum_u64 _mm512_reduce_add_epi64
#define npyv_sum_f32 _mm512_reduce_add_ps
#define npyv_sum_f64 _mm512_reduce_add_pd
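+ // These compilers expose native AVX-512 reduction intrinsics,
+ // so map the universal intrinsics directly onto them.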
+ #define npyv_reduce_min_u32 _mm512_reduce_min_epu32
+ #define npyv_reduce_min_s32 _mm512_reduce_min_epi32
+ #define npyv_reduce_min_f32 _mm512_reduce_min_ps
+ #define npyv_reduce_max_u32 _mm512_reduce_max_epu32
+ #define npyv_reduce_max_s32 _mm512_reduce_max_epi32
+ #define npyv_reduce_max_f32 _mm512_reduce_max_ps
#else
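+ // Emulate each reduction as a 4-step tree: two cross-lane permutes
+ // fold 512 -> 256 -> 128 bits, then two in-lane shuffles fold 64-bit
+ // pairs and adjacent elements, applying min/max at every step.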
+ NPY_FINLINE npy_uint32 npyv_reduce_max_u32(npyv_u32 a)
+ {
+ const npyv_u32 idx1 = _mm512_set_epi32(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ const npyv_u32 idx2 = _mm512_set_epi32(3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
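+ // idx1 exchanges the two 256-bit halves; idx2 reverses the four
+ // 128-bit lanes (the same constants are reused below).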
+ npyv_u32 a1 = _mm512_max_epu32(a, _mm512_permutex2var_epi32(a, idx1, a));
+ npyv_u32 a2 = _mm512_max_epu32(a1, _mm512_permutex2var_epi32(a1, idx2, a1));
+ npyv_u32 a3 = _mm512_max_epu32(a2, _mm512_shuffle_epi32(a2, (1<<6 | 0<<4 | 3<<2 | 2)));
+ npyv_u32 a4 = _mm512_max_epu32(a3, _mm512_shuffle_epi32(a3, (2<<6 | 3<<4 | 0<<2 | 1)));
+ return _mm_extract_epi32(_mm512_extracti32x4_epi32(a4, 0x00), 0x00);
+ }
+
+ NPY_FINLINE npy_int32 npyv_reduce_max_s32(npyv_s32 a)
+ {
+ const npyv_u32 idx1 = _mm512_set_epi32(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ const npyv_u32 idx2 = _mm512_set_epi32(3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ npyv_s32 a1 = _mm512_max_epi32(a, _mm512_permutex2var_epi32(a, idx1, a));
+ npyv_s32 a2 = _mm512_max_epi32(a1, _mm512_permutex2var_epi32(a1, idx2, a1));
+ npyv_s32 a3 = _mm512_max_epi32(a2, _mm512_shuffle_epi32(a2, (1<<6 | 0<<4 | 3<<2 | 2)));
+ npyv_s32 a4 = _mm512_max_epi32(a3, _mm512_shuffle_epi32(a3, (2<<6 | 3<<4 | 0<<2 | 1)));
+ return _mm_extract_epi32(_mm512_extracti32x4_epi32(a4, 0x00), 0x00);
+ }
+
+ NPY_FINLINE npy_float npyv_reduce_max_f32(npyv_f32 a)
+ {
+ const npyv_u32 idx1 = _mm512_set_epi32(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ const npyv_u32 idx2 = _mm512_set_epi32(3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ npyv_f32 a1 = _mm512_max_ps(a, _mm512_permutex2var_ps(a, idx1, a));
+ npyv_f32 a2 = _mm512_max_ps(a1, _mm512_permutex2var_ps(a1, idx2, a1));
+ npyv_f32 a3 = _mm512_max_ps(a2, _mm512_shuffle_ps(a2, a2, (1<<6 | 0<<4 | 3<<2 | 2)));
+ npyv_f32 a4 = _mm512_max_ps(a3, _mm512_shuffle_ps(a3, a3, (2<<6 | 3<<4 | 0<<2 | 1)));
+ return _mm_cvtss_f32(_mm512_extractf32x4_ps(a4, 0x00));
+ }
+
+ NPY_FINLINE npy_uint32 npyv_reduce_min_u32(npyv_u32 a)
+ {
+ const npyv_u32 idx1 = _mm512_set_epi32(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ const npyv_u32 idx2 = _mm512_set_epi32(3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ npyv_u32 a1 = _mm512_min_epu32(a, _mm512_permutex2var_epi32(a, idx1, a));
+ npyv_u32 a2 = _mm512_min_epu32(a1, _mm512_permutex2var_epi32(a1, idx2, a1));
+ npyv_u32 a3 = _mm512_min_epu32(a2, _mm512_shuffle_epi32(a2, (1<<6 | 0<<4 | 3<<2 | 2)));
+ npyv_u32 a4 = _mm512_min_epu32(a3, _mm512_shuffle_epi32(a3, (2<<6 | 3<<4 | 0<<2 | 1)));
+ return _mm_extract_epi32(_mm512_extracti32x4_epi32(a4, 0x00), 0x00);
+ }
+
+ NPY_FINLINE npy_int32 npyv_reduce_min_s32(npyv_s32 a)
+ {
+ const npyv_u32 idx1 = _mm512_set_epi32(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ const npyv_u32 idx2 = _mm512_set_epi32(3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ npyv_s32 a1 = _mm512_min_epi32(a, _mm512_permutex2var_epi32(a, idx1, a));
+ npyv_s32 a2 = _mm512_min_epi32(a1, _mm512_permutex2var_epi32(a1, idx2, a1));
+ npyv_s32 a3 = _mm512_min_epi32(a2, _mm512_shuffle_epi32(a2, (1<<6 | 0<<4 | 3<<2 | 2)));
+ npyv_s32 a4 = _mm512_min_epi32(a3, _mm512_shuffle_epi32(a3, (2<<6 | 3<<4 | 0<<2 | 1)));
+ return _mm_extract_epi32(_mm512_extracti32x4_epi32(a4, 0x00), 0x00);
+ }
+
+ NPY_FINLINE npy_float npyv_reduce_min_f32(npyv_f32 a)
+ {
+ const npyv_u32 idx1 = _mm512_set_epi32(7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8);
+ const npyv_u32 idx2 = _mm512_set_epi32(3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12);
+ npyv_f32 a1 = _mm512_min_ps(a, _mm512_permutex2var_ps(a, idx1, a));
+ npyv_f32 a2 = _mm512_min_ps(a1, _mm512_permutex2var_ps(a1, idx2, a1));
+ npyv_f32 a3 = _mm512_min_ps(a2, _mm512_shuffle_ps(a2, a2, (1<<6 | 0<<4 | 3<<2 | 2)));
+ npyv_f32 a4 = _mm512_min_ps(a3, _mm512_shuffle_ps(a3, a3, (2<<6 | 3<<4 | 0<<2 | 1)));
+ return _mm_cvtss_f32(_mm512_extractf32x4_ps(a4, 0x00));
+ }
+
NPY_FINLINE npy_uint32 npyv_sum_u32(npyv_u32 a)
{
__m256i half = _mm256_add_epi32(npyv512_lower_si256(a), npyv512_higher_si256(a));
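
With the wrappers in place, callers no longer have to reach for the
compiler-specific _mm512_reduce_* names. A hedged usage sketch (max_of_16
is a hypothetical helper; it assumes an AVX-512 build where npyv_u32 maps
to __m512i and uses NumPy's npyv_loadu_u32 universal load):

    #include "simd/simd.h"

    /* Horizontal maximum of 16 uint32 values via the new wrapper. */
    static npy_uint32 max_of_16(const npy_uint32 *p)
    {
        npyv_u32 v = npyv_loadu_u32(p);   /* load 16 lanes */
        return npyv_reduce_max_u32(v);    /* collapse to one scalar */
    }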
diff --git a/numpy/core/src/npysort/x86-qsort.dispatch.c.src b/numpy/core/src/npysort/x86-qsort.dispatch.c.src
index fca44c17d..c8fa5c82b 100644
--- a/numpy/core/src/npysort/x86-qsort.dispatch.c.src
+++ b/numpy/core/src/npysort/x86-qsort.dispatch.c.src
@@ -12,6 +12,7 @@
#include <immintrin.h>
#include "numpy/npy_math.h"
#include "npy_sort.h"
+#include "simd/simd.h"
/*
@@ -90,6 +91,7 @@ __m256i rnd_epu32(__m256i rnd_vec, __m256i bound) {
* #vsuf1 = epi32, epu32, ps#
* #vsuf2 = epi32, epi32, ps#
* #vsuf3 = si512, si512, ps#
+ * #vsuf4 = s32, u32, f32#
* #CMP_GE_OP = _MM_CMPINT_NLT, _MM_CMPINT_NLT, _CMP_GE_OQ#
* #TYPE_MAX_VAL = NPY_MAX_INT32, NPY_MAX_UINT32, NPY_INFINITYF#
* #TYPE_MIN_VAL = NPY_MIN_INT32, 0, -NPY_INFINITYF#
@@ -459,8 +461,8 @@ npy_intp partition_avx512_@vsuf1@(@type_t@* arr, npy_intp left, npy_intp right,
if(right - left == 16) {
@zmm_t@ vec = _mm512_loadu_@vsuf3@(arr + left);
npy_int amount_gt_pivot = partition_vec_@vsuf1@(arr, left, left + 16, vec, pivot_vec, &min_vec, &max_vec);
- *smallest = _mm512_reduce_min_@vsuf1@(min_vec);
- *biggest = _mm512_reduce_max_@vsuf1@(max_vec);
+ *smallest = npyv_reduce_min_@vsuf4@(min_vec);
+ *biggest = npyv_reduce_max_@vsuf4@(max_vec);
return left + (16 - amount_gt_pivot);
}
@@ -498,8 +500,8 @@ npy_intp partition_avx512_@vsuf1@(@type_t@* arr, npy_intp left, npy_intp right,
l_store += (16 - amount_gt_pivot);
amount_gt_pivot = partition_vec_@vsuf1@(arr, l_store, l_store + 16, vec_right, pivot_vec, &min_vec, &max_vec);
l_store += (16 - amount_gt_pivot);
- *smallest = _mm512_reduce_min_@vsuf1@(min_vec);
- *biggest = _mm512_reduce_max_@vsuf1@(max_vec);
+ *smallest = npyv_reduce_min_@vsuf4@(min_vec);
+ *biggest = npyv_reduce_max_@vsuf4@(max_vec);
return l_store;
}
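
For reference, the .c.src templating repeats each block once per entry of
the #...# substitution lists, so with #vsuf4 = s32, u32, f32# the second
(unsigned) expansion of the templated lines above becomes:

    *smallest = npyv_reduce_min_u32(min_vec);
    *biggest = npyv_reduce_max_u32(max_vec);

These resolve to the native _mm512_reduce_min_epu32/_mm512_reduce_max_epu32
where the compiler provides them, and to the emulated versions from
arithmetic.h otherwise.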