author     Matti Picus <matti.picus@gmail.com>    2022-01-11 09:14:53 +0200
committer  GitHub <noreply@github.com>            2022-01-11 09:14:53 +0200
commit     2d749723319e91f9c2dbce9e96f12f95db08c0d0 (patch)
tree       9aa48fbd652403249a5049f70f39a33bc69c292e /numpy
parent     acf33eb2a8a3de148cc523db13fab633530125e8 (diff)
parent     5fca2bfccf7ea55d5e6d1480fff61b1ca14770bd (diff)
Merge pull request #20131 from Developer-Ecosystem-Engineering/as_min_max
BUG: min/max is slow, re-implement using NEON (#17989)
Diffstat (limited to 'numpy')
 numpy/core/code_generators/generate_umath.py     |   6
 numpy/core/setup.py                              |   1
 numpy/core/src/umath/loops.c.src                 | 113
 numpy/core/src/umath/loops.h.src                 |  38
 numpy/core/src/umath/loops_minmax.dispatch.c.src | 551
 numpy/core/src/umath/simd.inc.src                | 188
 6 files changed, 596 insertions(+), 301 deletions(-)
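
The new dispatch source keeps NumPy's two flavours of min/max semantics: maximum/minimum propagate NaNs, while fmax/fmin ignore them and defer to the C99 fmin/fmax family. A minimal standalone C sketch of those scalar fallbacks (illustrative only, not part of the patch; the patch expresses them as the scalar_* macros in loops_minmax.dispatch.c.src, and the function names here are made up for the example):

    #include <math.h>   /* fmax, isnan */

    /* NaN-propagating maximum: a NaN in either operand yields NaN
       (testing `a` first mirrors the ordering the kernels rely on). */
    static double max_propagate_nan(double a, double b)
    {
        return (a >= b || isnan(a)) ? a : b;
    }

    /* NaN-ignoring maximum: C99 fmax returns the non-NaN operand when
       exactly one input is NaN, and NaN only when both inputs are. */
    static double max_ignore_nan(double a, double b)
    {
        return fmax(a, b);
    }
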
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 1844ab96a..054150b28 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -522,14 +522,14 @@ defdict = {
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.maximum'),
'PyUFunc_SimpleUniformOperationTypeResolver',
- TD(noobj, simd=[('avx512f', 'fd')]),
+ TD(noobj, dispatch=[('loops_minmax', ints+'fdg')]),
TD(O, f='npy_ObjectMax')
),
'minimum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.minimum'),
'PyUFunc_SimpleUniformOperationTypeResolver',
- TD(noobj, simd=[('avx512f', 'fd')]),
+ TD(noobj, dispatch=[('loops_minmax', ints+'fdg')]),
TD(O, f='npy_ObjectMin')
),
'clip':
@@ -543,6 +543,7 @@ defdict = {
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmax'),
'PyUFunc_SimpleUniformOperationTypeResolver',
+ TD('fdg', dispatch=[('loops_minmax', 'fdg')]),
TD(noobj),
TD(O, f='npy_ObjectMax')
),
@@ -550,6 +551,7 @@ defdict = {
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmin'),
'PyUFunc_SimpleUniformOperationTypeResolver',
+ TD('fdg', dispatch=[('loops_minmax', 'fdg')]),
TD(noobj),
TD(O, f='npy_ObjectMin')
),
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index a67a4cab6..22cac1e9a 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -999,6 +999,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'loops_unary_fp.dispatch.c.src'),
join('src', 'umath', 'loops_arithm_fp.dispatch.c.src'),
join('src', 'umath', 'loops_arithmetic.dispatch.c.src'),
+ join('src', 'umath', 'loops_minmax.dispatch.c.src'),
join('src', 'umath', 'loops_trigonometric.dispatch.c.src'),
join('src', 'umath', 'loops_umath_fp.dispatch.c.src'),
join('src', 'umath', 'loops_exponent_log.dispatch.c.src'),
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index aaa694f34..5f054d0a9 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -724,32 +724,6 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
/**end repeat1**/
-/**begin repeat1
- * #kind = maximum, minimum#
- * #OP = >, <#
- **/
-
-NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
-{
- if (IS_BINARY_REDUCE) {
- BINARY_REDUCE_LOOP(@type@) {
- const @type@ in2 = *(@type@ *)ip2;
- io1 = (io1 @OP@ in2) ? io1 : in2;
- }
- *((@type@ *)iop1) = io1;
- }
- else {
- BINARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- const @type@ in2 = *(@type@ *)ip2;
- *((@type@ *)op1) = (in1 @OP@ in2) ? in1 : in2;
- }
- }
-}
-
-/**end repeat1**/
-
NPY_NO_EXPORT void
@TYPE@_power(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
@@ -1684,93 +1658,6 @@ NPY_NO_EXPORT void
}
}
-/**begin repeat1
- * #kind = maximum, minimum#
- * #OP = >=, <=#
- **/
-NPY_NO_EXPORT void
-@TYPE@_@kind@_avx512f(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
-{
- /* */
- if (IS_BINARY_REDUCE) {
- if (!run_unary_reduce_simd_@kind@_@TYPE@(args, dimensions, steps)) {
- BINARY_REDUCE_LOOP(@type@) {
- const @type@ in2 = *(@type@ *)ip2;
- /* Order of operations important for MSVC 2015 */
- io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
- }
- *((@type@ *)iop1) = io1;
- }
- }
- else {
- if (!run_binary_avx512f_@kind@_@TYPE@(args, dimensions, steps)) {
- BINARY_LOOP {
- @type@ in1 = *(@type@ *)ip1;
- const @type@ in2 = *(@type@ *)ip2;
- /* Order of operations important for MSVC 2015 */
- in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
- *((@type@ *)op1) = in1;
- }
- }
- }
- npy_clear_floatstatus_barrier((char*)dimensions);
-}
-
-NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
-{
- /* */
- if (IS_BINARY_REDUCE) {
- if (!run_unary_reduce_simd_@kind@_@TYPE@(args, dimensions, steps)) {
- BINARY_REDUCE_LOOP(@type@) {
- const @type@ in2 = *(@type@ *)ip2;
- /* Order of operations important for MSVC 2015 */
- io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
- }
- *((@type@ *)iop1) = io1;
- }
- }
- else {
- BINARY_LOOP {
- @type@ in1 = *(@type@ *)ip1;
- const @type@ in2 = *(@type@ *)ip2;
- /* Order of operations important for MSVC 2015 */
- in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
- *((@type@ *)op1) = in1;
- }
- }
- npy_clear_floatstatus_barrier((char*)dimensions);
-}
-/**end repeat1**/
-
-/**begin repeat1
- * #kind = fmax, fmin#
- * #OP = >=, <=#
- **/
-NPY_NO_EXPORT void
-@TYPE@_@kind@(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
-{
- /* */
- if (IS_BINARY_REDUCE) {
- BINARY_REDUCE_LOOP(@type@) {
- const @type@ in2 = *(@type@ *)ip2;
- /* Order of operations important for MSVC 2015 */
- io1 = (io1 @OP@ in2 || npy_isnan(in2)) ? io1 : in2;
- }
- *((@type@ *)iop1) = io1;
- }
- else {
- BINARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- const @type@ in2 = *(@type@ *)ip2;
- /* Order of operations important for MSVC 2015 */
- *((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
- }
- }
- npy_clear_floatstatus_barrier((char*)dimensions);
-}
-/**end repeat1**/
-
NPY_NO_EXPORT void
@TYPE@_floor_divide(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
{
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 081ca9957..3eafbdf66 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -22,7 +22,6 @@
#define BOOL_fmax BOOL_maximum
#define BOOL_fmin BOOL_minimum
-
/*
*****************************************************************************
** BOOLEAN LOOPS **
@@ -660,6 +659,43 @@ PyUFunc_OOO_O(char **args, npy_intp const *dimensions, npy_intp const *steps, vo
/*
*****************************************************************************
+ ** MIN/MAX LOOPS **
+ *****************************************************************************
+ */
+
+#ifndef NPY_DISABLE_OPTIMIZATION
+ #include "loops_minmax.dispatch.h"
+#endif
+
+//---------- Integers ----------
+
+/**begin repeat
+ * #TYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG#
+ */
+/**begin repeat1
+ * #kind = maximum, minimum#
+ */
+ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@,
+ (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)))
+/**end repeat1**/
+/**end repeat**/
+
+//---------- Float ----------
+
+ /**begin repeat
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE#
+ */
+/**begin repeat1
+ * #kind = maximum, minimum, fmax, fmin#
+ */
+ NPY_CPU_DISPATCH_DECLARE(NPY_NO_EXPORT void @TYPE@_@kind@,
+ (char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(data)))
+/**end repeat1**/
+/**end repeat**/
+
+/*
+ *****************************************************************************
** END LOOPS **
*****************************************************************************
*/
diff --git a/numpy/core/src/umath/loops_minmax.dispatch.c.src b/numpy/core/src/umath/loops_minmax.dispatch.c.src
new file mode 100644
index 000000000..dbd158db9
--- /dev/null
+++ b/numpy/core/src/umath/loops_minmax.dispatch.c.src
@@ -0,0 +1,551 @@
+/*@targets
+ ** $maxopt baseline
+ ** neon asimd
+ ** sse2 avx2 avx512_skx
+ ** vsx2
+ **/
+#define _UMATHMODULE
+#define _MULTIARRAYMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "simd/simd.h"
+#include "loops_utils.h"
+#include "loops.h"
+#include "lowlevel_strided_loops.h"
+// Provides the various *_LOOP macros
+#include "fast_loop_macros.h"
+
+/*******************************************************************************
+ ** Scalar intrinsics
+ ******************************************************************************/
+// signed/unsigned int
+#define scalar_max_i(A, B) ((A > B) ? A : B)
+#define scalar_min_i(A, B) ((A < B) ? A : B)
+// fp, propagates NaNs
+#define scalar_max_f(A, B) ((A >= B || npy_isnan(A)) ? A : B)
+#define scalar_max_d scalar_max_f
+#define scalar_max_l scalar_max_f
+#define scalar_min_f(A, B) ((A <= B || npy_isnan(A)) ? A : B)
+#define scalar_min_d scalar_min_f
+#define scalar_min_l scalar_min_f
+// fp, ignores NaNs
+#define scalar_maxp_f fmaxf
+#define scalar_maxp_d fmax
+#define scalar_maxp_l fmaxl
+#define scalar_minp_f fminf
+#define scalar_minp_d fmin
+#define scalar_minp_l fminl
+
+// special optimization for the NaN-propagating fp scalars,
+// since C99 provides no function with that behaviour
+#ifndef NPY_DISABLE_OPTIMIZATION
+/**begin repeat
+ * #type = npy_float, npy_double#
+ * #sfx = f32, f64#
+ * #c_sfx = f, d#
+ * #isa_sfx = s, d#
+ * #sse_type = __m128, __m128d#
+ */
+/**begin repeat1
+ * #op = max, min#
+ * #neon_instr = fmax, fmin#
+ */
+#ifdef NPY_HAVE_SSE2
+#undef scalar_@op@_@c_sfx@
+NPY_FINLINE @type@ scalar_@op@_@c_sfx@(@type@ a, @type@ b) {
+ @sse_type@ va = _mm_set_s@isa_sfx@(a);
+ @sse_type@ vb = _mm_set_s@isa_sfx@(b);
+ @sse_type@ rv = _mm_@op@_s@isa_sfx@(va, vb);
+ // x86 min/max return the second operand when either input is NaN; blend so a NaN in `a` propagates
+ @sse_type@ nn = _mm_cmpord_s@isa_sfx@(va, va);
+ #ifdef NPY_HAVE_SSE41
+ rv = _mm_blendv_p@isa_sfx@(va, rv, nn);
+ #else
+ rv = _mm_xor_p@isa_sfx@(va, _mm_and_p@isa_sfx@(_mm_xor_p@isa_sfx@(va, rv), nn));
+ #endif
+ return _mm_cvts@isa_sfx@_@sfx@(rv);
+}
+#endif // SSE2
+#ifdef __aarch64__
+#undef scalar_@op@_@c_sfx@
+NPY_FINLINE @type@ scalar_@op@_@c_sfx@(@type@ a, @type@ b) {
+ @type@ result = 0;
+ __asm(
+ "@neon_instr@ %@isa_sfx@[result], %@isa_sfx@[a], %@isa_sfx@[b]"
+ : [result] "=w" (result)
+ : [a] "w" (a), [b] "w" (b)
+ );
+ return result;
+}
+#endif // __aarch64__
+/**end repeat1**/
+/**end repeat**/
+#endif // NPY_DISABLE_OPTIMIZATION
+// map long double to double when they have the same size
+#if NPY_BITSOF_DOUBLE == NPY_BITSOF_LONGDOUBLE
+/**begin repeat
+ * #op = max, min, maxp, minp#
+ */
+ #undef scalar_@op@_l
+ #define scalar_@op@_l scalar_@op@_d
+/**end repeat**/
+#endif
+
+/*******************************************************************************
+ ** extra SIMD intrinsics
+ ******************************************************************************/
+
+#if NPY_SIMD
+/**begin repeat
+ * #sfx = s8, u8, s16, u16, s32, u32, s64, u64#
+ * #is_64 = 0*6, 1*2#
+ */
+#if defined(NPY_HAVE_ASIMD) && defined(__aarch64__)
+ #if !@is_64@
+ #define npyv_reduce_min_@sfx@ vminvq_@sfx@
+ #define npyv_reduce_max_@sfx@ vmaxvq_@sfx@
+ #else
+ NPY_FINLINE npyv_lanetype_@sfx@ npyv_reduce_min_@sfx@(npyv_@sfx@ v)
+ {
+ npyv_lanetype_@sfx@ a = vgetq_lane_@sfx@(v, 0);
+ npyv_lanetype_@sfx@ b = vgetq_lane_@sfx@(v, 1);
+ npyv_lanetype_@sfx@ result = (a < b) ? a : b;
+ return result;
+ }
+ NPY_FINLINE npyv_lanetype_@sfx@ npyv_reduce_max_@sfx@(npyv_@sfx@ v)
+ {
+ npyv_lanetype_@sfx@ a = vgetq_lane_@sfx@(v, 0);
+ npyv_lanetype_@sfx@ b = vgetq_lane_@sfx@(v, 1);
+ npyv_lanetype_@sfx@ result = (a > b) ? a : b;
+ return result;
+ }
+ #endif // !@is_64@
+#else
+ /**begin repeat1
+ * #intrin = min, max#
+ */
+ NPY_FINLINE npyv_lanetype_@sfx@ npyv_reduce_@intrin@_@sfx@(npyv_@sfx@ v)
+ {
+ npyv_lanetype_@sfx@ NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) s[npyv_nlanes_@sfx@];
+ npyv_storea_@sfx@(s, v);
+ npyv_lanetype_@sfx@ result = s[0];
+ for(int i=1; i<npyv_nlanes_@sfx@; ++i){
+ result = scalar_@intrin@_i(result, s[i]);
+ }
+ return result;
+ }
+ /**end repeat1**/
+#endif
+/**end repeat**/
+#endif // NPY_SIMD
+
+/**begin repeat
+ * #sfx = f32, f64#
+ * #bsfx = b32, b64#
+ * #simd_chk = NPY_SIMD, NPY_SIMD_F64#
+ * #scalar_sfx = f, d#
+ */
+#if @simd_chk@
+#if defined(NPY_HAVE_ASIMD) && defined(__aarch64__)
+ #define npyv_minn_@sfx@ vminq_@sfx@
+ #define npyv_maxn_@sfx@ vmaxq_@sfx@
+ #define npyv_reduce_minn_@sfx@ vminvq_@sfx@
+ #define npyv_reduce_maxn_@sfx@ vmaxvq_@sfx@
+ #define npyv_reduce_minp_@sfx@ vminnmvq_@sfx@
+ #define npyv_reduce_maxp_@sfx@ vmaxnmvq_@sfx@
+#else
+ /**begin repeat1
+ * #intrin = min, max#
+ */
+ // propagates NaNs
+ NPY_FINLINE npyv_@sfx@ npyv_@intrin@n_@sfx@(npyv_@sfx@ a, npyv_@sfx@ b)
+ {
+ npyv_@sfx@ result = npyv_@intrin@_@sfx@(a, b);
+ // result = npyv_select_@sfx@(npyv_notnan_@sfx@(b), result, b);
+ // x86 min/max already return the second operand on NaN, so the fix-up for `b` is only needed elsewhere
+ #ifndef NPY_HAVE_SSE2
+ result = npyv_select_@sfx@(npyv_notnan_@sfx@(b), result, b);
+ #endif
+ result = npyv_select_@sfx@(npyv_notnan_@sfx@(a), result, a);
+ return result;
+ }
+ /**end repeat1**/
+ /**begin repeat1
+ * #intrin = minn, maxn, minp, maxp#
+ * #scalar_intrin = min, max, minp, maxp#
+ */
+ NPY_FINLINE npyv_lanetype_@sfx@ npyv_reduce_@intrin@_@sfx@(npyv_@sfx@ v)
+ {
+ npyv_lanetype_@sfx@ NPY_DECL_ALIGNED(NPY_SIMD_WIDTH) s[npyv_nlanes_@sfx@];
+ npyv_storea_@sfx@(s, v);
+ npyv_lanetype_@sfx@ result = s[0];
+ for(int i=1; i<npyv_nlanes_@sfx@; ++i){
+ result = scalar_@scalar_intrin@_@scalar_sfx@(result, s[i]);
+ }
+ return result;
+ }
+ /**end repeat1**/
+#endif
+#endif // simd_chk
+/**end repeat**/
+
+/*******************************************************************************
+ ** Defining the SIMD kernels
+ ******************************************************************************/
+/**begin repeat
+ * #sfx = s8, u8, s16, u16, s32, u32, s64, u64, f32, f64#
+ * #simd_chk = NPY_SIMD*9, NPY_SIMD_F64#
+ * #is_fp = 0*8, 1, 1#
+ * #scalar_sfx = i*8, f, d#
+ */
+/**begin repeat1
+ * # intrin = max, min, maxp, minp#
+ * # fp_only = 0, 0, 1, 1#
+ */
+#define SCALAR_OP scalar_@intrin@_@scalar_sfx@
+#if @simd_chk@ && (!@fp_only@ || (@is_fp@ && @fp_only@))
+
+#if @is_fp@ && !@fp_only@
+ #define V_INTRIN npyv_@intrin@n_@sfx@ // propagates NaNs
+ #define V_REDUCE_INTRIN npyv_reduce_@intrin@n_@sfx@
+#else
+ #define V_INTRIN npyv_@intrin@_@sfx@
+ #define V_REDUCE_INTRIN npyv_reduce_@intrin@_@sfx@
+#endif
+
+// contiguous input.
+static inline void
+simd_reduce_c_@intrin@_@sfx@(const npyv_lanetype_@sfx@ *ip, npyv_lanetype_@sfx@ *op1, npy_intp len)
+{
+ if (len < 1) {
+ return;
+ }
+ const int vstep = npyv_nlanes_@sfx@;
+ const int wstep = vstep*8;
+ npyv_@sfx@ acc = npyv_setall_@sfx@(op1[0]);
+ for (; len >= wstep; len -= wstep, ip += wstep) {
+ #ifdef NPY_HAVE_SSE2
+ NPY_PREFETCH(ip + wstep, 0, 3);
+ #endif
+ npyv_@sfx@ v0 = npyv_load_@sfx@(ip + vstep * 0);
+ npyv_@sfx@ v1 = npyv_load_@sfx@(ip + vstep * 1);
+ npyv_@sfx@ v2 = npyv_load_@sfx@(ip + vstep * 2);
+ npyv_@sfx@ v3 = npyv_load_@sfx@(ip + vstep * 3);
+
+ npyv_@sfx@ v4 = npyv_load_@sfx@(ip + vstep * 4);
+ npyv_@sfx@ v5 = npyv_load_@sfx@(ip + vstep * 5);
+ npyv_@sfx@ v6 = npyv_load_@sfx@(ip + vstep * 6);
+ npyv_@sfx@ v7 = npyv_load_@sfx@(ip + vstep * 7);
+
+ npyv_@sfx@ r01 = V_INTRIN(v0, v1);
+ npyv_@sfx@ r23 = V_INTRIN(v2, v3);
+ npyv_@sfx@ r45 = V_INTRIN(v4, v5);
+ npyv_@sfx@ r67 = V_INTRIN(v6, v7);
+ acc = V_INTRIN(acc, V_INTRIN(V_INTRIN(r01, r23), V_INTRIN(r45, r67)));
+ }
+ for (; len >= vstep; len -= vstep, ip += vstep) {
+ acc = V_INTRIN(acc, npyv_load_@sfx@(ip));
+ }
+ npyv_lanetype_@sfx@ r = V_REDUCE_INTRIN(acc);
+ // Scalar - finish up any remaining iterations
+ for (; len > 0; --len, ++ip) {
+ const npyv_lanetype_@sfx@ in2 = *ip;
+ r = SCALAR_OP(r, in2);
+ }
+ op1[0] = r;
+}
+
+// contiguous inputs and output.
+static inline void
+simd_binary_ccc_@intrin@_@sfx@(const npyv_lanetype_@sfx@ *ip1, const npyv_lanetype_@sfx@ *ip2,
+ npyv_lanetype_@sfx@ *op1, npy_intp len)
+{
+#if NPY_SIMD_WIDTH == 128
+ // Note, 6x unroll was chosen for best results on Apple M1
+ const int vectorsPerLoop = 6;
+#else
+ // To avoid memory bandwidth bottleneck
+ const int vectorsPerLoop = 2;
+#endif
+ const int elemPerVector = npyv_nlanes_@sfx@;
+ int elemPerLoop = vectorsPerLoop * elemPerVector;
+
+ npy_intp i = 0;
+
+ for (; (i+elemPerLoop) <= len; i += elemPerLoop) {
+ npyv_@sfx@ v0 = npyv_load_@sfx@(&ip1[i + 0 * elemPerVector]);
+ npyv_@sfx@ v1 = npyv_load_@sfx@(&ip1[i + 1 * elemPerVector]);
+ #if NPY_SIMD_WIDTH == 128
+ npyv_@sfx@ v2 = npyv_load_@sfx@(&ip1[i + 2 * elemPerVector]);
+ npyv_@sfx@ v3 = npyv_load_@sfx@(&ip1[i + 3 * elemPerVector]);
+ npyv_@sfx@ v4 = npyv_load_@sfx@(&ip1[i + 4 * elemPerVector]);
+ npyv_@sfx@ v5 = npyv_load_@sfx@(&ip1[i + 5 * elemPerVector]);
+ #endif
+ npyv_@sfx@ u0 = npyv_load_@sfx@(&ip2[i + 0 * elemPerVector]);
+ npyv_@sfx@ u1 = npyv_load_@sfx@(&ip2[i + 1 * elemPerVector]);
+ #if NPY_SIMD_WIDTH == 128
+ npyv_@sfx@ u2 = npyv_load_@sfx@(&ip2[i + 2 * elemPerVector]);
+ npyv_@sfx@ u3 = npyv_load_@sfx@(&ip2[i + 3 * elemPerVector]);
+ npyv_@sfx@ u4 = npyv_load_@sfx@(&ip2[i + 4 * elemPerVector]);
+ npyv_@sfx@ u5 = npyv_load_@sfx@(&ip2[i + 5 * elemPerVector]);
+ #endif
+ npyv_@sfx@ m0 = V_INTRIN(v0, u0);
+ npyv_@sfx@ m1 = V_INTRIN(v1, u1);
+ #if NPY_SIMD_WIDTH == 128
+ npyv_@sfx@ m2 = V_INTRIN(v2, u2);
+ npyv_@sfx@ m3 = V_INTRIN(v3, u3);
+ npyv_@sfx@ m4 = V_INTRIN(v4, u4);
+ npyv_@sfx@ m5 = V_INTRIN(v5, u5);
+ #endif
+ npyv_store_@sfx@(&op1[i + 0 * elemPerVector], m0);
+ npyv_store_@sfx@(&op1[i + 1 * elemPerVector], m1);
+ #if NPY_SIMD_WIDTH == 128
+ npyv_store_@sfx@(&op1[i + 2 * elemPerVector], m2);
+ npyv_store_@sfx@(&op1[i + 3 * elemPerVector], m3);
+ npyv_store_@sfx@(&op1[i + 4 * elemPerVector], m4);
+ npyv_store_@sfx@(&op1[i + 5 * elemPerVector], m5);
+ #endif
+ }
+ for (; (i+elemPerVector) <= len; i += elemPerVector) {
+ npyv_@sfx@ v0 = npyv_load_@sfx@(ip1 + i);
+ npyv_@sfx@ u0 = npyv_load_@sfx@(ip2 + i);
+ npyv_@sfx@ m0 = V_INTRIN(v0, u0);
+ npyv_store_@sfx@(op1 + i, m0);
+ }
+ // Scalar - finish up any remaining iterations
+ for (; i < len; ++i) {
+ const npyv_lanetype_@sfx@ in1 = ip1[i];
+ const npyv_lanetype_@sfx@ in2 = ip2[i];
+ op1[i] = SCALAR_OP(in1, in2);
+ }
+}
+// non-contiguous (strided) path for 32/64-bit float memory access
+#if @is_fp@
+static inline void
+simd_binary_@intrin@_@sfx@(const npyv_lanetype_@sfx@ *ip1, npy_intp sip1,
+ const npyv_lanetype_@sfx@ *ip2, npy_intp sip2,
+ npyv_lanetype_@sfx@ *op1, npy_intp sop1,
+ npy_intp len)
+{
+ const int vstep = npyv_nlanes_@sfx@;
+ for (; len >= vstep; len -= vstep, ip1 += sip1*vstep,
+ ip2 += sip2*vstep, op1 += sop1*vstep
+ ) {
+ npyv_@sfx@ a, b;
+ if (sip1 == 1) {
+ a = npyv_load_@sfx@(ip1);
+ } else {
+ a = npyv_loadn_@sfx@(ip1, sip1);
+ }
+ if (sip2 == 1) {
+ b = npyv_load_@sfx@(ip2);
+ } else {
+ b = npyv_loadn_@sfx@(ip2, sip2);
+ }
+ npyv_@sfx@ r = V_INTRIN(a, b);
+ if (sop1 == 1) {
+ npyv_store_@sfx@(op1, r);
+ } else {
+ npyv_storen_@sfx@(op1, sop1, r);
+ }
+ }
+ for (; len > 0; --len, ip1 += sip1, ip2 += sip2, op1 += sop1) {
+ const npyv_lanetype_@sfx@ a = *ip1;
+ const npyv_lanetype_@sfx@ b = *ip2;
+ *op1 = SCALAR_OP(a, b);
+ }
+}
+#endif
+
+#undef V_INTRIN
+#undef V_REDUCE_INTRIN
+
+#endif // simd_chk && (!fp_only || (is_fp && fp_only))
+
+#undef SCALAR_OP
+/**end repeat1**/
+/**end repeat**/
+
+/*******************************************************************************
+ ** Defining ufunc inner functions
+ ******************************************************************************/
+/**begin repeat
+ * #TYPE = UBYTE, USHORT, UINT, ULONG, ULONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * FLOAT, DOUBLE, LONGDOUBLE#
+ *
+ * #BTYPE = BYTE, SHORT, INT, LONG, LONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * FLOAT, DOUBLE, LONGDOUBLE#
+ * #type = npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_float, npy_double, npy_longdouble#
+ *
+ * #is_fp = 0*10, 1*3#
+ * #is_unsigned = 1*5, 0*5, 0*3#
+ * #scalar_sfx = i*10, f, d, l#
+ */
+#undef TO_SIMD_SFX
+#if 0
+/**begin repeat1
+ * #len = 8, 16, 32, 64#
+ */
+#elif NPY_SIMD && NPY_BITSOF_@BTYPE@ == @len@
+ #if @is_fp@
+ #define TO_SIMD_SFX(X) X##_f@len@
+ #if NPY_BITSOF_@BTYPE@ == 64 && !NPY_SIMD_F64
+ #undef TO_SIMD_SFX
+ #endif
+ #elif @is_unsigned@
+ #define TO_SIMD_SFX(X) X##_u@len@
+ #else
+ #define TO_SIMD_SFX(X) X##_s@len@
+ #endif
+/**end repeat1**/
+#endif
+
+/**begin repeat1
+ * # kind = maximum, minimum, fmax, fmin#
+ * # intrin = max, min, maxp, minp#
+ * # fp_only = 0, 0, 1, 1#
+ */
+#if !@fp_only@ || (@is_fp@ && @fp_only@)
+#define SCALAR_OP scalar_@intrin@_@scalar_sfx@
+
+NPY_NO_EXPORT void NPY_CPU_DISPATCH_CURFX(@TYPE@_@kind@)
+(char **args, npy_intp const *dimensions, npy_intp const *steps, void *NPY_UNUSED(func))
+{
+ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];
+ npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2],
+ len = dimensions[0];
+ npy_intp i = 0;
+#ifdef TO_SIMD_SFX
+ #undef STYPE
+ #define STYPE TO_SIMD_SFX(npyv_lanetype)
+ if (IS_BINARY_REDUCE) {
+ // reduce and contiguous
+ if (is2 == sizeof(@type@)) {
+ TO_SIMD_SFX(simd_reduce_c_@intrin@)(
+ (STYPE*)ip2, (STYPE*)op1, len
+ );
+ goto clear_fp;
+ }
+ }
+ else if (!is_mem_overlap(ip1, is1, op1, os1, len) &&
+ !is_mem_overlap(ip2, is2, op1, os1, len)
+ ) {
+ // no overlap and operands are binary contiguous
+ if (IS_BINARY_CONT(@type@, @type@)) {
+ TO_SIMD_SFX(simd_binary_ccc_@intrin@)(
+ (STYPE*)ip1, (STYPE*)ip2, (STYPE*)op1, len
+ );
+ goto clear_fp;
+ }
+ // unrolled scalars are faster than non-contiguous vector load/store on Arm
+ #if !defined(NPY_HAVE_NEON) && @is_fp@
+ if (TO_SIMD_SFX(npyv_loadable_stride)(is1/sizeof(STYPE)) &&
+ TO_SIMD_SFX(npyv_loadable_stride)(is2/sizeof(STYPE)) &&
+ TO_SIMD_SFX(npyv_storable_stride)(os1/sizeof(STYPE))
+ ) {
+ TO_SIMD_SFX(simd_binary_@intrin@)(
+ (STYPE*)ip1, is1/sizeof(STYPE),
+ (STYPE*)ip2, is2/sizeof(STYPE),
+ (STYPE*)op1, os1/sizeof(STYPE), len
+ );
+ goto clear_fp;
+ }
+ #endif
+ }
+#endif // TO_SIMD_SFX
+#ifndef NPY_DISABLE_OPTIMIZATION
+ // scalar unrolls
+ if (IS_BINARY_REDUCE) {
+ // Note, 8x unroll was chosen for best results on Apple M1
+ npy_intp elemPerLoop = 8;
+ if((i+elemPerLoop) <= len){
+ @type@ m0 = *((@type@ *)(ip2 + (i + 0) * is2));
+ @type@ m1 = *((@type@ *)(ip2 + (i + 1) * is2));
+ @type@ m2 = *((@type@ *)(ip2 + (i + 2) * is2));
+ @type@ m3 = *((@type@ *)(ip2 + (i + 3) * is2));
+ @type@ m4 = *((@type@ *)(ip2 + (i + 4) * is2));
+ @type@ m5 = *((@type@ *)(ip2 + (i + 5) * is2));
+ @type@ m6 = *((@type@ *)(ip2 + (i + 6) * is2));
+ @type@ m7 = *((@type@ *)(ip2 + (i + 7) * is2));
+
+ i += elemPerLoop;
+ for(; (i+elemPerLoop)<=len; i+=elemPerLoop){
+ @type@ v0 = *((@type@ *)(ip2 + (i + 0) * is2));
+ @type@ v1 = *((@type@ *)(ip2 + (i + 1) * is2));
+ @type@ v2 = *((@type@ *)(ip2 + (i + 2) * is2));
+ @type@ v3 = *((@type@ *)(ip2 + (i + 3) * is2));
+ @type@ v4 = *((@type@ *)(ip2 + (i + 4) * is2));
+ @type@ v5 = *((@type@ *)(ip2 + (i + 5) * is2));
+ @type@ v6 = *((@type@ *)(ip2 + (i + 6) * is2));
+ @type@ v7 = *((@type@ *)(ip2 + (i + 7) * is2));
+
+ m0 = SCALAR_OP(m0, v0);
+ m1 = SCALAR_OP(m1, v1);
+ m2 = SCALAR_OP(m2, v2);
+ m3 = SCALAR_OP(m3, v3);
+ m4 = SCALAR_OP(m4, v4);
+ m5 = SCALAR_OP(m5, v5);
+ m6 = SCALAR_OP(m6, v6);
+ m7 = SCALAR_OP(m7, v7);
+ }
+
+ m0 = SCALAR_OP(m0, m1);
+ m2 = SCALAR_OP(m2, m3);
+ m4 = SCALAR_OP(m4, m5);
+ m6 = SCALAR_OP(m6, m7);
+
+ m0 = SCALAR_OP(m0, m2);
+ m4 = SCALAR_OP(m4, m6);
+
+ m0 = SCALAR_OP(m0, m4);
+
+ *((@type@ *)op1) = SCALAR_OP(*((@type@ *)op1), m0);
+ }
+ } else{
+ // Note, 4x unroll was chosen for best results on Apple M1
+ npy_intp elemPerLoop = 4;
+ for(; (i+elemPerLoop)<=len; i+=elemPerLoop){
+ /* Note, we can't just load all, do all ops, then store all here.
+ * Sometimes ufuncs are called with `accumulate`, which assumes that
+ * previous iterations have finished before the next one starts. For
+ * example, the output of iteration 2 depends on the result of
+ * iteration 1.
+ */
+
+ /**begin repeat2
+ * #unroll = 0, 1, 2, 3#
+ */
+ @type@ v@unroll@ = *((@type@ *)(ip1 + (i + @unroll@) * is1));
+ @type@ u@unroll@ = *((@type@ *)(ip2 + (i + @unroll@) * is2));
+ *((@type@ *)(op1 + (i + @unroll@) * os1)) = SCALAR_OP(v@unroll@, u@unroll@);
+ /**end repeat2**/
+ }
+ }
+#endif // NPY_DISABLE_OPTIMIZATION
+ ip1 += is1 * i;
+ ip2 += is2 * i;
+ op1 += os1 * i;
+ for (; i < len; ++i, ip1 += is1, ip2 += is2, op1 += os1) {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ in2 = *(@type@ *)ip2;
+ *((@type@ *)op1) = SCALAR_OP(in1, in2);
+ }
+#ifdef TO_SIMD_SFX
+clear_fp:
+ npyv_cleanup();
+#endif
+#if @is_fp@
+ npy_clear_floatstatus_barrier((char*)dimensions);
+#endif
+}
+
+#undef SCALAR_OP
+
+#endif // !fp_only || (is_fp && fp_only)
+/**end repeat1**/
+/**end repeat**/
+
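The scalar fallback above keeps eight independent accumulators and only combines them pairwise at the end, so the unrolled iterations carry no dependency chain. A minimal standalone C sketch of that reduction pattern (illustrative only, shortened to a 4x unroll and with the NaN handling omitted; names are not from the patch):

    #include <stddef.h>

    /* Maximum of a contiguous array using four independent accumulators,
       combined pairwise at the end; same shape as the 8x scalar unroll in
       the dispatch source. Caller must pass n > 0 (the dispatch source
       returns early for empty input). */
    static double reduce_max_unrolled(const double *x, size_t n)
    {
        double m0 = x[0], m1 = x[0], m2 = x[0], m3 = x[0];
        size_t i = 0;
        for (; i + 4 <= n; i += 4) {
            m0 = (m0 > x[i + 0]) ? m0 : x[i + 0];
            m1 = (m1 > x[i + 1]) ? m1 : x[i + 1];
            m2 = (m2 > x[i + 2]) ? m2 : x[i + 2];
            m3 = (m3 > x[i + 3]) ? m3 : x[i + 3];
        }
        m0 = (m0 > m1) ? m0 : m1;            /* pairwise combine */
        m2 = (m2 > m3) ? m2 : m3;
        m0 = (m0 > m2) ? m0 : m2;
        for (; i < n; ++i) {                 /* scalar tail */
            m0 = (m0 > x[i]) ? m0 : x[i];
        }
        return m0;
    }
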
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 0e2c1ab8b..8b833ee56 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -95,38 +95,6 @@ run_unary_avx512f_@func@_@TYPE@(char **args, const npy_intp *dimensions, const n
*/
/**begin repeat1
- * #func = maximum, minimum#
- */
-
-#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS && @EXISTS@
-static NPY_INLINE NPY_GCC_TARGET_AVX512F void
-AVX512F_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps);
-#endif
-
-static NPY_INLINE int
-run_binary_avx512f_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
-{
-#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS && @EXISTS@
- if (IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP) {
- AVX512F_@func@_@TYPE@(args, dimensions, steps);
- return 1;
- }
- else
- return 0;
-#endif
- return 0;
-}
-/**end repeat1**/
-
-/**end repeat**/
-
-/**begin repeat
- * #type = npy_float, npy_double, npy_longdouble#
- * #TYPE = FLOAT, DOUBLE, LONGDOUBLE#
- * #EXISTS = 1, 1, 0#
- */
-
-/**begin repeat1
* #func = isnan, isfinite, isinf, signbit#
*/
@@ -204,9 +172,9 @@ run_unary_@isa@_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp
*/
/**begin repeat1
- * #func = negative, minimum, maximum#
- * #check = IS_BLOCKABLE_UNARY, IS_BLOCKABLE_REDUCE*2 #
- * #name = unary, unary_reduce*2#
+ * #func = negative#
+ * #check = IS_BLOCKABLE_UNARY#
+ * #name = unary#
*/
#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
@@ -678,55 +646,6 @@ sse2_negative_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
}
/**end repeat1**/
-
-/**begin repeat1
- * #kind = maximum, minimum#
- * #VOP = max, min#
- * #OP = >=, <=#
- **/
-/* arguments swapped as unary reduce has the swapped compared to unary */
-static void
-sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
-{
- const npy_intp stride = VECTOR_SIZE_BYTES / (npy_intp)sizeof(@type@);
- LOOP_BLOCK_ALIGN_VAR(ip, @type@, VECTOR_SIZE_BYTES) {
- /* Order of operations important for MSVC 2015 */
- *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
- }
- assert(n < stride || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
- if (i + 3 * stride <= n) {
- /* load the first elements */
- @vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
- @vtype@ c2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]);
- i += 2 * stride;
-
- /* minps/minpd will set invalid flag if nan is encountered */
- npy_clear_floatstatus_barrier((char*)&c1);
- LOOP_BLOCKED(@type@, 2 * VECTOR_SIZE_BYTES) {
- @vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
- @vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]);
- c1 = @vpre@_@VOP@_@vsuf@(c1, v1);
- c2 = @vpre@_@VOP@_@vsuf@(c2, v2);
- }
- c1 = @vpre@_@VOP@_@vsuf@(c1, c2);
-
- if (npy_get_floatstatus_barrier((char*)&c1) & NPY_FPE_INVALID) {
- *op = @nan@;
- }
- else {
- @type@ tmp = sse2_horizontal_@VOP@_@vtype@(c1);
- /* Order of operations important for MSVC 2015 */
- *op = (*op @OP@ tmp || npy_isnan(*op)) ? *op : tmp;
- }
- }
- LOOP_BLOCKED_END {
- /* Order of operations important for MSVC 2015 */
- *op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
- }
- npy_clear_floatstatus_barrier((char*)op);
-}
-/**end repeat1**/
-
/**end repeat**/
/* bunch of helper functions used in ISA_exp/log_FLOAT*/
@@ -1200,107 +1119,6 @@ AVX512_SKX_@func@_@TYPE@(npy_bool* op, @type@* ip, const npy_intp array_size, co
/**end repeat**/
/**begin repeat
- * #type = npy_float, npy_double#
- * #TYPE = FLOAT, DOUBLE#
- * #num_lanes = 16, 8#
- * #vsuffix = ps, pd#
- * #mask = __mmask16, __mmask8#
- * #vtype1 = __m512, __m512d#
- * #vtype2 = __m512i, __m256i#
- * #scale = 4, 8#
- * #vindextype = __m512i, __m256i#
- * #vindexsize = 512, 256#
- * #vindexload = _mm512_loadu_si512, _mm256_loadu_si256#
- * #vtype2_load = _mm512_maskz_loadu_epi32, _mm256_maskz_loadu_epi32#
- * #vtype2_gather = _mm512_mask_i32gather_epi32, _mm256_mmask_i32gather_epi32#
- * #vtype2_store = _mm512_mask_storeu_epi32, _mm256_mask_storeu_epi32#
- * #vtype2_scatter = _mm512_mask_i32scatter_epi32, _mm256_mask_i32scatter_epi32#
- * #setzero = _mm512_setzero_epi32, _mm256_setzero_si256#
- */
-/**begin repeat1
- * #func = maximum, minimum#
- * #vectorf = max, min#
- */
-
-#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS && defined NPY_HAVE_SSE2_INTRINSICS
-static NPY_INLINE NPY_GCC_TARGET_AVX512F void
-AVX512F_@func@_@TYPE@(char **args, npy_intp const *dimensions, npy_intp const *steps)
-{
- const npy_intp stride_ip1 = steps[0]/(npy_intp)sizeof(@type@);
- const npy_intp stride_ip2 = steps[1]/(npy_intp)sizeof(@type@);
- const npy_intp stride_op = steps[2]/(npy_intp)sizeof(@type@);
- const npy_intp array_size = dimensions[0];
- npy_intp num_remaining_elements = array_size;
- @type@* ip1 = (@type@*) args[0];
- @type@* ip2 = (@type@*) args[1];
- @type@* op = (@type@*) args[2];
-
- @mask@ load_mask = avx512_get_full_load_mask_@vsuffix@();
-
- /*
- * Note: while generally indices are npy_intp, we ensure that our maximum index
- * will fit in an int32 as a precondition for this function via
- * IS_BINARY_SMALL_STEPS_AND_NOMEMOVERLAP
- */
-
- npy_int32 index_ip1[@num_lanes@], index_ip2[@num_lanes@], index_op[@num_lanes@];
- for (npy_int32 ii = 0; ii < @num_lanes@; ii++) {
- index_ip1[ii] = ii*stride_ip1;
- index_ip2[ii] = ii*stride_ip2;
- index_op[ii] = ii*stride_op;
- }
- @vindextype@ vindex_ip1 = @vindexload@((@vindextype@*)&index_ip1[0]);
- @vindextype@ vindex_ip2 = @vindexload@((@vindextype@*)&index_ip2[0]);
- @vindextype@ vindex_op = @vindexload@((@vindextype@*)&index_op[0]);
- @vtype1@ zeros_f = _mm512_setzero_@vsuffix@();
-
- while (num_remaining_elements > 0) {
- if (num_remaining_elements < @num_lanes@) {
- load_mask = avx512_get_partial_load_mask_@vsuffix@(
- num_remaining_elements, @num_lanes@);
- }
- @vtype1@ x1, x2;
- if (stride_ip1 == 1) {
- x1 = avx512_masked_load_@vsuffix@(load_mask, ip1);
- }
- else {
- x1 = avx512_masked_gather_@vsuffix@(zeros_f, ip1, vindex_ip1, load_mask);
- }
- if (stride_ip2 == 1) {
- x2 = avx512_masked_load_@vsuffix@(load_mask, ip2);
- }
- else {
- x2 = avx512_masked_gather_@vsuffix@(zeros_f, ip2, vindex_ip2, load_mask);
- }
-
- /*
- * when only one of the argument is a nan, the maxps/maxpd instruction
- * returns the second argument. The additional blend instruction fixes
- * this issue to conform with NumPy behaviour.
- */
- @mask@ nan_mask = _mm512_cmp_@vsuffix@_mask(x1, x1, _CMP_NEQ_UQ);
- @vtype1@ out = _mm512_@vectorf@_@vsuffix@(x1, x2);
- out = _mm512_mask_blend_@vsuffix@(nan_mask, out, x1);
-
- if (stride_op == 1) {
- _mm512_mask_storeu_@vsuffix@(op, load_mask, out);
- }
- else {
- /* scatter! */
- _mm512_mask_i32scatter_@vsuffix@(op, load_mask, vindex_op, out, @scale@);
- }
-
- ip1 += @num_lanes@*stride_ip1;
- ip2 += @num_lanes@*stride_ip2;
- op += @num_lanes@*stride_op;
- num_remaining_elements -= @num_lanes@;
- }
-}
-#endif
-/**end repeat1**/
-/**end repeat**/
-
-/**begin repeat
* #ISA = FMA, AVX512F#
* #isa = fma, avx512#
* #vsize = 256, 512#