summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
authorJulian Taylor <jtaylor.debian@googlemail.com>2013-06-10 19:38:58 +0200
committerJulian Taylor <jtaylor.debian@googlemail.com>2013-06-10 23:03:06 +0200
commitaef286debfd11a62f1c337dea55624cee7fd4d9e (patch)
tree34cd255a107668c8442984b4af5e9882635fb0f0 /numpy
parentc9bf9b0b1c8a85391695d4ded39921e98c63257d (diff)
downloadnumpy-aef286debfd11a62f1c337dea55624cee7fd4d9e.tar.gz
ENH: enable unaligned loads on x86
x86 can handle unaligned loads, and there is no hand-vectorized code in this file. It would be a serious compiler bug if it added vectorization without checking for alignment. Enables fast complex128 copies, which are unaligned on 32-bit gcc unless compiled with -malign-double.
Diffstat (limited to 'numpy')
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src6
1 file changed, 2 insertions, 4 deletions
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index 5c02c6e9f..bfa7ca224 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -21,12 +21,10 @@
#include "lowlevel_strided_loops.h"
/*
- * x86 platform may work with unaligned access, except when the
- * compiler uses aligned SSE instructions, which gcc does in some
- * cases. This is disabled for the time being.
+ * x86 platform works with unaligned access
*/
#if (defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64))
-# define NPY_USE_UNALIGNED_ACCESS 0
+# define NPY_USE_UNALIGNED_ACCESS 1
#else
# define NPY_USE_UNALIGNED_ACCESS 0
#endif