Diffstat (limited to 'numpy/distutils')
-rw-r--r--  numpy/distutils/__init__.py                        |   4
-rw-r--r--  numpy/distutils/__init__.pyi                       |   4
-rw-r--r--  numpy/distutils/ccompiler_opt.py                   | 106
-rw-r--r--  numpy/distutils/checks/extra_avx512bw_mask.c       |  18
-rw-r--r--  numpy/distutils/checks/extra_avx512f_reduce.c      |  41
-rw-r--r--  numpy/distutils/checks/extra_vsx_asm.c             |  36
-rw-r--r--  numpy/distutils/command/autodist.py                |  33
-rw-r--r--  numpy/distutils/command/build.py                   |  12
-rw-r--r--  numpy/distutils/command/build_clib.py              |  67
-rw-r--r--  numpy/distutils/command/build_ext.py               |  69
-rw-r--r--  numpy/distutils/command/config.py                  |  14
-rw-r--r--  numpy/distutils/fcompiler/__init__.py              |  63
-rw-r--r--  numpy/distutils/fcompiler/environment.py           |   5
-rw-r--r--  numpy/distutils/fcompiler/fujitsu.py               |  46
-rw-r--r--  numpy/distutils/fcompiler/gnu.py                   |  17
-rw-r--r--  numpy/distutils/fcompiler/nag.py                   |   2
-rw-r--r--  numpy/distutils/fcompiler/nv.py                    |  55
-rw-r--r--  numpy/distutils/fcompiler/pg.py                    |   2
-rw-r--r--  numpy/distutils/mingw32ccompiler.py                |  24
-rw-r--r--  numpy/distutils/misc_util.py                       |  33
-rw-r--r--  numpy/distutils/setup.py                           |   1
-rw-r--r--  numpy/distutils/system_info.py                     | 161
-rw-r--r--  numpy/distutils/tests/test_ccompiler_opt_conf.py   |  51
-rw-r--r--  numpy/distutils/tests/test_system_info.py          |  35
-rw-r--r--  numpy/distutils/unixccompiler.py                   |  10
25 files changed, 674 insertions, 235 deletions
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index 528b76eb5..79974d1c2 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -18,9 +18,7 @@ LAPACK, and for setting include paths and similar build options, please see
``site.cfg.example`` in the root of the NumPy repository or sdist.
"""
-# from setuptools v49.2.0, setuptools warns if distutils is imported first,
-# so pre-emptively import setuptools
-import setuptools
+
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
diff --git a/numpy/distutils/__init__.pyi b/numpy/distutils/__init__.pyi
new file mode 100644
index 000000000..3938d68de
--- /dev/null
+++ b/numpy/distutils/__init__.pyi
@@ -0,0 +1,4 @@
+from typing import Any
+
+# TODO: remove when the full numpy namespace is defined
+def __getattr__(name: str) -> Any: ...
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index b6b7939a2..20dbb5c00 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -152,6 +152,18 @@ class _Config:
By default(None), treated as True if the feature contains at
least one applicable flag. see `feature_can_autovec()`
+ "extra_checks": str or list, optional
+ Extra test case names for the CPU feature that need to be tested
+ against the compiler.
+
+ Each test case must have a C file named ``extra_xxxx.c``, where
+ ``xxxx`` is the case name in lower case, under 'conf_check_path'.
+ It should contain at least one intrinsic or function related to the test case.
+
+ If the compiler is able to successfully compile the C file then `CCompilerOpt`
+ will add a C ``#define`` for it into the main dispatch header, e.g.
+ ``#define {conf_c_prefix}_XXXX``, where ``XXXX`` is the case name in upper case.
+
**NOTES**:
* space can be used as separator with options that supports "str or list"
* case-sensitive for all values and feature name must be in upper-case.
@@ -230,7 +242,10 @@ class _Config:
F16C = dict(interest=11, implies="AVX"),
FMA3 = dict(interest=12, implies="F16C"),
AVX2 = dict(interest=13, implies="F16C"),
- AVX512F = dict(interest=20, implies="FMA3 AVX2", implies_detect=False),
+ AVX512F = dict(
+ interest=20, implies="FMA3 AVX2", implies_detect=False,
+ extra_checks="AVX512F_REDUCE"
+ ),
AVX512CD = dict(interest=21, implies="AVX512F"),
AVX512_KNL = dict(
interest=40, implies="AVX512CD", group="AVX512ER AVX512PF",
@@ -243,7 +258,8 @@ class _Config:
),
AVX512_SKX = dict(
interest=42, implies="AVX512CD", group="AVX512VL AVX512BW AVX512DQ",
- detect="AVX512_SKX", implies_detect=False
+ detect="AVX512_SKX", implies_detect=False,
+ extra_checks="AVX512BW_MASK"
),
AVX512_CLX = dict(
interest=43, implies="AVX512_SKX", group="AVX512VNNI",
@@ -260,7 +276,7 @@ class _Config:
),
# IBM/Power
## Power7/ISA 2.06
- VSX = dict(interest=1, headers="altivec.h"),
+ VSX = dict(interest=1, headers="altivec.h", extra_checks="VSX_ASM"),
## Power8/ISA 2.07
VSX2 = dict(interest=2, implies="VSX", implies_detect=False),
## Power9/ISA 3.00
@@ -673,7 +689,7 @@ class _Distutils:
# intel and msvc compilers don't raise
# fatal errors when flags are wrong or unsupported
".*("
- "warning D9002|" # msvc, it should be work with any language.
+ "warning D9002|" # msvc, it should be work with any language.
"invalid argument for option" # intel
").*"
)
@@ -1137,7 +1153,7 @@ class _Feature:
continue
# list is used internally for these options
for option in (
- "implies", "group", "detect", "headers", "flags"
+ "implies", "group", "detect", "headers", "flags", "extra_checks"
) :
oval = feature.get(option)
if isinstance(oval, str):
@@ -1439,7 +1455,7 @@ class _Feature:
self.conf_check_path, "cpu_%s.c" % name.lower()
)
if not os.path.exists(test_path):
- self.dist_fatal("feature test file is not exist", path)
+ self.dist_fatal("feature test file is not exist", test_path)
test = self.dist_test(test_path, force_flags + self.cc_flags["werror"])
if not test:
@@ -1487,6 +1503,45 @@ class _Feature:
can = valid_flags and any(valid_flags)
return can
+ @_Cache.me
+ def feature_extra_checks(self, name):
+ """
+ Return a list of supported extra checks after testing them against
+ the compiler.
+
+ Parameters
+ ----------
+ name : str
+ CPU feature name in uppercase.
+ """
+ assert isinstance(name, str)
+ d = self.feature_supported[name]
+ extra_checks = d.get("extra_checks", [])
+ if not extra_checks:
+ return []
+
+ self.dist_log("Testing extra checks for feature '%s'" % name, extra_checks)
+ flags = self.feature_flags(name)
+ available = []
+ not_available = []
+ for chk in extra_checks:
+ test_path = os.path.join(
+ self.conf_check_path, "extra_%s.c" % chk.lower()
+ )
+ if not os.path.exists(test_path):
+ self.dist_fatal("extra check file does not exist", test_path)
+
+ is_supported = self.dist_test(test_path, flags + self.cc_flags["werror"])
+ if is_supported:
+ available.append(chk)
+ else:
+ not_available.append(chk)
+
+ if not_available:
+ self.dist_log("testing failed for checks", not_available, stderr=True)
+ return available
+
+
def feature_c_preprocessor(self, feature_name, tabs=0):
"""
Generate C preprocessor definitions and include headers of a CPU feature.
@@ -1520,14 +1575,18 @@ class _Feature:
prepr += [
"#include <%s>" % h for h in feature.get("headers", [])
]
- group = feature.get("group", [])
- for f in group:
- # Guard features in case of duplicate definitions
+
+ extra_defs = feature.get("group", [])
+ extra_defs += self.feature_extra_checks(feature_name)
+ for edef in extra_defs:
+ # Guard extra definitions in case of duplicate with
+ # another feature
prepr += [
- "#ifndef %sHAVE_%s" % (self.conf_c_prefix, f),
- "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, f),
+ "#ifndef %sHAVE_%s" % (self.conf_c_prefix, edef),
+ "\t#define %sHAVE_%s 1" % (self.conf_c_prefix, edef),
"#endif",
]
+
if tabs > 0:
prepr = [('\t'*tabs) + l for l in prepr]
return '\n'.join(prepr)
@@ -2127,7 +2186,7 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
See Also
--------
- parse_targets() :
+ parse_targets :
Parsing the configuration statements of dispatch-able sources.
"""
to_compile = {}
@@ -2136,9 +2195,12 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
for src in sources:
output_dir = os.path.dirname(src)
- if src_dir and not output_dir.startswith(src_dir):
- output_dir = os.path.join(src_dir, output_dir)
+ if src_dir:
+ if not output_dir.startswith(src_dir):
+ output_dir = os.path.join(src_dir, output_dir)
if output_dir not in include_dirs:
+ # To allow including the generated config header(*.dispatch.h)
+ # by the dispatch-able sources
include_dirs.append(output_dir)
has_baseline, targets, extra_flags = self.parse_targets(src)
@@ -2266,6 +2328,12 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
baseline_rows.append((
"Flags", (' '.join(baseline_flags) if baseline_flags else "none")
))
+ extra_checks = []
+ for name in baseline_names:
+ extra_checks += self.feature_extra_checks(name)
+ baseline_rows.append((
+ "Extra checks", (' '.join(extra_checks) if extra_checks else "none")
+ ))
########## dispatch ##########
if self.cc_noopt:
@@ -2305,13 +2373,19 @@ class CCompilerOpt(_Config, _Distutils, _Cache, _CCompiler, _Feature, _Parse):
dispatch_rows.append(("Generated", ''))
for tar in self.feature_sorted(target_sources):
sources = target_sources[tar]
- name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
+ pretty_name = tar if isinstance(tar, str) else '(%s)' % ' '.join(tar)
flags = ' '.join(self.feature_flags(tar))
implies = ' '.join(self.feature_sorted(self.feature_implies(tar)))
detect = ' '.join(self.feature_detect(tar))
+ extra_checks = []
+ for name in ((tar,) if isinstance(tar, str) else tar):
+ extra_checks += self.feature_extra_checks(name)
+ extra_checks = (' '.join(extra_checks) if extra_checks else "none")
+
dispatch_rows.append(('', ''))
- dispatch_rows.append((name, implies))
+ dispatch_rows.append((pretty_name, implies))
dispatch_rows.append(("Flags", flags))
+ dispatch_rows.append(("Extra checks", extra_checks))
dispatch_rows.append(("Detect", detect))
for src in sources:
dispatch_rows.append(("", src))
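The ``extra_checks`` machinery added above follows a small naming convention: a check
named ``XXXX`` is backed by ``checks/extra_xxxx.c``, and when the compiler accepts that
file the generated dispatch header gains a guarded ``#define <prefix>HAVE_XXXX``. A
minimal standalone sketch of that mapping (the ``NPY_`` prefix and the paths are
assumptions for illustration, not taken from the patch):

    import os

    conf_check_path = "numpy/distutils/checks"   # assumed location of the check files
    conf_c_prefix = "NPY_"                       # assumed macro prefix

    def extra_check_artifacts(check_name):
        """Map an extra-check name to its probe file and the macro emitted on success."""
        test_file = os.path.join(conf_check_path, "extra_%s.c" % check_name.lower())
        macro = "%sHAVE_%s" % (conf_c_prefix, check_name.upper())
        return test_file, macro

    print(extra_check_artifacts("AVX512F_REDUCE"))
    # ('numpy/distutils/checks/extra_avx512f_reduce.c', 'NPY_HAVE_AVX512F_REDUCE')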
diff --git a/numpy/distutils/checks/extra_avx512bw_mask.c b/numpy/distutils/checks/extra_avx512bw_mask.c
new file mode 100644
index 000000000..9cfd0c2a5
--- /dev/null
+++ b/numpy/distutils/checks/extra_avx512bw_mask.c
@@ -0,0 +1,18 @@
+#include <immintrin.h>
+/**
+ * Test BW mask operations due to:
+ * - MSVC has supported it since vs2019 see,
+ * https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ * - Clang >= v8.0
+ * - GCC >= v7.1
+ */
+int main(void)
+{
+ __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
+ m64 = _kor_mask64(m64, m64);
+ m64 = _kxor_mask64(m64, m64);
+ m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
+ m64 = _mm512_kunpackd(m64, m64);
+ m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
+ return (int)_cvtmask64_u64(m64);
+}
diff --git a/numpy/distutils/checks/extra_avx512f_reduce.c b/numpy/distutils/checks/extra_avx512f_reduce.c
new file mode 100644
index 000000000..f979d504e
--- /dev/null
+++ b/numpy/distutils/checks/extra_avx512f_reduce.c
@@ -0,0 +1,41 @@
+#include <immintrin.h>
+/**
+ * The following intrinsics don't have direct native support but compilers
+ * tend to emulate them.
+ * They're usually supported by gcc >= 7.1, clang >= 4 and icc >= 19
+ */
+int main(void)
+{
+ __m512 one_ps = _mm512_set1_ps(1.0f);
+ __m512d one_pd = _mm512_set1_pd(1.0);
+ __m512i one_i64 = _mm512_set1_epi64(1.0);
+ // add
+ float sum_ps = _mm512_reduce_add_ps(one_ps);
+ double sum_pd = _mm512_reduce_add_pd(one_pd);
+ int sum_int = (int)_mm512_reduce_add_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_add_epi32(one_i64);
+ // mul
+ sum_ps += _mm512_reduce_mul_ps(one_ps);
+ sum_pd += _mm512_reduce_mul_pd(one_pd);
+ sum_int += (int)_mm512_reduce_mul_epi64(one_i64);
+ sum_int += (int)_mm512_reduce_mul_epi32(one_i64);
+ // min
+ sum_ps += _mm512_reduce_min_ps(one_ps);
+ sum_pd += _mm512_reduce_min_pd(one_pd);
+ sum_int += (int)_mm512_reduce_min_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_min_epi64(one_i64);
+ // max
+ sum_ps += _mm512_reduce_max_ps(one_ps);
+ sum_pd += _mm512_reduce_max_pd(one_pd);
+ sum_int += (int)_mm512_reduce_max_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epu32(one_i64);
+ sum_int += (int)_mm512_reduce_max_epi64(one_i64);
+ // and
+ sum_int += (int)_mm512_reduce_and_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_and_epi64(one_i64);
+ // or
+ sum_int += (int)_mm512_reduce_or_epi32(one_i64);
+ sum_int += (int)_mm512_reduce_or_epi64(one_i64);
+ return (int)sum_ps + (int)sum_pd + sum_int;
+}
diff --git a/numpy/distutils/checks/extra_vsx_asm.c b/numpy/distutils/checks/extra_vsx_asm.c
new file mode 100644
index 000000000..b73a6f438
--- /dev/null
+++ b/numpy/distutils/checks/extra_vsx_asm.c
@@ -0,0 +1,36 @@
+/**
+ * Testing ASM VSX register number fixer '%x<n>'
+ *
+ * Old versions of CLANG don't support %x<n> in the inline asm template,
+ * which fixes the register number when using any of the register constraints wa, wd, wf.
+ *
+ * xref:
+ * - https://bugs.llvm.org/show_bug.cgi?id=31837
+ * - https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
+ */
+#ifndef __VSX__
+ #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+ #define vsx_ld vec_vsx_ld
+ #define vsx_st vec_vsx_st
+#else
+ #define vsx_ld vec_xl
+ #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+ float z4[] = {0, 0, 0, 0};
+ signed int zout[] = {0, 0, 0, 0};
+
+ __vector float vz4 = vsx_ld(0, z4);
+ __vector signed int asm_ret = vsx_ld(0, zout);
+
+ __asm__ ("xvcvspsxws %x0,%x1" : "=wa" (vz4) : "wa" (asm_ret));
+
+ vsx_st(asm_ret, 0, zout);
+ return zout[0];
+}
diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py
index 8f6436004..b72d0cab1 100644
--- a/numpy/distutils/command/autodist.py
+++ b/numpy/distutils/command/autodist.py
@@ -46,15 +46,16 @@ def check_restrict(cmd):
return ''
-def check_compiler_gcc4(cmd):
- """Return True if the C compiler is GCC 4.x."""
+def check_compiler_gcc(cmd):
+ """Check if the compiler is GCC."""
+
cmd._check_compiler()
body = textwrap.dedent("""
int
main()
{
- #if (! defined __GNUC__) || (__GNUC__ < 4)
- #error gcc >= 4 required
+ #if (! defined __GNUC__)
+ #error gcc required
#endif
return 0;
}
@@ -62,6 +63,30 @@ def check_compiler_gcc4(cmd):
return cmd.try_compile(body, None, None)
+def check_gcc_version_at_least(cmd, major, minor=0, patchlevel=0):
+ """
+ Check that the gcc version is at least the specified version."""
+
+ cmd._check_compiler()
+ version = '.'.join([str(major), str(minor), str(patchlevel)])
+ body = textwrap.dedent("""
+ int
+ main()
+ {
+ #if (! defined __GNUC__) || (__GNUC__ < %(major)d) || \\
+ (__GNUC_MINOR__ < %(minor)d) || \\
+ (__GNUC_PATCHLEVEL__ < %(patchlevel)d)
+ #error gcc >= %(version)s required
+ #endif
+ return 0;
+ }
+ """)
+ kw = {'version': version, 'major': major, 'minor': minor,
+ 'patchlevel': patchlevel}
+
+ return cmd.try_compile(body % kw, None, None)
+
+
def check_gcc_function_attribute(cmd, attribute, name):
"""Return True if the given function attribute is supported."""
cmd._check_compiler()
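Together with ``check_compiler_gcc`` above, the new ``check_gcc_version_at_least`` lets a
setup script gate code paths on a minimum GCC release. A hedged usage sketch (the
``config_cmd`` object is assumed to come from ``Configuration.get_config_cmd()``, and the
8.4 threshold is only an example):

    def probe_gcc(config_cmd):
        # check_compiler_gcc() replaces the old check_compiler_gcc4()
        if not config_cmd.check_compiler_gcc():
            return None
        # True only when __GNUC__/__GNUC_MINOR__/__GNUC_PATCHLEVEL__ pass the >= 8.4.0 probe
        return config_cmd.check_gcc_version_at_least(8, 4)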
diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py
index 60ba4c917..a4fda537d 100644
--- a/numpy/distutils/command/build.py
+++ b/numpy/distutils/command/build.py
@@ -22,6 +22,8 @@ class build(old_build):
"specify a list of dispatched CPU optimizations"),
('disable-optimization', None,
"disable CPU optimized code(dispatch,simd,fast...)"),
+ ('simd-test=', None,
+ "specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
]
help_options = old_build.help_options + [
@@ -36,6 +38,16 @@ class build(old_build):
self.cpu_baseline = "min"
self.cpu_dispatch = "max -xop -fma4" # drop AMD legacy features by default
self.disable_optimization = False
+ """
+ the '_simd' module is very large. Adding more dispatched features
+ will increase binary size and compile time. By default we minimize
+ the targeted features to those most commonly used by the NumPy SIMD interface (NPYV).
+ NOTE: any specified features will be ignored if they're:
+ - part of the baseline(--cpu-baseline)
+ - not part of dispatch-able features(--cpu-dispatch)
+ - not supported by compiler or platform
+ """
+ self.simd_test = "BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F AVX512_SKX VSX VSX2 VSX3 NEON ASIMD"
def finalize_options(self):
build_scripts = self.build_scripts
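The new ``--simd-test`` option takes the same space-separated feature syntax as
``--cpu-dispatch``, including parenthesized multi-feature targets. A hedged usage example
(the feature list here is illustrative; baseline or unsupported features are dropped as
noted above):

    python setup.py build --simd-test="BASELINE SSE42 (FMA3 AVX2) AVX512_SKX"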
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 87345adbc..a0db6f31f 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -259,57 +259,56 @@ class build_clib(old_build_clib):
if requiref90:
self.mkpath(module_build_dir)
- dispatch_objects = []
- if not self.disable_optimization:
- dispatch_sources = [
- c_sources.pop(c_sources.index(src))
- for src in c_sources[:] if src.endswith(".dispatch.c")
- ]
- if dispatch_sources:
- if not self.inplace:
- build_src = self.get_finalized_command("build_src").build_src
- else:
- build_src = None
- dispatch_objects = self.compiler_opt.try_dispatch(
- dispatch_sources,
- output_dir=self.build_temp,
- src_dir=build_src,
- macros=macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_postargs
- )
- extra_args_baseopt = extra_postargs + self.compiler_opt.cpu_baseline_flags()
- else:
- extra_args_baseopt = extra_postargs
- macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
-
if compiler.compiler_type == 'msvc':
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
+ # filtering C dispatch-table sources when optimization is not disabled,
+ # otherwise treated as normal sources.
+ copt_c_sources = []
+ copt_baseline_flags = []
+ copt_macros = []
+ if not self.disable_optimization:
+ copt_build_src = None if self.inplace else self.get_finalized_command("build_src").build_src
+ copt_c_sources = [
+ c_sources.pop(c_sources.index(src))
+ for src in c_sources[:] if src.endswith(".dispatch.c")
+ ]
+ copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
+ else:
+ copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+
objects = []
+ if copt_c_sources:
+ log.info("compiling C dispatch-able sources")
+ objects += self.compiler_opt.try_dispatch(copt_c_sources,
+ output_dir=self.build_temp,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs)
+
if c_sources:
log.info("compiling C sources")
- objects = compiler.compile(c_sources,
- output_dir=self.build_temp,
- macros=macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_args_baseopt)
- objects.extend(dispatch_objects)
+ objects += compiler.compile(c_sources,
+ output_dir=self.build_temp,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_postargs + copt_baseline_flags)
if cxx_sources:
log.info("compiling C++ sources")
cxx_compiler = compiler.cxx_compiler()
cxx_objects = cxx_compiler.compile(cxx_sources,
output_dir=self.build_temp,
- macros=macros,
+ macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
- extra_postargs=extra_postargs)
+ extra_postargs=extra_postargs + copt_baseline_flags)
objects.extend(cxx_objects)
if f_sources or fmodule_sources:
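The refactor above splits ``*.dispatch.c`` files out of ``c_sources`` by popping them
while iterating over a copy of the list; the popped files go through
``compiler_opt.try_dispatch()`` and everything left is compiled with the baseline flags.
A standalone sketch of just that splitting step, with made-up file names:

    c_sources = ["umath.c", "loops.dispatch.c", "common.c", "simd.dispatch.c"]

    # iterate over a copy (c_sources[:]) while popping matches out of the original list
    copt_c_sources = [
        c_sources.pop(c_sources.index(src))
        for src in c_sources[:] if src.endswith(".dispatch.c")
    ]

    print(copt_c_sources)  # ['loops.dispatch.c', 'simd.dispatch.c'] -> try_dispatch()
    print(c_sources)       # ['umath.c', 'common.c'] -> compile() with baseline flags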
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index b6557fcf6..ca6f8bcd2 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -19,8 +19,7 @@ from numpy.distutils.misc_util import (
has_cxx_sources, has_f_sources, is_sequence
)
from numpy.distutils.command.config_compiler import show_fortran_compilers
-from numpy.distutils.ccompiler_opt import new_ccompiler_opt
-
+from numpy.distutils.ccompiler_opt import new_ccompiler_opt, CCompilerOpt
class build_ext (old_build_ext):
@@ -39,6 +38,8 @@ class build_ext (old_build_ext):
"specify a list of dispatched CPU optimizations"),
('disable-optimization', None,
"disable CPU optimized code(dispatch,simd,fast...)"),
+ ('simd-test=', None,
+ "specify a list of CPU optimizations to be tested against NumPy SIMD interface"),
]
help_options = old_build_ext.help_options + [
@@ -56,6 +57,7 @@ class build_ext (old_build_ext):
self.cpu_baseline = None
self.cpu_dispatch = None
self.disable_optimization = None
+ self.simd_test = None
def finalize_options(self):
if self.parallel:
@@ -87,7 +89,9 @@ class build_ext (old_build_ext):
('cpu_baseline', 'cpu_baseline'),
('cpu_dispatch', 'cpu_dispatch'),
('disable_optimization', 'disable_optimization'),
+ ('simd_test', 'simd_test')
)
+ CCompilerOpt.conf_target_groups["simd_test"] = self.simd_test
def run(self):
if not self.extensions:
@@ -406,52 +410,49 @@ class build_ext (old_build_ext):
include_dirs = ext.include_dirs + get_numpy_include_dirs()
- dispatch_objects = []
+ # filtering C dispatch-table sources when optimization is not disabled,
+ # otherwise treated as normal sources.
+ copt_c_sources = []
+ copt_baseline_flags = []
+ copt_macros = []
if not self.disable_optimization:
- dispatch_sources = [
+ copt_build_src = None if self.inplace else self.get_finalized_command("build_src").build_src
+ copt_c_sources = [
c_sources.pop(c_sources.index(src))
for src in c_sources[:] if src.endswith(".dispatch.c")
]
- if dispatch_sources:
- if not self.inplace:
- build_src = self.get_finalized_command("build_src").build_src
- else:
- build_src = None
- dispatch_objects = self.compiler_opt.try_dispatch(
- dispatch_sources,
- output_dir=output_dir,
- src_dir=build_src,
- macros=macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_args,
- **kws
- )
- extra_args_baseopt = extra_args + self.compiler_opt.cpu_baseline_flags()
+ copt_baseline_flags = self.compiler_opt.cpu_baseline_flags()
else:
- extra_args_baseopt = extra_args
- macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
+ copt_macros.append(("NPY_DISABLE_OPTIMIZATION", 1))
c_objects = []
+ if copt_c_sources:
+ log.info("compiling C dispatch-able sources")
+ c_objects += self.compiler_opt.try_dispatch(copt_c_sources,
+ output_dir=output_dir,
+ src_dir=copt_build_src,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args,
+ **kws)
if c_sources:
log.info("compiling C sources")
- c_objects = self.compiler.compile(c_sources,
- output_dir=output_dir,
- macros=macros,
- include_dirs=include_dirs,
- debug=self.debug,
- extra_postargs=extra_args_baseopt,
- **kws)
- c_objects.extend(dispatch_objects)
-
+ c_objects += self.compiler.compile(c_sources,
+ output_dir=output_dir,
+ macros=macros + copt_macros,
+ include_dirs=include_dirs,
+ debug=self.debug,
+ extra_postargs=extra_args + copt_baseline_flags,
+ **kws)
if cxx_sources:
log.info("compiling C++ sources")
c_objects += cxx_compiler.compile(cxx_sources,
output_dir=output_dir,
- macros=macros,
+ macros=macros + copt_macros,
include_dirs=include_dirs,
debug=self.debug,
- extra_postargs=extra_args,
+ extra_postargs=extra_args + copt_baseline_flags,
**kws)
extra_postargs = []
@@ -559,7 +560,7 @@ class build_ext (old_build_ext):
unlinkable_fobjects = list(unlinkable_fobjects)
# Expand possible fake static libraries to objects
- for lib in list(libraries):
+ for lib in libraries:
for libdir in library_dirs:
fake_lib = os.path.join(libdir, lib + '.fobjects')
if os.path.isfile(fake_lib):
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index e54a54449..60881f4a3 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -20,9 +20,10 @@ from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
check_gcc_function_attribute_with_intrinsics,
check_gcc_variable_attribute,
+ check_gcc_version_at_least,
check_inline,
check_restrict,
- check_compiler_gcc4)
+ check_compiler_gcc)
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
@@ -416,9 +417,9 @@ class config(old_config):
otherwise."""
return check_restrict(self)
- def check_compiler_gcc4(self):
- """Return True if the C compiler is gcc >= 4."""
- return check_compiler_gcc4(self)
+ def check_compiler_gcc(self):
+ """Return True if the C compiler is gcc"""
+ return check_compiler_gcc(self)
def check_gcc_function_attribute(self, attribute, name):
return check_gcc_function_attribute(self, attribute, name)
@@ -431,6 +432,11 @@ class config(old_config):
def check_gcc_variable_attribute(self, attribute):
return check_gcc_variable_attribute(self, attribute)
+ def check_gcc_version_at_least(self, major, minor=0, patchlevel=0):
+ """Return True if the GCC version is greater than or equal to the
+ specified version."""
+ return check_gcc_version_at_least(self, major, minor, patchlevel)
+
def get_output(self, body, headers=None, include_dirs=None,
libraries=None, library_dirs=None,
lang="c", use_tee=None):
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 1c3069363..76f00ee91 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -20,8 +20,6 @@ import os
import sys
import re
-from numpy.compat import open_latin1
-
from distutils.sysconfig import get_python_lib
from distutils.fancy_getopt import FancyGetopt
from distutils.errors import DistutilsModuleError, \
@@ -745,8 +743,8 @@ _default_compilers = (
('win32', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95',
'intelvem', 'intelem', 'flang')),
('cygwin.*', ('gnu', 'intelv', 'absoft', 'compaqv', 'intelev', 'gnu95', 'g95')),
- ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'absoft', 'nag', 'vast', 'compaq',
- 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor')),
+ ('linux.*', ('gnu95', 'intel', 'lahey', 'pg', 'nv', 'absoft', 'nag', 'vast', 'compaq',
+ 'intele', 'intelem', 'gnu', 'g95', 'pathf95', 'nagfor', 'fujitsu')),
('darwin.*', ('gnu95', 'nag', 'absoft', 'ibm', 'intel', 'gnu', 'g95', 'pg')),
('sunos.*', ('sun', 'gnu', 'gnu95', 'g95')),
('irix.*', ('mips', 'gnu', 'gnu95',)),
@@ -975,29 +973,27 @@ def is_free_format(file):
# f90 allows both fixed and free format, assuming fixed unless
# signs of free format are detected.
result = 0
- f = open_latin1(file, 'r')
- line = f.readline()
- n = 10000 # the number of non-comment lines to scan for hints
- if _has_f_header(line):
- n = 0
- elif _has_f90_header(line):
- n = 0
- result = 1
- while n>0 and line:
- line = line.rstrip()
- if line and line[0]!='!':
- n -= 1
- if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
- result = 1
- break
+ with open(file, encoding='latin1') as f:
line = f.readline()
- f.close()
+ n = 10000 # the number of non-comment lines to scan for hints
+ if _has_f_header(line):
+ n = 0
+ elif _has_f90_header(line):
+ n = 0
+ result = 1
+ while n>0 and line:
+ line = line.rstrip()
+ if line and line[0]!='!':
+ n -= 1
+ if (line[0]!='\t' and _free_f90_start(line[:5])) or line[-1:]=='&':
+ result = 1
+ break
+ line = f.readline()
return result
def has_f90_header(src):
- f = open_latin1(src, 'r')
- line = f.readline()
- f.close()
+ with open(src, encoding='latin1') as f:
+ line = f.readline()
return _has_f90_header(line) or _has_fix_header(line)
_f77flags_re = re.compile(r'(c|)f77flags\s*\(\s*(?P<fcname>\w+)\s*\)\s*=\s*(?P<fflags>.*)', re.I)
@@ -1008,17 +1004,16 @@ def get_f77flags(src):
Return a dictionary {<fcompiler type>:<f77 flags>}.
"""
flags = {}
- f = open_latin1(src, 'r')
- i = 0
- for line in f:
- i += 1
- if i>20: break
- m = _f77flags_re.match(line)
- if not m: continue
- fcname = m.group('fcname').strip()
- fflags = m.group('fflags').strip()
- flags[fcname] = split_quoted(fflags)
- f.close()
+ with open(src, encoding='latin1') as f:
+ i = 0
+ for line in f:
+ i += 1
+ if i>20: break
+ m = _f77flags_re.match(line)
+ if not m: continue
+ fcname = m.group('fcname').strip()
+ fflags = m.group('fflags').strip()
+ flags[fcname] = split_quoted(fflags)
return flags
# TODO: implement get_f90flags and use it in _compile similarly to get_f77flags
diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py
index 21a5be003..ecd4d9989 100644
--- a/numpy/distutils/fcompiler/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -33,7 +33,10 @@ class EnvironmentConfig:
try:
conf_desc = self._conf_keys[name]
except KeyError:
- raise AttributeError(name)
+ raise AttributeError(
+ f"'EnvironmentConfig' object has no attribute '{name}'"
+ ) from None
+
return self._get_var(name, conf_desc)
def get(self, name, default=None):
diff --git a/numpy/distutils/fcompiler/fujitsu.py b/numpy/distutils/fcompiler/fujitsu.py
new file mode 100644
index 000000000..ddce67456
--- /dev/null
+++ b/numpy/distutils/fcompiler/fujitsu.py
@@ -0,0 +1,46 @@
+"""
+fujitsu
+
+Support for the Fujitsu Fortran compiler.
+This compiler is developed by Fujitsu and is used in A64FX on Fugaku.
+"""
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['FujitsuFCompiler']
+
+class FujitsuFCompiler(FCompiler):
+ compiler_type = 'fujitsu'
+ description = 'Fujitsu Fortran Compiler'
+
+ possible_executables = ['frt']
+ version_pattern = r'frt \(FRT\) (?P<version>[a-z\d.]+)'
+ # $ frt --version
+ # frt (FRT) x.x.x yyyymmdd
+
+ executables = {
+ 'version_cmd' : ["<F77>", "--version"],
+ 'compiler_f77' : ["frt", "-Fixed"],
+ 'compiler_fix' : ["frt", "-Fixed"],
+ 'compiler_f90' : ["frt"],
+ 'linker_so' : ["frt", "-shared"],
+ 'archiver' : ["ar", "-cr"],
+ 'ranlib' : ["ranlib"]
+ }
+ pic_flags = ['-KPIC']
+ module_dir_switch = '-M'
+ module_include_switch = '-I'
+
+ def get_flags_opt(self):
+ return ['-O3']
+ def get_flags_debug(self):
+ return ['-g']
+ def runtime_library_dir_option(self, dir):
+ return f'-Wl,-rpath={dir}'
+ def get_libraries(self):
+ return ['fj90f', 'fj90i', 'fjsrcinfo']
+
+if __name__ == '__main__':
+ from distutils import log
+ from numpy.distutils import customized_fcompiler
+ log.set_verbosity(2)
+ print(customized_fcompiler('fujitsu').get_version())
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index caa08549e..68d1501ee 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -23,13 +23,6 @@ def is_win64():
return sys.platform == "win32" and platform.architecture()[0] == "64bit"
-if is_win64():
- #_EXTRAFLAGS = ["-fno-leading-underscore"]
- _EXTRAFLAGS = []
-else:
- _EXTRAFLAGS = []
-
-
class GnuFCompiler(FCompiler):
compiler_type = 'gnu'
compiler_aliases = ('g77', )
@@ -133,7 +126,7 @@ class GnuFCompiler(FCompiler):
target = '10.9'
s = f'Env. variable MACOSX_DEPLOYMENT_TARGET set to {target}'
warnings.warn(s, stacklevel=2)
- os.environ['MACOSX_DEPLOYMENT_TARGET'] = target
+ os.environ['MACOSX_DEPLOYMENT_TARGET'] = str(target)
opt.extend(['-undefined', 'dynamic_lookup', '-bundle'])
else:
opt.append("-shared")
@@ -238,7 +231,7 @@ class GnuFCompiler(FCompiler):
def _c_arch_flags(self):
""" Return detected arch flags from CFLAGS """
- from distutils import sysconfig
+ import sysconfig
try:
cflags = sysconfig.get_config_vars()['CFLAGS']
except KeyError:
@@ -297,11 +290,11 @@ class Gnu95FCompiler(GnuFCompiler):
executables = {
'version_cmd' : ["<F90>", "-dumpversion"],
'compiler_f77' : [None, "-Wall", "-g", "-ffixed-form",
- "-fno-second-underscore"] + _EXTRAFLAGS,
+ "-fno-second-underscore"],
'compiler_f90' : [None, "-Wall", "-g",
- "-fno-second-underscore"] + _EXTRAFLAGS,
+ "-fno-second-underscore"],
'compiler_fix' : [None, "-Wall", "-g","-ffixed-form",
- "-fno-second-underscore"] + _EXTRAFLAGS,
+ "-fno-second-underscore"],
'linker_so' : ["<F90>", "-Wall", "-g"],
'archiver' : ["ar", "-cr"],
'ranlib' : ["ranlib"],
diff --git a/numpy/distutils/fcompiler/nag.py b/numpy/distutils/fcompiler/nag.py
index 908e724e6..7df8ffe2c 100644
--- a/numpy/distutils/fcompiler/nag.py
+++ b/numpy/distutils/fcompiler/nag.py
@@ -19,7 +19,7 @@ class BaseNAGFCompiler(FCompiler):
def get_flags_opt(self):
return ['-O4']
def get_flags_arch(self):
- return ['']
+ return []
class NAGFCompiler(BaseNAGFCompiler):
diff --git a/numpy/distutils/fcompiler/nv.py b/numpy/distutils/fcompiler/nv.py
new file mode 100644
index 000000000..8e9f16835
--- /dev/null
+++ b/numpy/distutils/fcompiler/nv.py
@@ -0,0 +1,55 @@
+import sys
+
+from numpy.distutils.fcompiler import FCompiler
+
+compilers = ['NVHPCFCompiler']
+
+class NVHPCFCompiler(FCompiler):
+ """ NVIDIA High Performance Computing (HPC) SDK Fortran Compiler
+
+ https://developer.nvidia.com/hpc-sdk
+
+ Since August 2020 the NVIDIA HPC SDK includes the compilers formerly known as The Portland Group compilers,
+ https://www.pgroup.com/index.htm.
+ See also `numpy.distutils.fcompiler.pg`.
+ """
+
+ compiler_type = 'nv'
+ description = 'NVIDIA HPC SDK'
+ version_pattern = r'\s*(nvfortran|(pg(f77|f90|fortran)) \(aka nvfortran\)) (?P<version>[\d.-]+).*'
+
+ executables = {
+ 'version_cmd': ["<F90>", "-V"],
+ 'compiler_f77': ["nvfortran"],
+ 'compiler_fix': ["nvfortran", "-Mfixed"],
+ 'compiler_f90': ["nvfortran"],
+ 'linker_so': ["<F90>"],
+ 'archiver': ["ar", "-cr"],
+ 'ranlib': ["ranlib"]
+ }
+ pic_flags = ['-fpic']
+
+ module_dir_switch = '-module '
+ module_include_switch = '-I'
+
+ def get_flags(self):
+ opt = ['-Minform=inform', '-Mnosecond_underscore']
+ return self.pic_flags + opt
+
+ def get_flags_opt(self):
+ return ['-fast']
+
+ def get_flags_debug(self):
+ return ['-g']
+
+ def get_flags_linker_so(self):
+ return ["-shared", '-fpic']
+
+ def runtime_library_dir_option(self, dir):
+ return '-R%s' % dir
+
+if __name__ == '__main__':
+ from distutils import log
+ log.set_verbosity(2)
+ from numpy.distutils import customized_fcompiler
+ print(customized_fcompiler(compiler='nv').get_version())
diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
index eb628cb63..72442c4fe 100644
--- a/numpy/distutils/fcompiler/pg.py
+++ b/numpy/distutils/fcompiler/pg.py
@@ -31,7 +31,7 @@ class PGroupFCompiler(FCompiler):
'compiler_f77': ["pgfortran"],
'compiler_fix': ["pgfortran", "-Mfixed"],
'compiler_f90': ["pgfortran"],
- 'linker_so': ["pgfortran"],
+ 'linker_so': ["<F90>"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index 7cb6fadcc..3358695a8 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -8,6 +8,7 @@ Support code for building Python extensions on Windows.
"""
import os
+import platform
import sys
import subprocess
import re
@@ -265,16 +266,19 @@ def find_python_dll():
# search in the file system for possible candidates
major_version, minor_version = tuple(sys.version_info[:2])
- patterns = ['python%d%d.dll']
-
- for pat in patterns:
- dllname = pat % (major_version, minor_version)
- print("Looking for %s" % dllname)
- for folder in lib_dirs:
- dll = os.path.join(folder, dllname)
- if os.path.exists(dll):
- return dll
-
+ implementation = platform.python_implementation()
+ if implementation == 'CPython':
+ dllname = f'python{major_version}{minor_version}.dll'
+ elif implementation == 'PyPy':
+ dllname = f'libpypy{major_version}-c.dll'
+ else:
+ dllname = f'Unknown platform {implementation}'
+ print("Looking for %s" % dllname)
+ for folder in lib_dirs:
+ dll = os.path.join(folder, dllname)
+ if os.path.exists(dll):
+ return dll
+
raise ValueError("%s not found in %s" % (dllname, lib_dirs))
def dump_table(dll):
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 9f9e9f1ac..d3073ab2d 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -9,6 +9,7 @@ import subprocess
import shutil
import multiprocessing
import textwrap
+import importlib.util
import distutils
from distutils.errors import DistutilsError
@@ -1900,15 +1901,16 @@ class Configuration:
revision0 = f.read().strip()
branch_map = {}
- for line in file(branch_cache_fn, 'r'):
- branch1, revision1 = line.split()[:2]
- if revision1==revision0:
- branch0 = branch1
- try:
- revision1 = int(revision1)
- except ValueError:
- continue
- branch_map[branch1] = revision1
+ with open(branch_cache_fn, 'r') as f:
+ for line in f:
+ branch1, revision1 = line.split()[:2]
+ if revision1==revision0:
+ branch0 = branch1
+ try:
+ revision1 = int(revision1)
+ except ValueError:
+ continue
+ branch_map[branch1] = revision1
return branch_map.get(branch0)
@@ -1966,6 +1968,13 @@ class Configuration:
version = getattr(version_module, a, None)
if version is not None:
break
+
+ # Try if versioneer module
+ try:
+ version = version_module.get_versions()['version']
+ except AttributeError:
+ version = None
+
if version is not None:
break
@@ -2121,12 +2130,11 @@ def get_npy_pkg_dir():
environment, and using them when cross-compiling.
"""
- # XXX: import here for bootstrapping reasons
- import numpy
d = os.environ.get('NPY_PKG_CONFIG_PATH')
if d is not None:
return d
- d = os.path.join(os.path.dirname(numpy.__file__),
+ spec = importlib.util.find_spec('numpy')
+ d = os.path.join(os.path.dirname(spec.origin),
'core', 'lib', 'npy-pkg-config')
return d
@@ -2355,6 +2363,7 @@ def generate_config_py(target):
Examples
--------
+ >>> import numpy as np
>>> np.show_config()
blas_opt_info:
language = c
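The ``get_npy_pkg_dir`` change above resolves the installed numpy location through
``importlib.util.find_spec`` instead of importing numpy, which drops the old
bootstrapping workaround. A standalone sketch of the equivalent lookup (it mirrors the
patched function and assumes numpy is importable):

    import os
    import importlib.util

    d = os.environ.get('NPY_PKG_CONFIG_PATH')
    if d is None:
        spec = importlib.util.find_spec('numpy')
        # spec.origin points at numpy/__init__.py, so its dirname is the package root
        d = os.path.join(os.path.dirname(spec.origin),
                         'core', 'lib', 'npy-pkg-config')
    print(d)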
diff --git a/numpy/distutils/setup.py b/numpy/distutils/setup.py
index 798c3686f..522756fc9 100644
--- a/numpy/distutils/setup.py
+++ b/numpy/distutils/setup.py
@@ -8,6 +8,7 @@ def configuration(parent_package='',top_path=None):
config.add_data_files('site.cfg')
config.add_data_files('mingw/gfortran_vs2003_hack.c')
config.add_data_dir('checks')
+ config.add_data_files('*.pyi')
config.make_config_py()
return config
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index df82683dc..13f9da0fb 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -171,7 +171,7 @@ from configparser import RawConfigParser as ConfigParser
from distutils.errors import DistutilsError
from distutils.dist import Distribution
-import distutils.sysconfig
+import sysconfig
from numpy.distutils import log
from distutils.util import get_platform
@@ -187,6 +187,7 @@ import distutils.ccompiler
import tempfile
import shutil
+__all__ = ['system_info']
# Determine number of bits
import platform
@@ -255,7 +256,7 @@ def libpaths(paths, bits):
if sys.platform == 'win32':
default_lib_dirs = ['C:\\',
- os.path.join(distutils.sysconfig.EXEC_PREFIX,
+ os.path.join(sysconfig.get_config_var('exec_prefix'),
'libs')]
default_runtime_dirs = []
default_include_dirs = []
@@ -289,7 +290,7 @@ if sys.platform == 'win32':
vcpkg = shutil.which('vcpkg')
if vcpkg:
vcpkg_dir = os.path.dirname(vcpkg)
- if platform.architecture() == '32bit':
+ if platform.architecture()[0] == '32bit':
specifier = 'x86'
else:
specifier = 'x64'
@@ -414,6 +415,89 @@ def get_standard_file(fname):
return filenames
+def _parse_env_order(base_order, env):
+ """ Parse an environment variable `env` by splitting with "," and only returning elements from `base_order`
+
+ This function splits the environment variable on "," and checks each of its individual elements against `base_order`.
+
+ The items in the environment variable may be negated via '^item' or '!itema,itemb'.
+ It must start with ^/! to negate all options.
+
+ Raises
+ ------
+ ValueError: for mixed negated and non-negated orders or multiple negated orders
+
+ Parameters
+ ----------
+ base_order : list of str
+ the base list of orders
+ env : str
+ the environment variable to be parsed; if it is not set, `base_order` is returned
+
+ Returns
+ -------
+ allow_order : list of str
+ allowed orders in lower-case
+ unknown_order : list of str
+ for values not overlapping with `base_order`
+ """
+ order_str = os.environ.get(env, None)
+
+ # ensure all base-orders are lower-case (for easier comparison)
+ base_order = [order.lower() for order in base_order]
+ if order_str is None:
+ return base_order, []
+
+ neg = order_str.startswith('^') or order_str.startswith('!')
+ # Check format
+ order_str_l = list(order_str)
+ sum_neg = order_str_l.count('^') + order_str_l.count('!')
+ if neg:
+ if sum_neg > 1:
+ raise ValueError(f"Environment variable '{env}' may only contain a single (prefixed) negation: {order_str}")
+ # remove prefix
+ order_str = order_str[1:]
+ elif sum_neg > 0:
+ raise ValueError(f"Environment variable '{env}' may not mix negated an non-negated items: {order_str}")
+
+ # Split and lower case
+ orders = order_str.lower().split(',')
+
+ # to inform the caller about non-overlapping elements
+ unknown_order = []
+
+ # if negated, we have to remove from the order
+ if neg:
+ allow_order = base_order.copy()
+
+ for order in orders:
+ if not order:
+ continue
+
+ if order not in base_order:
+ unknown_order.append(order)
+ continue
+
+ if order in allow_order:
+ allow_order.remove(order)
+
+ else:
+ allow_order = []
+
+ for order in orders:
+ if not order:
+ continue
+
+ if order not in base_order:
+ unknown_order.append(order)
+ continue
+
+ if order not in allow_order:
+ allow_order.append(order)
+
+ return allow_order, unknown_order
+
+
def get_info(name, notfound_action=0):
"""
notfound_action:
@@ -715,8 +799,7 @@ class system_info:
AliasedOptionError :
in case more than one of the options are found
"""
- found = map(lambda opt: self.cp.has_option(self.section, opt), options)
- found = list(found)
+ found = [self.cp.has_option(self.section, opt) for opt in options]
if sum(found) == 1:
return options[found.index(True)]
elif sum(found) == 0:
@@ -1766,24 +1849,11 @@ class lapack_opt_info(system_info):
return getattr(self, '_calc_info_{}'.format(name))()
def calc_info(self):
- user_order = os.environ.get(self.order_env_var_name, None)
- if user_order is None:
- lapack_order = self.lapack_order
- else:
- # the user has requested the order of the
- # check they are all in the available list, a COMMA SEPARATED list
- user_order = user_order.lower().split(',')
- non_existing = []
- lapack_order = []
- for order in user_order:
- if order in self.lapack_order:
- lapack_order.append(order)
- elif len(order) > 0:
- non_existing.append(order)
- if len(non_existing) > 0:
- raise ValueError("lapack_opt_info user defined "
- "LAPACK order has unacceptable "
- "values: {}".format(non_existing))
+ lapack_order, unknown_order = _parse_env_order(self.lapack_order, self.order_env_var_name)
+ if len(unknown_order) > 0:
+ raise ValueError("lapack_opt_info user defined "
+ "LAPACK order has unacceptable "
+ "values: {}".format(unknown_order))
for lapack in lapack_order:
if self._calc_info(lapack):
@@ -1911,22 +1981,9 @@ class blas_opt_info(system_info):
return getattr(self, '_calc_info_{}'.format(name))()
def calc_info(self):
- user_order = os.environ.get(self.order_env_var_name, None)
- if user_order is None:
- blas_order = self.blas_order
- else:
- # the user has requested the order of the
- # check they are all in the available list
- user_order = user_order.lower().split(',')
- non_existing = []
- blas_order = []
- for order in user_order:
- if order in self.blas_order:
- blas_order.append(order)
- elif len(order) > 0:
- non_existing.append(order)
- if len(non_existing) > 0:
- raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(non_existing))
+ blas_order, unknown_order = _parse_env_order(self.blas_order, self.order_env_var_name)
+ if len(unknown_order) > 0:
+ raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(unknown_order))
for blas in blas_order:
if self._calc_info(blas):
@@ -1962,6 +2019,14 @@ class blas64__opt_info(blas_ilp64_opt_info):
symbol_suffix = '64_'
+class cblas_info(system_info):
+ section = 'cblas'
+ dir_env_var = 'CBLAS'
+ # No default as it's used only in blas_info
+ _lib_names = []
+ notfounderror = BlasNotFoundError
+
+
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
@@ -1983,6 +2048,13 @@ class blas_info(system_info):
# often not installed when mingw is being used. This rough
# treatment is not desirable, but windows is tricky.
info['language'] = 'f77' # XXX: is it generally true?
+ # If cblas is given as an option, use those
+ cblas_info_obj = cblas_info()
+ cblas_opt = cblas_info_obj.get_option_single('cblas_libs', 'libraries')
+ cblas_libs = cblas_info_obj.get_libs(cblas_opt, None)
+ if cblas_libs:
+ info['libraries'] = cblas_libs + blas_libs
+ info['define_macros'] = [('HAVE_CBLAS', None)]
else:
lib = self.get_cblas_libs(info)
if lib is not None:
@@ -2499,13 +2571,12 @@ class _numpy_info(system_info):
except AttributeError:
pass
- include_dirs.append(distutils.sysconfig.get_python_inc(
- prefix=os.sep.join(prefix)))
+ include_dirs.append(sysconfig.get_path('include'))
except ImportError:
pass
- py_incl_dir = distutils.sysconfig.get_python_inc()
+ py_incl_dir = sysconfig.get_path('include')
include_dirs.append(py_incl_dir)
- py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
+ py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in include_dirs:
include_dirs.append(py_pincl_dir)
for d in default_include_dirs:
@@ -2632,8 +2703,8 @@ class boost_python_info(system_info):
break
if not src_dir:
return
- py_incl_dirs = [distutils.sysconfig.get_python_inc()]
- py_pincl_dir = distutils.sysconfig.get_python_inc(plat_specific=True)
+ py_incl_dirs = [sysconfig.get_path('include')]
+ py_pincl_dir = sysconfig.get_path('platinclude')
if py_pincl_dir not in py_incl_dirs:
py_incl_dirs.append(py_pincl_dir)
srcs_dir = os.path.join(src_dir, 'libs', 'python', 'src')
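A worked example of the new ``_parse_env_order`` helper used by ``blas_opt_info`` and
``lapack_opt_info`` above (the environment values are illustrative only):

    import os
    from numpy.distutils.system_info import _parse_env_order

    base = ['mkl', 'openblas', 'atlas', 'blas']

    os.environ['NPY_BLAS_ORDER'] = 'openblas,mkl'
    print(_parse_env_order(base, 'NPY_BLAS_ORDER'))
    # (['openblas', 'mkl'], [])  -- keep only the listed backends, in the given order

    os.environ['NPY_BLAS_ORDER'] = '^atlas,foo'
    print(_parse_env_order(base, 'NPY_BLAS_ORDER'))
    # (['mkl', 'openblas', 'blas'], ['foo'])  -- negation removes items; 'foo' is unknown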
diff --git a/numpy/distutils/tests/test_ccompiler_opt_conf.py b/numpy/distutils/tests/test_ccompiler_opt_conf.py
index 2f83a59e0..244748e58 100644
--- a/numpy/distutils/tests/test_ccompiler_opt_conf.py
+++ b/numpy/distutils/tests/test_ccompiler_opt_conf.py
@@ -66,11 +66,12 @@ class _TestConfFeatures(FakeCCompilerOpt):
self.test_implies(error_msg, search_in, feature_name, feature_dict)
self.test_group(error_msg, search_in, feature_name, feature_dict)
+ self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)
def test_option_types(self, error_msg, option, val):
for tp, available in (
((str, list), (
- "implies", "headers", "flags", "group", "detect"
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
)),
((str,), ("disable",)),
((int,), ("interest",)),
@@ -83,29 +84,25 @@ class _TestConfFeatures(FakeCCompilerOpt):
if not isinstance(val, tp):
error_tp = [t.__name__ for t in (*tp,)]
error_tp = ' or '.join(error_tp)
- raise AssertionError(error_msg + \
+ raise AssertionError(error_msg +
"expected '%s' type for option '%s' not '%s'" % (
error_tp, option, type(val).__name__
))
break
if not found_it:
- raise AssertionError(error_msg + \
- "invalid option name '%s'" % option
- )
+ raise AssertionError(error_msg + "invalid option name '%s'" % option)
def test_duplicates(self, error_msg, option, val):
if option not in (
- "implies", "headers", "flags", "group", "detect"
+ "implies", "headers", "flags", "group", "detect", "extra_checks"
) : return
if isinstance(val, str):
val = val.split()
if len(val) != len(set(val)):
- raise AssertionError(error_msg + \
- "duplicated values in option '%s'" % option
- )
+ raise AssertionError(error_msg + "duplicated values in option '%s'" % option)
def test_implies(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
@@ -117,21 +114,15 @@ class _TestConfFeatures(FakeCCompilerOpt):
implies = implies.split()
if feature_name in implies:
- raise AssertionError(error_msg + \
- "feature implies itself"
- )
+ raise AssertionError(error_msg + "feature implies itself")
for impl in implies:
impl_dict = search_in.get(impl)
if impl_dict is not None:
if "disable" in impl_dict:
- raise AssertionError(error_msg + \
- "implies disabled feature '%s'" % impl
- )
+ raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
continue
- raise AssertionError(error_msg + \
- "implies non-exist feature '%s'" % impl
- )
+ raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl)
def test_group(self, error_msg, search_in, feature_name, feature_dict):
if feature_dict.get("disabled") is not None:
@@ -146,10 +137,26 @@ class _TestConfFeatures(FakeCCompilerOpt):
impl_dict = search_in.get(f)
if not impl_dict or "disable" in impl_dict:
continue
- raise AssertionError(error_msg + \
- "in option '%s', '%s' already exists as a feature name" % (
- option, f
- ))
+ raise AssertionError(error_msg +
+ "in option 'group', '%s' already exists as a feature name" % f
+ )
+
+ def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
+ if feature_dict.get("disabled") is not None:
+ return
+ extra_checks = feature_dict.get("extra_checks", "")
+ if not extra_checks:
+ return
+ if isinstance(extra_checks, str):
+ extra_checks = extra_checks.split()
+
+ for f in extra_checks:
+ impl_dict = search_in.get(f)
+ if not impl_dict or "disable" in impl_dict:
+ continue
+ raise AssertionError(error_msg +
+ "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
+ )
class TestConfFeatures(unittest.TestCase):
def __init__(self, methodName="runTest"):
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index 0768ffdde..ec15126f7 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -284,4 +284,37 @@ class TestSystemInfoReading:
assert info.get_lib_dirs() == lib_dirs
finally:
os.chdir(previousDir)
-
+
+
+def test_distutils_parse_env_order(monkeypatch):
+ from numpy.distutils.system_info import _parse_env_order
+ env = 'NPY_TESTS_DISTUTILS_PARSE_ENV_ORDER'
+
+ base_order = list('abcdef')
+
+ monkeypatch.setenv(env, 'b,i,e,f')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 3
+ assert order == list('bef')
+ assert len(unknown) == 1
+
+ # For when LAPACK/BLAS optimization is disabled
+ monkeypatch.setenv(env, '')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 0
+ assert len(unknown) == 0
+
+ for prefix in '^!':
+ monkeypatch.setenv(env, f'{prefix}b,i,e')
+ order, unknown = _parse_env_order(base_order, env)
+ assert len(order) == 4
+ assert order == list('acdf')
+ assert len(unknown) == 1
+
+ with pytest.raises(ValueError):
+ monkeypatch.setenv(env, 'b,^e,i')
+ _parse_env_order(base_order, env)
+
+ with pytest.raises(ValueError):
+ monkeypatch.setenv(env, '!b,^e,i')
+ _parse_env_order(base_order, env)
diff --git a/numpy/distutils/unixccompiler.py b/numpy/distutils/unixccompiler.py
index 5f36c439f..0cd2d243e 100644
--- a/numpy/distutils/unixccompiler.py
+++ b/numpy/distutils/unixccompiler.py
@@ -3,6 +3,8 @@ unixccompiler - can handle very long argument lists for ar.
"""
import os
+import sys
+import subprocess
from distutils.errors import CompileError, DistutilsExecError, LibError
from distutils.unixccompiler import UnixCCompiler
@@ -26,7 +28,8 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts
self.compiler_so = ccomp
# ensure OPT environment variable is read
if 'OPT' in os.environ:
- from distutils.sysconfig import get_config_vars
+ # XXX who uses this?
+ from sysconfig import get_config_vars
opt = " ".join(os.environ['OPT'].split())
gcv_opt = " ".join(get_config_vars('OPT')[0].split())
ccomp_s = " ".join(self.compiler_so)
@@ -55,6 +58,11 @@ def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts
# add commandline flags to dependency file
if deps:
+ # After running the compiler, the file created will be in EBCDIC
+ # but will not be tagged as such. This tags it so the file does not
+ # have multiple different encodings being written to it
+ if sys.platform == 'zos':
+ subprocess.check_output(['chtag', '-tc', 'IBM1047', obj + '.d'])
with open(obj + '.d', 'a') as f:
f.write(_commandline_dep_string(cc_args, extra_postargs, pp_opts))