summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.github/workflows/build_test.yml31
-rw-r--r--.github/workflows/codeql.yml6
-rw-r--r--.github/workflows/scorecards.yml2
-rw-r--r--doc/release/upcoming_changes/23357.improvement.rst9
-rw-r--r--doc/source/reference/routines.ma.rst15
-rw-r--r--meson.build2
-rw-r--r--numpy/core/setup.py42
-rw-r--r--numpy/core/src/common/simd/vec/memory.h14
-rw-r--r--numpy/core/src/npymath/npy_math_private.h4
-rw-r--r--numpy/core/tests/test_ufunc.py7
-rw-r--r--numpy/distutils/ccompiler_opt.py11
-rw-r--r--numpy/distutils/command/build_clib.py1
-rw-r--r--numpy/distutils/command/build_ext.py1
-rw-r--r--numpy/lib/npyio.py16
-rw-r--r--numpy/lib/npyio.pyi2
-rw-r--r--numpy/lib/tests/test_io.py19
-rw-r--r--numpy/linalg/setup.py2
-rw-r--r--numpy/ma/core.py23
-rwxr-xr-xsetup.py138
19 files changed, 254 insertions, 91 deletions
diff --git a/.github/workflows/build_test.yml b/.github/workflows/build_test.yml
index 0e82a9bc4..d43786250 100644
--- a/.github/workflows/build_test.yml
+++ b/.github/workflows/build_test.yml
@@ -427,3 +427,34 @@ jobs:
# ICL implies SKX, CLX and CNL
- name: Run SIMD tests (Ice Lake)
run: sde -icl -- python runtests.py -n -v -- -k test_simd
+
+ intel_spr_sde_test:
+ needs: [smoke_test]
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@24cb9080177205b6e8c946b17badbe402adc938f # v3.4.0
+ with:
+ submodules: recursive
+ fetch-depth: 0
+ - uses: actions/setup-python@d27e3f3d7c64b4bbf8e4abfb9b63b83e846e0435 # v4.5.0
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+ - name: Install Intel SDE
+ run: |
+ curl -o /tmp/sde.tar.xz https://downloadmirror.intel.com/751535/sde-external-9.14.0-2022-10-25-lin.tar.xz
+ mkdir /tmp/sde && tar -xvf /tmp/sde.tar.xz -C /tmp/sde/
+ sudo mv /tmp/sde/* /opt/sde && sudo ln -s /opt/sde/sde64 /usr/bin/sde
+ - name: Install dependencies
+ run: |
+ python -m pip install -r test_requirements.txt
+ sudo apt install gcc-12 g++-12
+ - name: Build and install NumPy
+ run: |
+ export CC=/usr/bin/gcc-12
+ export CXX=/usr/bin/g++-12
+ python -m pip install -e .
+ # Run only a few tests, running everything in an SDE takes a long time
+ # Using pytest directly, unable to use python runtests.py -n -t ...
+ - name: Run linalg/ufunc/umath tests
+ run: |
+ sde -spr -- python -m pytest numpy/core/tests/test_umath* numpy/core/tests/test_ufunc.py numpy/linalg/tests/test_*
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index 7860716d6..bb173796a 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -45,7 +45,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
- uses: github/codeql-action/init@04df1262e6247151b5ac09cd2c303ac36ad3f62b # v2.2.9
+ uses: github/codeql-action/init@d186a2a36cc67bfa1b860e6170d37fb9634742c7 # v2.2.11
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -55,7 +55,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
- uses: github/codeql-action/autobuild@04df1262e6247151b5ac09cd2c303ac36ad3f62b # v2.2.9
+ uses: github/codeql-action/autobuild@d186a2a36cc67bfa1b860e6170d37fb9634742c7 # v2.2.11
# â„šī¸ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
@@ -68,6 +68,6 @@ jobs:
# ./location_of_script_within_repo/buildscript.sh
- name: Perform CodeQL Analysis
- uses: github/codeql-action/analyze@04df1262e6247151b5ac09cd2c303ac36ad3f62b # v2.2.9
+ uses: github/codeql-action/analyze@d186a2a36cc67bfa1b860e6170d37fb9634742c7 # v2.2.11
with:
category: "/language:${{matrix.language}}"
diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml
index 03eabc365..203cff1a7 100644
--- a/.github/workflows/scorecards.yml
+++ b/.github/workflows/scorecards.yml
@@ -50,6 +50,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
- uses: github/codeql-action/upload-sarif@04df1262e6247151b5ac09cd2c303ac36ad3f62b # v2.1.27
+ uses: github/codeql-action/upload-sarif@d186a2a36cc67bfa1b860e6170d37fb9634742c7 # v2.2.11
with:
sarif_file: results.sarif
diff --git a/doc/release/upcoming_changes/23357.improvement.rst b/doc/release/upcoming_changes/23357.improvement.rst
new file mode 100644
index 000000000..3b474146b
--- /dev/null
+++ b/doc/release/upcoming_changes/23357.improvement.rst
@@ -0,0 +1,9 @@
+Explicitly show keys of .npz file in repr
+-----------------------------------------
+``NpzFile`` shows the keys of the loaded .npz file when printed.
+
+.. code-block:: python
+
+ >>> npzfile = np.load('arr.npz')
+ >>> npzfile
+ NpzFile 'arr.npz' with keys: arr_0, arr_1, arr_2, arr_3, arr_4...
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index d503cc243..fd22a74aa 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -31,6 +31,7 @@ From existing data
ma.fromfunction
ma.MaskedArray.copy
+ ma.diagflat
Ones and zeros
@@ -72,6 +73,9 @@ Inspecting the array
ma.isMaskedArray
ma.isMA
ma.isarray
+ ma.isin
+ ma.in1d
+ ma.unique
ma.MaskedArray.all
@@ -394,6 +398,17 @@ Clipping and rounding
ma.MaskedArray.clip
ma.MaskedArray.round
+Set operations
+~~~~~~~~~~~~~~
+.. autosummary::
+ :toctree: generated/
+
+
+ ma.intersect1d
+ ma.setdiff1d
+ ma.setxor1d
+ ma.union1d
+
Miscellanea
~~~~~~~~~~~
diff --git a/meson.build b/meson.build
index c1fc1dad8..47e71efc0 100644
--- a/meson.build
+++ b/meson.build
@@ -11,7 +11,7 @@ project(
'buildtype=debugoptimized',
'b_ndebug=if-release',
'c_std=c99',
- 'cpp_std=c++14',
+ 'cpp_std=c++17',
'blas=openblas',
'lapack=openblas',
'pkgconfig.relocatable=true',
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 77561827a..680c2a5f6 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -405,7 +405,6 @@ def configuration(parent_package='',top_path=None):
exec_mod_from_location)
from numpy.distutils.system_info import (get_info, blas_opt_info,
lapack_opt_info)
- from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
from numpy.version import release as is_released
config = Configuration('core', parent_package, top_path)
@@ -658,44 +657,6 @@ def configuration(parent_package='',top_path=None):
# but we cannot use add_installed_pkg_config here either, so we only
# update the substitution dictionary during npymath build
config_cmd = config.get_config_cmd()
- # Check that the toolchain works, to fail early if it doesn't
- # (avoid late errors with MATHLIB which are confusing if the
- # compiler does not work).
- for lang, test_code, note in (
- ('c', 'int main(void) { return 0;}', ''),
- ('c++', (
- 'int main(void)'
- '{ auto x = 0.0; return static_cast<int>(x); }'
- ), (
- 'note: A compiler with support for C++11 language '
- 'features is required.'
- )
- ),
- ):
- is_cpp = lang == 'c++'
- if is_cpp:
- # this a workaround to get rid of invalid c++ flags
- # without doing big changes to config.
- # c tested first, compiler should be here
- bk_c = config_cmd.compiler
- config_cmd.compiler = bk_c.cxx_compiler()
-
- # Check that Linux compiler actually support the default flags
- if hasattr(config_cmd.compiler, 'compiler'):
- config_cmd.compiler.compiler.extend(NPY_CXX_FLAGS)
- config_cmd.compiler.compiler_so.extend(NPY_CXX_FLAGS)
-
- st = config_cmd.try_link(test_code, lang=lang)
- if not st:
- # rerun the failing command in verbose mode
- config_cmd.compiler.verbose = True
- config_cmd.try_link(test_code, lang=lang)
- raise RuntimeError(
- f"Broken toolchain: cannot link a simple {lang.upper()} "
- f"program. {note}"
- )
- if is_cpp:
- config_cmd.compiler = bk_c
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
@@ -1067,8 +1028,7 @@ def configuration(parent_package='',top_path=None):
common_deps,
libraries=['npymath'],
extra_objects=svml_objs,
- extra_info=extra_info,
- extra_cxx_compile_args=NPY_CXX_FLAGS)
+ extra_info=extra_info)
#######################################################################
# umath_tests module #
diff --git a/numpy/core/src/common/simd/vec/memory.h b/numpy/core/src/common/simd/vec/memory.h
index de78d02e3..1ad39cead 100644
--- a/numpy/core/src/common/simd/vec/memory.h
+++ b/numpy/core/src/common/simd/vec/memory.h
@@ -205,9 +205,9 @@ NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, n
assert(nlane > 0);
npyv_s32 vfill = npyv_setall_s32(fill);
#ifdef NPY_HAVE_VX
- const unsigned blane = (unsigned short)nlane;
+ const unsigned blane = (nlane > 4) ? 4 : nlane;
const npyv_u32 steps = npyv_set_u32(0, 1, 2, 3);
- const npyv_u32 vlane = npyv_setall_u32((unsigned)blane);
+ const npyv_u32 vlane = npyv_setall_u32(blane);
const npyv_b32 mask = vec_cmpgt(vlane, steps);
npyv_s32 a = vec_load_len(ptr, blane*4-1);
return vec_sel(vfill, a, mask);
@@ -233,8 +233,8 @@ NPY_FINLINE npyv_s32 npyv_load_till_s32(const npy_int32 *ptr, npy_uintp nlane, n
NPY_FINLINE npyv_s32 npyv_load_tillz_s32(const npy_int32 *ptr, npy_uintp nlane)
{
#ifdef NPY_HAVE_VX
- unsigned blane = ((unsigned short)nlane)*4 - 1;
- return vec_load_len(ptr, blane);
+ unsigned blane = (nlane > 4) ? 4 : nlane;
+ return vec_load_len(ptr, blane*4-1);
#else
return npyv_load_till_s32(ptr, nlane, 0);
#endif
@@ -252,7 +252,7 @@ NPY_FINLINE npyv_s64 npyv_load_till_s64(const npy_int64 *ptr, npy_uintp nlane, n
NPY_FINLINE npyv_s64 npyv_load_tillz_s64(const npy_int64 *ptr, npy_uintp nlane)
{
#ifdef NPY_HAVE_VX
- unsigned blane = (unsigned short)nlane;
+ unsigned blane = (nlane > 2) ? 2 : nlane;
return vec_load_len((const signed long long*)ptr, blane*8-1);
#else
return npyv_load_till_s64(ptr, nlane, 0);
@@ -354,7 +354,7 @@ NPY_FINLINE void npyv_store_till_s32(npy_int32 *ptr, npy_uintp nlane, npyv_s32 a
{
assert(nlane > 0);
#ifdef NPY_HAVE_VX
- unsigned blane = (unsigned short)nlane;
+ unsigned blane = (nlane > 4) ? 4 : nlane;
vec_store_len(a, ptr, blane*4-1);
#else
switch(nlane) {
@@ -378,7 +378,7 @@ NPY_FINLINE void npyv_store_till_s64(npy_int64 *ptr, npy_uintp nlane, npyv_s64 a
{
assert(nlane > 0);
#ifdef NPY_HAVE_VX
- unsigned blane = (unsigned short)nlane;
+ unsigned blane = (nlane > 2) ? 2 : nlane;
vec_store_len(a, (signed long long*)ptr, blane*8-1);
#else
if (nlane == 1) {
diff --git a/numpy/core/src/npymath/npy_math_private.h b/numpy/core/src/npymath/npy_math_private.h
index a474b3de3..20c94f98a 100644
--- a/numpy/core/src/npymath/npy_math_private.h
+++ b/numpy/core/src/npymath/npy_math_private.h
@@ -21,6 +21,7 @@
#include <Python.h>
#ifdef __cplusplus
#include <cmath>
+#include <complex>
using std::isgreater;
using std::isless;
#else
@@ -494,8 +495,9 @@ do { \
* Microsoft C defines _MSC_VER
* Intel compiler does not use MSVC complex types, but defines _MSC_VER by
* default.
+ * Since C++17, MSVC no longer supports them.
*/
-#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
+#if !defined(__cplusplus) && defined(_MSC_VER) && !defined(__INTEL_COMPILER)
typedef union {
npy_cdouble npy_z;
_Dcomplex c99_z;
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 04add9fa7..71af2ccb7 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -2967,3 +2967,10 @@ class TestLowlevelAPIAccess:
with pytest.raises(TypeError):
# cannot call it a second time:
np.negative._get_strided_loop(call_info)
+
+ def test_long_arrays(self):
+ t = np.zeros((1029, 917), dtype=np.single)
+ t[0][0] = 1
+ t[28][414] = 1
+ tc = np.cos(t)
+ assert_equal(tc[0][0], tc[28][414])
diff --git a/numpy/distutils/ccompiler_opt.py b/numpy/distutils/ccompiler_opt.py
index 4904dd3dd..6ba4cd816 100644
--- a/numpy/distutils/ccompiler_opt.py
+++ b/numpy/distutils/ccompiler_opt.py
@@ -16,15 +16,6 @@ import re
import subprocess
import textwrap
-# These flags are used to compile any C++ source within Numpy.
-# They are chosen to have very few runtime dependencies.
-NPY_CXX_FLAGS = [
- '-std=c++11', # Minimal standard version
- '-D__STDC_VERSION__=0', # for compatibility with C headers
- '-fno-exceptions', # no exception support
- '-fno-rtti'] # no runtime type information
-
-
class _Config:
"""An abstract class holds all configurable attributes of `CCompilerOpt`,
these class attributes can be used to change the default behavior
@@ -1000,7 +991,7 @@ class _CCompiler:
)
detect_args = (
("cc_has_debug", ".*(O0|Od|ggdb|coverage|debug:full).*", ""),
- ("cc_has_native",
+ ("cc_has_native",
".*(-march=native|-xHost|/QxHost|-mcpu=a64fx).*", ""),
# in case if the class run with -DNPY_DISABLE_OPTIMIZATION
("cc_noopt", ".*DISABLE_OPT.*", ""),
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 45201f98f..11999dae2 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -307,6 +307,7 @@ class build_clib(old_build_clib):
# problem, msvc uses its own convention :(
c_sources += cxx_sources
cxx_sources = []
+ extra_cflags += extra_cxxflags
# filtering C dispatch-table sources when optimization is not disabled,
# otherwise treated as normal sources.
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index 6dc6b4265..d24162a42 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -407,6 +407,7 @@ class build_ext (old_build_ext):
if cxx_sources:
# Needed to compile kiva.agg._agg extension.
extra_args.append('/Zm1000')
+ extra_cflags += extra_cxxflags
# this hack works around the msvc compiler attributes
# problem, msvc uses its own convention :(
c_sources += cxx_sources
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index bfda6804a..f8f2ab7a2 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -167,6 +167,8 @@ class NpzFile(Mapping):
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.npyio.NpzFile)
True
+ >>> npz
+ NpzFile 'object' with keys: x, y
>>> sorted(npz.files)
['x', 'y']
>>> npz['x'] # getitem access
@@ -178,6 +180,7 @@ class NpzFile(Mapping):
# Make __exit__ safe if zipfile_factory raises an exception
zip = None
fid = None
+ _MAX_REPR_ARRAY_COUNT = 5
def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None, *,
@@ -259,6 +262,19 @@ class NpzFile(Mapping):
else:
raise KeyError("%s is not a file in the archive" % key)
+ def __repr__(self):
+ # Get filename or default to `object`
+ if isinstance(self.fid, str):
+ filename = self.fid
+ else:
+ filename = getattr(self.fid, "name", "object")
+
+ # Get the name of arrays
+ array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
+ if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
+ array_names += "..."
+ return f"NpzFile {filename!r} with keys: {array_names}"
+
@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
diff --git a/numpy/lib/npyio.pyi b/numpy/lib/npyio.pyi
index 8007b2dc7..9dd3d6809 100644
--- a/numpy/lib/npyio.pyi
+++ b/numpy/lib/npyio.pyi
@@ -72,6 +72,7 @@ class NpzFile(Mapping[str, NDArray[Any]]):
files: list[str]
allow_pickle: bool
pickle_kwargs: None | Mapping[str, Any]
+ _MAX_REPR_ARRAY_COUNT: int
# Represent `f` as a mutable property so we can access the type of `self`
@property
def f(self: _T) -> BagObj[_T]: ...
@@ -97,6 +98,7 @@ class NpzFile(Mapping[str, NDArray[Any]]):
def __iter__(self) -> Iterator[str]: ...
def __len__(self) -> int: ...
def __getitem__(self, key: str) -> NDArray[Any]: ...
+ def __repr__(self) -> str: ...
# NOTE: Returns a `NpzFile` if file is a zip file;
# returns an `ndarray`/`memmap` otherwise
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 06d6dbf8d..5a68fbc97 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -321,6 +321,21 @@ class TestSavezLoad(RoundtripTest):
data.close()
assert_(fp.closed)
+ @pytest.mark.parametrize("count, expected_repr", [
+ (1, "NpzFile {fname!r} with keys: arr_0"),
+ (5, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4"),
+ # _MAX_REPR_ARRAY_COUNT is 5, so files with more than 5 keys are
+ # expected to end in '...'
+ (6, "NpzFile {fname!r} with keys: arr_0, arr_1, arr_2, arr_3, arr_4..."),
+ ])
+ def test_repr_lists_keys(self, count, expected_repr):
+ a = np.array([[1, 2], [3, 4]], float)
+ with temppath(suffix='.npz') as tmp:
+ np.savez(tmp, *[a]*count)
+ l = np.load(tmp)
+ assert repr(l) == expected_repr.format(fname=tmp)
+ l.close()
+
class TestSaveTxt:
def test_array(self):
@@ -597,8 +612,8 @@ class TestSaveTxt:
# in our process if needed, see gh-16889
memoryerror_raised = Value(c_bool)
- # Since Python 3.8, the default start method for multiprocessing has
- # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
+ # Since Python 3.8, the default start method for multiprocessing has
+ # been changed from 'fork' to 'spawn' on macOS, causing inconsistency
# on memory sharing model, lead to failed test for check_large_zip
ctx = get_context('fork')
p = ctx.Process(target=check_large_zip, args=(memoryerror_raised,))
diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py
index 1c4e1295e..6f72635ab 100644
--- a/numpy/linalg/setup.py
+++ b/numpy/linalg/setup.py
@@ -4,7 +4,6 @@ import sysconfig
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
- from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
from numpy.distutils.system_info import get_info, system_info
config = Configuration('linalg', parent_package, top_path)
@@ -81,7 +80,6 @@ def configuration(parent_package='', top_path=None):
sources=['umath_linalg.cpp', get_lapack_lite_sources],
depends=['lapack_lite/f2c.h'],
extra_info=lapack_info,
- extra_cxx_compile_args=NPY_CXX_FLAGS,
libraries=['npymath'],
)
config.add_data_files('*.pyi')
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index dcec82773..7f57985a9 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -7033,6 +7033,29 @@ def compressed(x):
--------
ma.MaskedArray.compressed : Equivalent method.
+ Examples
+ --------
+
+ Create an array with negative values masked:
+
+ >>> import numpy as np
+ >>> x = np.array([[1, -1, 0], [2, -1, 3], [7, 4, -1]])
+ >>> masked_x = np.ma.masked_array(x, mask=x < 0)
+ >>> masked_x
+ masked_array(
+ data=[[1, --, 0],
+ [2, --, 3],
+ [7, 4, --]],
+ mask=[[False, True, False],
+ [False, True, False],
+ [False, False, True]],
+ fill_value=999999)
+
+ Compress the masked array into a 1-D array of non-masked values:
+
+ >>> np.ma.compressed(masked_x)
+ array([1, 0, 2, 3, 7, 4])
+
"""
return asanyarray(x).compressed()
diff --git a/setup.py b/setup.py
index 671df90fd..edd8c4d6d 100755
--- a/setup.py
+++ b/setup.py
@@ -12,7 +12,9 @@ import textwrap
import warnings
import builtins
import re
+import tempfile
+from distutils.errors import CompileError
# Python supported version checks. Keep right after stdlib imports to ensure we
# get a sensible error for older Python versions
@@ -184,45 +186,135 @@ class sdist_checked(cmdclass['sdist']):
def get_build_overrides():
"""
- Custom build commands to add `-std=c99` to compilation
+ Custom build commands to add std flags if required to compilation
"""
from numpy.distutils.command.build_clib import build_clib
from numpy.distutils.command.build_ext import build_ext
from numpy._utils import _pep440
- def _needs_gcc_c99_flag(obj):
- if obj.compiler.compiler_type != 'unix':
- return False
+ def try_compile(compiler, file, flags = [], verbose=False):
+ # To bypass trapping warnings by Travis CI
+ if getattr(compiler, 'compiler_type', '') == 'unix':
+ flags = ['-Werror'] + flags
+ bk_ver = getattr(compiler, 'verbose', False)
+ compiler.verbose = verbose
+ try:
+ compiler.compile([file], extra_postargs=flags)
+ return True, ''
+ except CompileError as e:
+ return False, str(e)
+ finally:
+ compiler.verbose = bk_ver
+
+ def flags_is_required(compiler, is_cpp, flags, code):
+ if is_cpp:
+ compiler = compiler.cxx_compiler()
+ suf = '.cpp'
+ else:
+ suf = '.c'
+ with tempfile.TemporaryDirectory() as temp_dir:
+ tmp_file = os.path.join(temp_dir, "test" + suf)
+ with open(tmp_file, "w+") as f:
+ f.write(code)
+ # Try first without specifying any flags: if the required
+ # standard is already supported by default, there is no
+ # need to pass the flags.
+ comp = try_compile(compiler, tmp_file)
+ if not comp[0]:
+ comp = try_compile(compiler, tmp_file, flags)
+ if not comp[0]:
+ # rerun in verbose mode to show the error
+ try_compile(compiler, tmp_file, flags, True)
+ if is_cpp:
+ raise RuntimeError(
+ "Broken toolchain during testing C++ compiler. \n"
+ "A compiler with support for C++17 language "
+ "features is required.\n"
+ f"Triggered the following error: {comp[1]}."
+ )
+ else:
+ raise RuntimeError(
+ "Broken toolchain during testing C compiler. \n"
+ "A compiler with support for C99 language "
+ "features is required.\n"
+ f"Triggered the following error: {comp[1]}."
+ )
+ return True
+ return False
- cc = obj.compiler.compiler[0]
- if "gcc" not in cc:
- return False
-
- # will print something like '4.2.1\n'
- out = subprocess.run([cc, '-dumpversion'],
- capture_output=True, text=True)
- # -std=c99 is default from this version on
- if _pep440.parse(out.stdout) >= _pep440.Version('5.0'):
- return False
- return True
+ def std_cxx_flags(cmd):
+ compiler = cmd.compiler
+ flags = getattr(compiler, '__np_cache_cpp_flags', None)
+ if flags is not None:
+ return flags
+ flags = dict(
+ msvc = ['/std:c++17']
+ ).get(compiler.compiler_type, ['-std=c++17'])
+ # These flags are used to compile any C++ source within Numpy.
+ # They are chosen to have very few runtime dependencies.
+ extra_flags = dict(
+ # update the __cplusplus macro to match the enabled C++ version
+ msvc = ['/Zc:__cplusplus']
+ ).get(compiler.compiler_type, [
+ # The following flag is used to avoid emitting any extra code
+ # from the STL, since extensions are built by the C linker
+ # without C++ runtime dependencies.
+ '-fno-threadsafe-statics',
+ '-D__STDC_VERSION__=0', # for compatibility with C headers
+ '-fno-exceptions', # no exception support
+ '-fno-rtti' # no runtime type information
+ ])
+ if not flags_is_required(compiler, True, flags, textwrap.dedent('''
+ #include <type_traits>
+ template<typename ...T>
+ constexpr bool test_fold = (... && std::is_const_v<T>);
+ int main()
+ {
+ if constexpr (test_fold<int, const int>) {
+ return 0;
+ }
+ else {
+ return -1;
+ }
+ }
+ ''')):
+ flags.clear()
+ flags += extra_flags
+ setattr(compiler, '__np_cache_cpp_flags', flags)
+ return flags
+
+ def std_c_flags(cmd):
+ compiler = cmd.compiler
+ flags = getattr(compiler, '__np_cache_c_flags', None)
+ if flags is not None:
+ return flags
+ flags = dict(
+ msvc = []
+ ).get(compiler.compiler_type, ['-std=c99'])
+
+ if not flags_is_required(compiler, False, flags, textwrap.dedent('''
+ inline int test_inline() { return 0; }
+ int main(void)
+ { return test_inline(); }
+ ''')):
+ flags.clear()
+
+ setattr(compiler, '__np_cache_c_flags', flags)
+ return flags
class new_build_clib(build_clib):
def build_a_library(self, build_info, lib_name, libraries):
- from numpy.distutils.ccompiler_opt import NPY_CXX_FLAGS
- if _needs_gcc_c99_flag(self):
- build_info['extra_cflags'] = ['-std=c99']
- build_info['extra_cxxflags'] = NPY_CXX_FLAGS
+ build_info['extra_cflags'] = std_c_flags(self)
+ build_info['extra_cxxflags'] = std_cxx_flags(self)
build_clib.build_a_library(self, build_info, lib_name, libraries)
class new_build_ext(build_ext):
def build_extension(self, ext):
- if _needs_gcc_c99_flag(self):
- if '-std=c99' not in ext.extra_compile_args:
- ext.extra_compile_args.append('-std=c99')
+ ext.extra_c_compile_args += std_c_flags(self)
+ ext.extra_cxx_compile_args += std_cxx_flags(self)
build_ext.build_extension(self, ext)
return new_build_clib, new_build_ext
-
def generate_cython():
# Check Cython version
from numpy._utils import _pep440