summaryrefslogtreecommitdiff
path: root/numpy
diff options
context:
space:
mode:
Diffstat (limited to 'numpy')
-rw-r--r--numpy/add_newdocs.py8
-rw-r--r--numpy/bento.info22
-rw-r--r--numpy/core/_internal.py4
-rw-r--r--numpy/core/bento.info41
-rw-r--r--numpy/core/bscript555
-rw-r--r--numpy/core/fromnumeric.py6
-rw-r--r--numpy/core/include/numpy/npy_3kcompat.h6
-rw-r--r--numpy/core/numeric.py9
-rw-r--r--numpy/core/setup.py3
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src5
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c94
-rw-r--r--numpy/core/src/multiarray/mapping.c126
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c29
-rw-r--r--numpy/core/src/multiarray/number.c13
-rw-r--r--numpy/core/src/multiarray/shape.c36
-rw-r--r--numpy/core/src/umath/loops.c.src49
-rw-r--r--numpy/core/tests/test_deprecations.py241
-rw-r--r--numpy/core/tests/test_dtype.py15
-rw-r--r--numpy/core/tests/test_indexing.py243
-rw-r--r--numpy/core/tests/test_multiarray.py88
-rw-r--r--numpy/core/tests/test_umath.py16
-rw-r--r--numpy/distutils/exec_command.py2
-rw-r--r--numpy/distutils/fcompiler/compaq.py2
-rw-r--r--numpy/distutils/fcompiler/intel.py29
-rw-r--r--numpy/distutils/intelccompiler.py35
-rw-r--r--numpy/distutils/msvc9compiler.py23
-rw-r--r--numpy/distutils/msvccompiler.py17
-rw-r--r--numpy/distutils/npy_pkg_config.py43
-rw-r--r--numpy/distutils/system_info.py19
-rw-r--r--numpy/doc/basics.py39
-rw-r--r--numpy/fft/bento.info6
-rw-r--r--numpy/fft/bscript7
-rw-r--r--numpy/lib/function_base.py50
-rw-r--r--numpy/lib/shape_base.py2
-rw-r--r--numpy/lib/tests/test__version.py13
-rw-r--r--numpy/lib/tests/test_function_base.py43
-rw-r--r--numpy/lib/tests/test_shape_base.py9
-rw-r--r--numpy/linalg/bento.info21
-rw-r--r--numpy/linalg/bscript26
-rw-r--r--numpy/linalg/linalg.py15
-rw-r--r--numpy/linalg/tests/test_linalg.py14
-rw-r--r--numpy/ma/core.py4
-rw-r--r--numpy/ma/tests/test_core.py12
-rw-r--r--numpy/random/bento.info9
-rw-r--r--numpy/random/bscript38
-rw-r--r--numpy/random/mtrand/mtrand.pyx2
-rw-r--r--numpy/setup.py1
47 files changed, 699 insertions, 1391 deletions
diff --git a/numpy/add_newdocs.py b/numpy/add_newdocs.py
index 607e28a28..293005434 100644
--- a/numpy/add_newdocs.py
+++ b/numpy/add_newdocs.py
@@ -752,8 +752,8 @@ add_newdoc('numpy.core.multiarray', 'empty',
Returns
-------
out : ndarray
- Array of uninitialized (arbitrary) data with the given
- shape, dtype, and order.
+ Array of uninitialized (arbitrary) data of the given shape, dtype, and
+ order. Object arrays will be initialized to None.
See Also
--------
@@ -6223,7 +6223,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
"""
- A character code (one of 'biufcOSUV') identifying the general kind of data.
+ A character code (one of 'biufcmMOSUV') identifying the general kind of data.
= ======================
b boolean
@@ -6231,6 +6231,8 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
u unsigned integer
f floating-point
c complex floating-point
+ m timedelta
+ M datetime
O object
S (byte-)string
U Unicode
diff --git a/numpy/bento.info b/numpy/bento.info
deleted file mode 100644
index 52b257543..000000000
--- a/numpy/bento.info
+++ /dev/null
@@ -1,22 +0,0 @@
-Recurse:
- core, fft, linalg, random
-
-Library:
- Packages:
- _build_utils,
- compat,
- core,
- core.code_generators,
- distutils,
- distutils.command,
- distutils.fcompiler,
- doc,
- f2py,
- fft,
- lib,
- linalg,
- ma,
- matrixlib,
- polynomial,
- random,
- testing
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index 879f4a224..81f5be4ad 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -121,6 +121,10 @@ def _array_descr(descriptor):
offset += field[0].itemsize
result.append(tup)
+ if descriptor.itemsize > offset:
+ num = descriptor.itemsize - offset
+ result.append(('', '|V%d' % num))
+
return result
# Build a new array from the information in a pickle.
diff --git a/numpy/core/bento.info b/numpy/core/bento.info
deleted file mode 100644
index 0c335e08a..000000000
--- a/numpy/core/bento.info
+++ /dev/null
@@ -1,41 +0,0 @@
-HookFile: bscript
-
-Library:
- CompiledLibrary: lib/npymath
- Sources:
- src/npymath/_signbit.c,
- src/npymath/ieee754.c.src,
- src/npymath/npy_math.c.src,
- src/npymath/npy_math_complex.c.src,
- src/npymath/halffloat.c
- CompiledLibrary: npysort
- Sources:
- src/private/npy_partition.h.src,
- src/private/npy_binsearch.h.src,
- src/npysort/quicksort.c.src,
- src/npysort/mergesort.c.src,
- src/npysort/heapsort.c.src,
- src/npysort/selection.c.src,
- src/npysort/binsearch.c.src
- Extension: multiarray
- Sources:
- src/multiarray/multiarraymodule_onefile.c
- Extension: multiarray_tests
- Sources:
- src/multiarray/multiarray_tests.c.src,
- src/private/mem_overlap.c
- Extension: umath
- Sources:
- src/umath/umathmodule_onefile.c
- Extension: umath_tests
- Sources:
- src/umath/umath_tests.c.src
- Extension: test_rational
- Sources:
- src/umath/test_rational.c.src
- Extension: struct_ufunc_test
- Sources:
- src/umath/struct_ufunc_test.c.src
- Extension: operand_flag_tests
- Sources:
- src/umath/operand_flag_tests.c.src
diff --git a/numpy/core/bscript b/numpy/core/bscript
deleted file mode 100644
index 944c2996f..000000000
--- a/numpy/core/bscript
+++ /dev/null
@@ -1,555 +0,0 @@
-import os
-import sys
-
-from bento.commands import hooks
-
-import waflib
-import waflib.Errors
-from waflib.Task \
- import \
- Task
-waflib.Logs.verbose = 1
-
-# Importing this adds new checkers to waf configure context - I don't like this
-# way of working, should find a more explicit way to attach new functions to
-# context.
-import numpy._build_utils.waf
-
-from numpy._build_utils.apple_accelerate \
- import \
- get_sgemv_fix
-
-from code_generators.numpy_api \
- import \
- multiarray_api, ufunc_api
-from code_generators import generate_numpy_api, generate_ufunc_api, \
- generate_umath
-
-from setup_common \
- import \
- OPTIONAL_STDFUNCS_MAYBE, OPTIONAL_STDFUNCS, C99_FUNCS_EXTENDED, \
- C99_FUNCS_SINGLE, C99_COMPLEX_TYPES, C99_COMPLEX_FUNCS, \
- MANDATORY_FUNCS, C_ABI_VERSION, C_API_VERSION
-
-def make_relpath(f):
- return os.path.relpath(f, os.path.abspath(os.path.dirname(__file__)))
-
-ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
-NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0")
-
-NUMPYCONFIG_SYM = []
-
-# FIXME
-if ENABLE_SEPARATE_COMPILATION:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', '#define NPY_ENABLE_SEPARATE_COMPILATION 1'))
-else:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_ENABLE_SEPARATE_COMPILATION', ''))
-
-if NPY_RELAXED_STRIDES_CHECKING:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_RELAXED_STRIDES_CHECKING', '#define NPY_RELAXED_STRIDES_CHECKING 1'))
-else:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_RELAXED_STRIDES_CHECKING', ''))
-
-NUMPYCONFIG_SYM.append(('VISIBILITY_HIDDEN', '__attribute__((visibility("hidden")))'))
-
-NUMPYCONFIG_SYM.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
-NUMPYCONFIG_SYM.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
-
-global PYTHON_HAS_UNICODE_WIDE
-
-def is_npy_no_signal():
- """Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
- header."""
- return sys.platform == 'win32'
-
-def define_no_smp():
- """Returns True if we should define NPY_NOSMP, False otherwise."""
- #--------------------------------
- # Checking SMP and thread options
- #--------------------------------
- # Perhaps a fancier check is in order here.
- # so that threads are only enabled if there
- # are actually multiple CPUS? -- but
- # threaded code can be nice even on a single
- # CPU so that long-calculating code doesn't
- # block.
- return 'NPY_NOSMP' in os.environ
-
-def write_numpy_config(conf):
- subst_dict = {}
- for key, value in NUMPYCONFIG_SYM:
- subst_dict["@%s@" % key] = str(value)
- node = conf.path.find_node("include/numpy/_numpyconfig.h.in")
- cnt = node.read()
- for k, v in subst_dict.items():
- cnt = cnt.replace(k, v)
- assert node is not None
- onode = conf.bldnode.make_node(node.path_from(conf.srcnode)).change_ext("")
- onode.write(cnt)
-
-def write_numpy_inifiles(conf):
- subst_dict = dict([("@sep@", os.path.sep), ("@pkgname@", "numpy.core")])
- for inifile in ["mlib.ini.in", "npymath.ini.in"]:
- node = conf.path.find_node(inifile)
- cnt = node.read()
- for k, v in subst_dict.items():
- cnt = cnt.replace(k, v)
- assert node is not None
- outfile = os.path.join("lib", "npy-pkg-config", inifile)
- onode = conf.bldnode.make_node(node.path_from(conf.srcnode).replace(
- inifile, outfile)).change_ext("")
- onode.write(cnt)
-
-def type_checks(conf):
- header_name = "Python.h"
- features = "c pyext"
- for c_type in ("int", "long", "short"):
- macro_name = "SIZEOF_%s" % numpy._build_utils.waf.sanitize_string(c_type)
- conf.check_declaration(macro_name, header_name=header_name,
- features=features)
- NUMPYCONFIG_SYM.append((macro_name, macro_name))
-
- for c_type, e_size in (("float", 4), ("double", 8), ("long double", [12, 16])):
- macro_name = "SIZEOF_%s" % numpy._build_utils.waf.sanitize_string(c_type)
- size = conf.check_type_size(c_type, header_name=header_name,
- features=features, expected_sizes=e_size)
- NUMPYCONFIG_SYM.append((macro_name, str(size)))
-
- macro_name = "SIZEOF_COMPLEX_%s" % numpy._build_utils.waf.sanitize_string(c_type)
- complex_def = "struct {%s __x; %s __y;}" % (c_type, c_type)
- size = conf.check_type_size(complex_def, header_name=header_name,
- features=features, expected_sizes=2*size)
- NUMPYCONFIG_SYM.append((macro_name, str(size)))
-
- if sys.platform != 'darwin':
- conf.check_ldouble_representation()
-
- size = conf.check_type_size("Py_intptr_t", header_name=header_name,
- expected_sizes=[4, 8], features=features)
- NUMPYCONFIG_SYM.append(('SIZEOF_%s' % numpy._build_utils.waf.sanitize_string("Py_intptr_t"),
- '%d' % size))
-
- size = conf.check_type_size("off_t", header_name=header_name,
- expected_sizes=[4, 8], features=features)
- NUMPYCONFIG_SYM.append(('SIZEOF_%s' % numpy._build_utils.waf.sanitize_string("off_t"),
- '%d' % size))
-
- # We check declaration AND type because that's how distutils does it.
- try:
- conf.check_declaration("PY_LONG_LONG", header_name=header_name,
- features=features)
- size = conf.check_type_size("PY_LONG_LONG", header_name=header_name,
- features=features, expected_sizes=[4, 8])
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_SIZEOF_LONGLONG",
- "#define NPY_SIZEOF_LONGLONG %d" % size))
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_SIZEOF_PY_LONG_LONG",
- "#define NPY_SIZEOF_PY_LONG_LONG %d" % size))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_SIZEOF_LONGLONG", ""))
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_SIZEOF_PY_LONG_LONG", ""))
-
- conf.check_declaration("CHAR_BIT", header_name=header_name, features=features)
-
- # Check whether we need our own wide character support
- global PYTHON_HAS_UNICODE_WIDE
- try:
- conf.check_declaration('Py_UNICODE_WIDE', header_name=header_name, features=features)
- PYTHON_HAS_UNICODE_WIDE = False
- except waflib.Errors.ConfigurationError:
- PYTHON_HAS_UNICODE_WIDE = True
-
- try:
- conf.check_declaration('PyOS_ascii_strtod', header_name=header_name, features=features)
- except waflib.Errors.ConfigurationError:
- try:
- conf.check_func('strtod')
- conf.define('PyOS_ascii_strtod', 'strtod')
- except waflib.Errors.ConfigurationError:
- pass
-
-def signal_smp_checks(conf):
- if is_npy_no_signal():
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_NO_SIGNAL", "#define NPY_NO_SIGNAL\n"))
- conf.define("__NPY_PRIVATE_NO_SIGNAL", 1)
- else:
- NUMPYCONFIG_SYM.append(("DEFINE_NPY_NO_SIGNAL", ""))
-
- if define_no_smp():
- NUMPYCONFIG_SYM.append(("NPY_NO_SMP", 1))
- else:
- NUMPYCONFIG_SYM.append(("NPY_NO_SMP", 0))
-
-def check_math_runtime(conf):
- header_name = "Python.h math.h"
- features = "c cprogram pyext"
-
- mlibs = [None, "m", "cpml"]
- mathlib = os.environ.get('MATHLIB')
- if mathlib:
- mlibs.insert(0, mathlib)
-
- mlib = None
- for lib in mlibs:
- try:
- if lib is None:
- kw = {}
- else:
- kw = {"lib": lib}
- st = conf.check_functions_at_once(["exp"], uselib_store="M", **kw)
- mlib = lib or []
- break
- except waflib.Errors.ConfigurationError:
- pass
- if mlib is None:
- raise waflib.Errors.ConfigurationError("No math lib found !")
-
- # XXX: this is ugly: mathlib has nothing to do in a public header file
- NUMPYCONFIG_SYM.append(('MATHLIB', ','.join(mlib)))
-
- # FIXME: look more into those additional mandatory functions
- MANDATORY_FUNCS.extend(["pow"])
- conf.check_functions_at_once(MANDATORY_FUNCS, use="M")
-
- #mfuncs = ('expl', 'expf', 'log1p', 'expm1', 'asinh', 'atanhf', 'atanhl',
- # 'rint', 'trunc')
- #conf.check_functions_at_once(mfuncs, use="M")
-
- header_name = "Python.h math.h"
- # XXX: with MSVC compiler, one needs to have cprogram defined. Find out why.
- features = "c pyext cprogram"
- for f in OPTIONAL_STDFUNCS_MAYBE:
- try:
- conf.check_declaration("HAVE_%s" % numpy._build_utils.waf.sanitize_string(f),
- header_name=header_name,
- features=features)
- OPTIONAL_STDFUNCS.remove(f)
- except waflib.Errors.ConfigurationError:
- pass
-
- conf.check_functions_at_once(OPTIONAL_STDFUNCS,
- features=features, mandatory=False, use="M")
- conf.check_functions_at_once(C99_FUNCS_SINGLE,
- features=features, mandatory=False, use="M")
- conf.check_functions_at_once(C99_FUNCS_EXTENDED,
- features=features, mandatory=False, use="M")
- # TODO: add OPTIONAL_HEADERS, OPTIONAL_INTRINSICS and
- # OPTIONAL_GCC_ATTRIBUTES (see setup.py and gh-3766). These are
- # performance optimizations for GCC.
-
- for f in ["isnan", "isinf", "signbit", "isfinite"]:
- try:
- conf.check_declaration("HAVE_DECL_%s" % f.upper(), header_name=header_name,
- features=features)
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(),
- '#define NPY_HAVE_DECL_%s' % f.upper()))
- except waflib.Errors.ConfigurationError:
- try:
- conf.check_declaration(f, header_name=header_name, features=features)
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(),
- '#define NPY_HAVE_DECL_%s' % f.upper()))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_DECL_%s' % f.upper(), ''))
-
-def check_complex(conf):
- if conf.check_header("complex.h", mandatory=False):
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_USE_C99_COMPLEX',
- '#define NPY_USE_C99_COMPLEX 1'))
- for t in C99_COMPLEX_TYPES:
- try:
- conf.check_type(t, header_name='complex.h')
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_%s' % numpy._build_utils.waf.sanitize_string(t),
- '#define NPY_HAVE_%s' % numpy._build_utils.waf.sanitize_string(t)))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_%s' % numpy._build_utils.waf.sanitize_string(t), ''))
-
- for prec in ["", "f", "l"]:
- flist = [f + prec for f in C99_COMPLEX_FUNCS]
- conf.check_functions_at_once(flist, use="M")
- else:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_USE_C99_COMPLEX', ''))
- for t in C99_COMPLEX_TYPES:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_%s' % numpy._build_utils.waf.sanitize_string(t), ''))
-
-def check_win32_specifics(conf):
- from numpy.distutils.misc_util import get_build_architecture
- arch = get_build_architecture()
-
- # On win32, force long double format string to be 'g', not
- # 'Lg', since the MS runtime does not support long double whose
- # size is > sizeof(double)
- if arch == "Intel" or arch == "AMD64":
- conf.define('FORCE_NO_LONG_DOUBLE_FORMATTING', 1)
-
-@hooks.post_configure
-def post_configure(context):
- conf = context.waf_context
-
- try:
- conf.check_header("endian.h")
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_ENDIAN_H',
- '#define NPY_HAVE_ENDIAN_H 1'))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_HAVE_ENDIAN_H', ''))
-
- try:
- conf.check_declaration('PRIdPTR', header_name='inttypes.h')
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_USE_C99_FORMATS', '#define NPY_USE_C99_FORMATS 1'))
- except waflib.Errors.ConfigurationError:
- NUMPYCONFIG_SYM.append(('DEFINE_NPY_USE_C99_FORMATS', ''))
-
- type_checks(conf)
- signal_smp_checks(conf)
- check_math_runtime(conf)
- numpy._build_utils.waf.check_inline(conf)
- check_complex(conf)
- check_win32_specifics(conf)
-
- if ENABLE_SEPARATE_COMPILATION:
- conf.define("ENABLE_SEPARATE_COMPILATION", 1)
-
- conf.env["CONFIG_HEADER_TEMPLATE"] = """\
-%(content)s
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif"""
- conf.write_config_header("config.h")
-
- write_numpy_config(conf)
-
- write_numpy_inifiles(conf)
-
- conf.env.INCLUDES = [".", "include", "include/numpy"]
-
- # FIXME: Should be handled in bento context
- conf.store()
-
-class numpy_api_generator(Task):
- vars = ["API_TUPLE"]
- color = "BLUE"
- before = ["c"]
- def run(self):
- targets = [o.path_from(self.generator.bld.srcnode) for o in self.outputs]
- generate_numpy_api.do_generate_api(targets, self.env.API_TUPLE)
- return 0
-
-class ufunc_api_generator(Task):
- vars = ["API_TUPLE"]
- color = "BLUE"
- before = ["c"]
- def run(self):
- targets = [o.path_from(self.generator.bld.srcnode) for o in self.outputs]
- generate_ufunc_api.do_generate_api(targets, self.env.API_TUPLE)
- return 0
-
-@waflib.TaskGen.feature("numpy_api_gen")
-def process_multiarray_api_generator(self):
- tsk = self.create_task("numpy_api_generator")
- if hasattr(self, "api_tuple"):
- tsk.env.API_TUPLE = self.api_tuple
- else:
- if not "API_TUPLE" in tsk.env:
- tsk.env.API_TUPLE = ()
- header = "__%s.h" % self.pattern
- source = "__%s.c" % self.pattern
- txt = self.pattern + ".txt"
- files = [header, source, txt]
- tsk.set_outputs([self.path.find_or_declare(f) for f in files])
-
- self.bld.register_outputs("numpy_gen_headers", "multiarray",
- [output for output in tsk.outputs if output.suffix() == ".h"],
- target_dir="$sitedir/numpy/core/include/numpy")
-
- return tsk
-
-@waflib.TaskGen.feature("ufunc_api_gen")
-def process_api_ufunc_generator(self):
- tsk = self.create_task("ufunc_api_generator")
- if hasattr(self, "api_tuple"):
- tsk.env.API_TUPLE = self.api_tuple
- else:
- if not "API_TUPLE" in tsk.env:
- tsk.env.API_TUPLE = ()
- header = "__%s.h" % self.pattern
- source = "__%s.c" % self.pattern
- txt = self.pattern + ".txt"
- files = [header, source, txt]
- tsk.set_outputs([self.path.find_or_declare(f) for f in files])
-
- headers = [output for output in tsk.outputs if output.suffix() == ".h"]
- self.bld.register_outputs("numpy_gen_headers", "ufunc", headers,
- target_dir="$sitedir/numpy/core/include/numpy")
- return tsk
-
-class umath_generator(Task):
- vars = ["API_TUPLE"]
- color = "BLUE"
- before = ["c"]
- ext_in = ".in"
- def run(self):
- if len(self.outputs) > 1:
- raise ValueError("Only one target (the .c file) is expected in the umath generator task")
- code = generate_umath.make_code(generate_umath.defdict, generate_umath.__file__)
- self.outputs[0].write(code)
- return 0
-
-@waflib.TaskGen.feature("umath_gen")
-def process_umath_generator(self):
- tsk = self.create_task("umath_generator")
- source = "__%s.c" % self.pattern
- tsk.set_outputs(self.path.find_or_declare(source))
- return tsk
-
-from os.path import join as pjoin
-@hooks.pre_build
-def pre_build(context):
- bld = context.waf_context
-
- context.register_category("numpy_gen_inifiles")
- inifile_mlib = context.local_node.declare(os.path.join(
- "lib", "npy-pkg-config", "mlib.ini"))
- inifile_npymath = context.local_node.declare(os.path.join(
- "lib", "npy-pkg-config", "npymath.ini"))
- context.register_outputs("numpy_gen_inifiles", "numpyconfig",
- [inifile_mlib, inifile_npymath])
-
- context.register_category("numpy_gen_headers")
-
- numpyconfig_h = context.local_node.declare(os.path.join("include", "numpy", "_numpyconfig.h"))
- context.register_outputs("numpy_gen_headers", "numpyconfig", [numpyconfig_h])
-
- context.tweak_library("lib/npymath", includes=["src/private", "src/npymath", "include"])
-
- context.tweak_library("npysort",
- includes=[".", "src/private", "src/npysort"],
- use="npymath")
-
- def builder_multiarray(extension):
- bld(name="multiarray_api",
- features="numpy_api_gen",
- api_tuple=multiarray_api,
- pattern="multiarray_api")
-
- multiarray_templates = ["src/multiarray/scalartypes.c.src",
- "src/multiarray/arraytypes.c.src",
- "src/multiarray/nditer_templ.c.src",
- "src/multiarray/lowlevel_strided_loops.c.src",
- "src/private/templ_common.h.src",
- "src/multiarray/einsum.c.src"]
- bld(target="multiarray_templates", source=multiarray_templates)
- if ENABLE_SEPARATE_COMPILATION:
- sources = [
- pjoin('src', 'multiarray', 'arrayobject.c'),
- pjoin('src', 'multiarray', 'alloc.c'),
- pjoin('src', 'multiarray', 'arraytypes.c.src'),
- pjoin('src', 'multiarray', 'array_assign.c'),
- pjoin('src', 'multiarray', 'array_assign_array.c'),
- pjoin('src', 'multiarray', 'array_assign_scalar.c'),
- pjoin('src', 'multiarray', 'buffer.c'),
- pjoin('src', 'multiarray', 'calculation.c'),
- pjoin('src', 'multiarray', 'common.c'),
- pjoin('src', 'multiarray', 'compiled_base.c'),
- pjoin('src', 'multiarray', 'conversion_utils.c'),
- pjoin('src', 'multiarray', 'convert.c'),
- pjoin('src', 'multiarray', 'convert_datatype.c'),
- pjoin('src', 'multiarray', 'ctors.c'),
- pjoin('src', 'multiarray', 'datetime.c'),
- pjoin('src', 'multiarray', 'datetime_busday.c'),
- pjoin('src', 'multiarray', 'datetime_busdaycal.c'),
- pjoin('src', 'multiarray', 'datetime_strings.c'),
- pjoin('src', 'multiarray', 'descriptor.c'),
- pjoin('src', 'multiarray', 'dtype_transfer.c'),
- pjoin('src', 'multiarray', 'einsum.c.src'),
- pjoin('src', 'multiarray', 'flagsobject.c'),
- pjoin('src', 'multiarray', 'getset.c'),
- pjoin('src', 'multiarray', 'hashdescr.c'),
- pjoin('src', 'multiarray', 'item_selection.c'),
- pjoin('src', 'multiarray', 'iterators.c'),
- pjoin('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
- pjoin('src', 'multiarray', 'mapping.c'),
- pjoin('src', 'multiarray', 'methods.c'),
- pjoin('src', 'multiarray', 'multiarraymodule.c'),
- pjoin('src', 'multiarray', 'nditer_templ.c.src'),
- pjoin('src', 'multiarray', 'nditer_api.c'),
- pjoin('src', 'multiarray', 'nditer_constr.c'),
- pjoin('src', 'multiarray', 'nditer_pywrap.c'),
- pjoin('src', 'multiarray', 'number.c'),
- pjoin('src', 'multiarray', 'numpymemoryview.c'),
- pjoin('src', 'multiarray', 'numpyos.c'),
- pjoin('src', 'multiarray', 'refcount.c'),
- pjoin('src', 'multiarray', 'scalarapi.c'),
- pjoin('src', 'multiarray', 'scalartypes.c.src'),
- pjoin('src', 'multiarray', 'sequence.c'),
- pjoin('src', 'multiarray', 'shape.c'),
- pjoin('src', 'multiarray', 'ucsnarrow.c'),
- pjoin('src', 'multiarray', 'usertypes.c'),
- pjoin('src', 'multiarray', 'vdot.c'),
- pjoin('src', 'private', 'templ_common.h.src'),
- pjoin('src', 'private', 'mem_overlap.c'),
- ]
-
- if bld.env.HAS_CBLAS:
- sources.extend([pjoin('src', 'multiarray', 'cblasfuncs.c'),
- pjoin('src', 'multiarray', 'python_xerbla.c'),
- ])
- if "Accelerate" in bld.env.FRAMEWORK_CBLAS:
- sources.extend([make_relpath(get_sgemv_fix()[0])])
- else:
- sources = extension.sources
-
- use = 'npysort npymath'
- defines = ['_FILE_OFFSET_BITS=64',
- '_LARGEFILE_SOURCE=1',
- '_LARGEFILE64_SOURCE=1']
-
- if bld.env.HAS_CBLAS:
- use += ' CBLAS'
- defines.append('HAVE_CBLAS')
-
- includes = ["src/multiarray", "src/private"]
- return context.default_builder(extension,
- includes=includes,
- source=sources,
- use=use,
- defines=defines
- )
- context.register_builder("multiarray", builder_multiarray)
-
- def build_ufunc(extension):
- bld(features="ufunc_api_gen",
- api_tuple=ufunc_api,
- pattern="ufunc_api",
- name="ufunc_api")
-
- ufunc_templates = [
- "src/umath/scalarmath.c.src",
- "src/umath/loops.h.src",
- "src/umath/loops.c.src",
- "src/umath/funcs.inc.src",
- "src/umath/simd.inc.src"]
- bld(target="ufunc_templates", source=ufunc_templates)
-
- bld(features="umath_gen",
- pattern="umath_generated",
- name="umath_gen")
-
- includes = ["src/umath", "src/multiarray", "src/private"]
- if ENABLE_SEPARATE_COMPILATION:
- sources = [
- pjoin("src", "umath", "scalarmath.c.src"),
- pjoin("src", "umath", "loops.h.src"),
- pjoin("src", "umath", "loops.c.src"),
- pjoin('src', 'umath', 'reduction.c'),
- pjoin('src', 'umath', 'ufunc_object.c'),
- pjoin('src', 'umath', 'ufunc_type_resolution.c'),
- pjoin("src", "umath", "umathmodule.c"),
- ]
- else:
- sources = extension.sources
- return context.default_builder(extension,
- includes=includes,
- source=sources,
- use="npymath")
- context.register_builder("umath", build_ufunc)
-
- context.tweak_extension("multiarray_tests", use="npymath", includes=["src/private"])
- context.tweak_extension("umath_tests", use="npymath", includes=["src/private"])
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 10626fe9f..10f4a98c5 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -1371,7 +1371,7 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
def ravel(a, order='C'):
- """Return a flattened array.
+ """Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
@@ -1415,6 +1415,7 @@ def ravel(a, order='C'):
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
+ ndarray.reshape : Change the shape of an array without changing its data.
Notes
-----
@@ -1425,6 +1426,9 @@ def ravel(a, order='C'):
the index along the last quickest. The opposite holds for
column-major, Fortran-style index ordering.
+ When a view is desired in as many cases as possible, ``arr.reshape(-1)``
+ may be preferable.
+
Examples
--------
It is equivalent to ``reshape(-1, order=order)``.
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 72ddaf66b..cd9669798 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -325,7 +325,7 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
{
int v;
v = PyObject_RichCompareBool(i1, i2, Py_LT);
- if (v == 0) {
+ if (v == 1) {
*cmp = -1;
return 1;
}
@@ -334,7 +334,7 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
}
v = PyObject_RichCompareBool(i1, i2, Py_GT);
- if (v == 0) {
+ if (v == 1) {
*cmp = 1;
return 1;
}
@@ -343,7 +343,7 @@ PyObject_Cmp(PyObject *i1, PyObject *i2, int *cmp)
}
v = PyObject_RichCompareBool(i1, i2, Py_EQ);
- if (v == 0) {
+ if (v == 1) {
*cmp = 0;
return 1;
}
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 1b7dfca3e..5d4464ea7 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -258,8 +258,9 @@ def full(shape, fill_value, dtype=None, order='C'):
fill_value : scalar
Fill value.
dtype : data-type, optional
- The desired data-type for the array, e.g., `numpy.int8`. Default is
- is chosen as `np.array(fill_value).dtype`.
+ The desired data-type for the array, e.g., `np.int8`. Default
+ is `float`, but will change to `np.array(fill_value).dtype` in a
+ future release.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
@@ -290,6 +291,10 @@ def full(shape, fill_value, dtype=None, order='C'):
"""
a = empty(shape, dtype, order)
+ if array(fill_value).dtype != a.dtype:
+ warnings.warn(
+ "in the future, full(..., {0!r}) will return an array of {1!r}".
+ format(fill_value, array(fill_value).dtype), FutureWarning)
multiarray.copyto(a, fill_value, casting='unsafe')
return a
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index 6d9926d89..361bf9082 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -736,6 +736,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
+ join('src', 'multiarray', 'cblasfuncs.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
@@ -839,6 +840,8 @@ def configuration(parent_package='',top_path=None):
blas_info = get_info('blas_opt', 0)
if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
extra_info = blas_info
+ # These files are also in MANIFEST.in so that they are always in
+ # the source distribution independently of HAVE_CBLAS.
multiarray_src.extend([join('src', 'multiarray', 'cblasfuncs.c'),
join('src', 'multiarray', 'python_xerbla.c'),
])
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 68944a1bd..5aa7e6142 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -2784,7 +2784,7 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap))
}
return 1;
}
-#if defined(NPY_PY3K)
+
if (PyObject_RichCompareBool(*ip1, *ip2, Py_LT) == 1) {
return -1;
}
@@ -2794,9 +2794,6 @@ OBJECT_compare(PyObject **ip1, PyObject **ip2, PyArrayObject *NPY_UNUSED(ap))
else {
return 0;
}
-#else
- return PyObject_Compare(*ip1, *ip2);
-#endif
}
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 3b9a10da5..88064c1d6 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -777,20 +777,16 @@ PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg)
#endif
PyObject *obj, *err;
- if (!o) {
+ /*
+ * Be a bit stricter and not allow bools.
+ * np.bool_ is also disallowed as Boolean arrays do not currently
+ * support index.
+ */
+ if (!o || PyBool_Check(o) || PyArray_IsScalar(o, Bool)) {
PyErr_SetString(PyExc_TypeError, msg);
return -1;
}
- /* Be a bit stricter and not allow bools, np.bool_ is handled later */
- if (PyBool_Check(o)) {
- /* 2013-04-13, 1.8 */
- if (DEPRECATE("using a boolean instead of an integer"
- " will result in an error in the future") < 0) {
- return -1;
- }
- }
-
/*
* Since it is the usual case, first check if o is an integer. This is
* an exact check, since otherwise __index__ is used.
@@ -816,84 +812,22 @@ PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg)
return (npy_intp)long_value;
}
- /* Disallow numpy.bool_. Boolean arrays do not currently support index. */
- if (PyArray_IsScalar(o, Bool)) {
- /* 2013-06-09, 1.8 */
- if (DEPRECATE("using a boolean instead of an integer"
- " will result in an error in the future") < 0) {
- return -1;
- }
- }
-
/*
* The most general case. PyNumber_Index(o) covers everything
* including arrays. In principle it may be possible to replace
* the whole function by PyIndex_AsSSize_t after deprecation.
*/
obj = PyNumber_Index(o);
- if (obj) {
+ if (obj == NULL) {
+ return -1;
+ }
#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
- long_value = PyLong_AsLongLong(obj);
+ long_value = PyLong_AsLongLong(obj);
#else
- long_value = PyLong_AsLong(obj);
+ long_value = PyLong_AsLong(obj);
#endif
- Py_DECREF(obj);
- goto finish;
- }
- else {
- /*
- * Set the TypeError like PyNumber_Index(o) would after trying
- * the general case.
- */
- PyErr_Clear();
- }
+ Py_DECREF(obj);
- /*
- * For backward compatibility check the number C-Api number protcol
- * This should be removed up the finish label after deprecation.
- */
- if (Py_TYPE(o)->tp_as_number != NULL &&
- Py_TYPE(o)->tp_as_number->nb_int != NULL) {
- obj = Py_TYPE(o)->tp_as_number->nb_int(o);
- if (obj == NULL) {
- return -1;
- }
- #if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
- long_value = PyLong_AsLongLong(obj);
- #else
- long_value = PyLong_AsLong(obj);
- #endif
- Py_DECREF(obj);
- }
-#if !defined(NPY_PY3K)
- else if (Py_TYPE(o)->tp_as_number != NULL &&
- Py_TYPE(o)->tp_as_number->nb_long != NULL) {
- obj = Py_TYPE(o)->tp_as_number->nb_long(o);
- if (obj == NULL) {
- return -1;
- }
- #if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
- long_value = PyLong_AsLongLong(obj);
- #else
- long_value = PyLong_AsLong(obj);
- #endif
- Py_DECREF(obj);
- }
-#endif
- else {
- PyErr_SetString(PyExc_TypeError, msg);
- return -1;
- }
- /* Give a deprecation warning, unless there was already an error */
- if (!error_converting(long_value)) {
- /* 2013-04-13, 1.8 */
- if (DEPRECATE("using a non-integer number instead of an integer"
- " will result in an error in the future") < 0) {
- return -1;
- }
- }
-
- finish:
if (error_converting(long_value)) {
err = PyErr_Occurred();
/* Only replace TypeError's here, which are the normal errors. */
@@ -902,9 +836,9 @@ PyArray_PyIntAsIntp_ErrMsg(PyObject *o, const char * msg)
}
return -1;
}
-
goto overflow_check; /* silence unused warning */
- overflow_check:
+
+overflow_check:
#if (NPY_SIZEOF_LONG < NPY_SIZEOF_INTP)
#if (NPY_SIZEOF_LONGLONG > NPY_SIZEOF_INTP)
if ((long_value < NPY_MIN_INTP) || (long_value > NPY_MAX_INTP)) {
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index b6e831498..42a12db14 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -285,35 +285,13 @@ prepare_index(PyArrayObject *self, PyObject *index,
/* Index is an ellipsis (`...`) */
if (obj == Py_Ellipsis) {
- /*
- * If there is more then one Ellipsis, it is replaced. Deprecated,
- * since it is hard to imagine anyone using two Ellipsis and
- * actually planning on all but the first being automatically
- * replaced with a slice.
- */
+ /* At most one ellipsis in an index */
if (index_type & HAS_ELLIPSIS) {
- /* 2013-04-14, 1.8 */
- if (DEPRECATE(
- "an index can only have a single Ellipsis (`...`); "
- "replace all but one with slices (`:`).") < 0) {
- goto failed_building_indices;
- }
- index_type |= HAS_SLICE;
-
- indices[curr_idx].type = HAS_SLICE;
- indices[curr_idx].object = PySlice_New(NULL, NULL, NULL);
-
- if (indices[curr_idx].object == NULL) {
- goto failed_building_indices;
- }
-
- used_ndim += 1;
- new_ndim += 1;
- curr_idx += 1;
- continue;
+ PyErr_Format(PyExc_IndexError,
+ "an index can only have a single ellipsis ('...')");
+ goto failed_building_indices;
}
index_type |= HAS_ELLIPSIS;
-
indices[curr_idx].type = HAS_ELLIPSIS;
indices[curr_idx].object = NULL;
/* number of slices it is worth, won't update if it is 0: */
@@ -415,102 +393,8 @@ prepare_index(PyArrayObject *self, PyObject *index,
goto failed_building_indices;
}
}
- /*
- * Special case to allow 0-d boolean indexing with
- * scalars. Should be removed after boolean-array
- * like as integer-array like deprecation.
- * (does not cover ufunc.at, because it does not use the
- * boolean special case, but that should not matter...)
- * Since all but strictly boolean indices are invalid,
- * there is no need for any further conversion tries.
- */
- else if (PyArray_NDIM(self) == 0) {
- arr = tmp_arr;
- }
else {
- /*
- * These Checks can be removed after deprecation, since
- * they should then be either correct already or error out
- * later just like a normal array.
- */
- if (PyArray_ISBOOL(tmp_arr)) {
- /* 2013-04-14, 1.8 */
- if (DEPRECATE_FUTUREWARNING(
- "in the future, boolean array-likes will be "
- "handled as a boolean array index") < 0) {
- Py_DECREF(tmp_arr);
- goto failed_building_indices;
- }
- if (PyArray_NDIM(tmp_arr) == 0) {
- /*
- * Need to raise an error here, since the
- * DeprecationWarning before was not triggered.
- * TODO: A `False` triggers a Deprecation *not* a
- * a FutureWarning.
- */
- PyErr_SetString(PyExc_IndexError,
- "in the future, 0-d boolean arrays will be "
- "interpreted as a valid boolean index");
- Py_DECREF(tmp_arr);
- goto failed_building_indices;
- }
- else {
- arr = tmp_arr;
- }
- }
- /*
- * Note: Down the road, the integers will be cast to intp.
- * The user has to make sure they can be safely cast.
- * If not, we might index wrong instead of an giving
- * an error.
- */
- else if (!PyArray_ISINTEGER(tmp_arr)) {
- if (PyArray_NDIM(tmp_arr) == 0) {
- /* match integer deprecation warning */
- /* 2013-09-25, 1.8 */
- if (DEPRECATE(
- "using a non-integer number instead of an "
- "integer will result in an error in the "
- "future") < 0) {
-
- /* The error message raised in the future */
- PyErr_SetString(PyExc_IndexError,
- "only integers, slices (`:`), ellipsis (`...`), "
- "numpy.newaxis (`None`) and integer or boolean "
- "arrays are valid indices");
- Py_DECREF((PyObject *)tmp_arr);
- goto failed_building_indices;
- }
- }
- else {
- /* 2013-09-25, 1.8 */
- if (DEPRECATE(
- "non integer (and non boolean) array-likes "
- "will not be accepted as indices in the "
- "future") < 0) {
-
- /* Error message to be raised in the future */
- PyErr_SetString(PyExc_IndexError,
- "non integer (and non boolean) array-likes will "
- "not be accepted as indices in the future");
- Py_DECREF((PyObject *)tmp_arr);
- goto failed_building_indices;
- }
- }
- }
-
- arr = (PyArrayObject *)PyArray_FromArray(tmp_arr,
- PyArray_DescrFromType(NPY_INTP),
- NPY_ARRAY_FORCECAST);
-
- if (arr == NULL) {
- /* Since this will be removed, handle this later */
- PyErr_Clear();
- arr = tmp_arr;
- }
- else {
- Py_DECREF((PyObject *)tmp_arr);
- }
+ arr = tmp_arr;
}
}
else {
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index e72c355dc..2c694f936 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -1443,13 +1443,9 @@ _equivalent_fields(PyObject *field1, PyObject *field2) {
if (field1 == NULL || field2 == NULL) {
return 0;
}
-#if defined(NPY_PY3K)
+
val = PyObject_RichCompareBool(field1, field2, Py_EQ);
if (val != 1 || PyErr_Occurred()) {
-#else
- val = PyObject_Compare(field1, field2);
- if (val != 0 || PyErr_Occurred()) {
-#endif
same = 0;
}
else {
@@ -1476,13 +1472,8 @@ _equivalent_subarrays(PyArray_ArrayDescr *sub1, PyArray_ArrayDescr *sub2)
return 0;
}
-#if defined(NPY_PY3K)
val = PyObject_RichCompareBool(sub1->shape, sub2->shape, Py_EQ);
if (val != 1 || PyErr_Occurred()) {
-#else
- val = PyObject_Compare(sub1->shape, sub2->shape);
- if (val != 0 || PyErr_Occurred()) {
-#endif
PyErr_Clear();
return 0;
}
@@ -2119,8 +2110,6 @@ array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
}
fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
if (fp == NULL) {
- PyErr_SetString(PyExc_IOError,
- "first argument must be an open file");
Py_DECREF(file);
return NULL;
}
@@ -2255,8 +2244,10 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
int typenum;
char *ip1, *ip2, *op;
- npy_intp n, stride;
+ npy_intp n, stride1, stride2;
PyObject *op1, *op2;
+ npy_intp newdimptr[1] = {-1};
+ PyArray_Dims newdims = {newdimptr, 1};
PyArrayObject *ap1 = NULL, *ap2 = NULL, *ret = NULL;
PyArray_Descr *type;
PyArray_DotFunc *vdot;
@@ -2280,7 +2271,8 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args)
Py_DECREF(type);
goto fail;
}
- op1 = PyArray_Ravel(ap1, NPY_CORDER);
+
+ op1 = PyArray_Newshape(ap1, &newdims, NPY_CORDER);
if (op1 == NULL) {
Py_DECREF(type);
goto fail;
@@ -2292,7 +2284,7 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (ap2 == NULL) {
goto fail;
}
- op2 = PyArray_Ravel(ap2, NPY_CORDER);
+ op2 = PyArray_Newshape(ap2, &newdims, NPY_CORDER);
if (op2 == NULL) {
goto fail;
}
@@ -2312,7 +2304,8 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args)
}
n = PyArray_DIM(ap1, 0);
- stride = type->elsize;
+ stride1 = PyArray_STRIDE(ap1, 0);
+ stride2 = PyArray_STRIDE(ap2, 0);
ip1 = PyArray_DATA(ap1);
ip2 = PyArray_DATA(ap2);
op = PyArray_DATA(ret);
@@ -2340,11 +2333,11 @@ array_vdot(PyObject *NPY_UNUSED(dummy), PyObject *args)
}
if (n < 500) {
- vdot(ip1, stride, ip2, stride, op, n, NULL);
+ vdot(ip1, stride1, ip2, stride2, op, n, NULL);
}
else {
NPY_BEGIN_THREADS_DESCR(type);
- vdot(ip1, stride, ip2, stride, op, n, NULL);
+ vdot(ip1, stride1, ip2, stride2, op, n, NULL);
NPY_END_THREADS_DESCR(type);
}
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index 953a84eef..fec015a30 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -1025,18 +1025,11 @@ _array_copy_nice(PyArrayObject *self)
static PyObject *
array_index(PyArrayObject *v)
{
- if (!PyArray_ISINTEGER(v) || PyArray_SIZE(v) != 1) {
- PyErr_SetString(PyExc_TypeError, "only integer arrays with " \
- "one element can be converted to an index");
+ if (!PyArray_ISINTEGER(v) || PyArray_NDIM(v) != 0) {
+ PyErr_SetString(PyExc_TypeError,
+ "only integer scalar arrays can be converted to a scalar index");
return NULL;
}
- if (PyArray_NDIM(v) != 0) {
- /* 2013-04-20, 1.8 */
- if (DEPRECATE("converting an array with ndim > 0 to an index"
- " will result in an error in the future") < 0) {
- return NULL;
- }
- }
return PyArray_DESCR(v)->f->getitem(PyArray_DATA(v), v);
}
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index b679d6d5d..f46f820ca 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -940,55 +940,51 @@ PyArray_Ravel(PyArrayObject *arr, NPY_ORDER order)
order = NPY_FORTRANORDER;
}
}
+ else if (order == NPY_ANYORDER) {
+ order = PyArray_ISFORTRAN(arr) ? NPY_FORTRANORDER : NPY_CORDER;
+ }
- if (order != NPY_KEEPORDER) {
- return PyArray_Newshape(arr, &newdim, order);
+ if (order == NPY_CORDER && PyArray_IS_C_CONTIGUOUS(arr)) {
+ return PyArray_Newshape(arr, &newdim, NPY_CORDER);
+ }
+ else if (order == NPY_FORTRANORDER && PyArray_IS_F_CONTIGUOUS(arr)) {
+ return PyArray_Newshape(arr, &newdim, NPY_FORTRANORDER);
}
/* For KEEPORDER, check if we can make a flattened view */
- else {
+ else if (order == NPY_KEEPORDER) {
npy_stride_sort_item strideperm[NPY_MAXDIMS];
- npy_intp stride = 0, base_stride = NPY_MIN_INTP;
+ npy_intp stride;
int i, ndim = PyArray_NDIM(arr);
PyArray_CreateSortedStridePerm(PyArray_NDIM(arr),
PyArray_STRIDES(arr), strideperm);
+ /* The output array must be contiguous, so the first stride is fixed */
+ stride = PyArray_ITEMSIZE(arr);
+
for (i = ndim-1; i >= 0; --i) {
if (PyArray_DIM(arr, strideperm[i].perm) == 1) {
/* A size one dimension does not matter */
continue;
}
- if (base_stride == NPY_MIN_INTP) {
- stride = strideperm[i].stride;
- base_stride = stride;
- }
- else if (strideperm[i].stride != stride) {
+ if (strideperm[i].stride != stride) {
break;
}
stride *= PyArray_DIM(arr, strideperm[i].perm);
}
-#if NPY_RELAXED_STRIDES_CHECKING == 0
- /*
- * For tidyness, cannot be reached with relaxed strides checking
- * since the array is guaranteed contiguous (without, not sure...)
- */
- if (base_stride == NPY_MIN_INTP) {
- base_stride = PyArray_ITEMSIZE(arr);
- }
-#endif
-
/* If all the strides matched a contiguous layout, return a view */
if (i < 0) {
PyArrayObject *ret;
+ stride = PyArray_ITEMSIZE(arr);
val[0] = PyArray_SIZE(arr);
Py_INCREF(PyArray_DESCR(arr));
ret = (PyArrayObject *)PyArray_NewFromDescr(Py_TYPE(arr),
PyArray_DESCR(arr),
1, val,
- &base_stride,
+ &stride,
PyArray_BYTES(arr),
PyArray_FLAGS(arr),
(PyObject *)arr);
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index e57dd5bd0..a46b9e7a8 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -2630,41 +2630,42 @@ OBJECT_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUS
NPY_NO_EXPORT void
OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
-#if defined(NPY_PY3K)
PyObject *zero = PyLong_FromLong(0);
+
UNARY_LOOP {
PyObject *in1 = *(PyObject **)ip1;
PyObject **out = (PyObject **)op1;
+ PyObject *ret = NULL;
int v;
- PyObject *ret;
- if (PyObject_Cmp(in1 ? in1 : Py_None, zero, &v) == -1) {
- return;
+
+ if (in1 == NULL) {
+ in1 = Py_None;
}
- ret = PyLong_FromLong(v);
- if (PyErr_Occurred()) {
- Py_DECREF(zero);
- return;
+
+ if ((v = PyObject_RichCompareBool(in1, zero, Py_LT)) == 1) {
+ ret = PyLong_FromLong(-1);
}
- Py_XDECREF(*out);
- *out = ret;
- }
- Py_DECREF(zero);
-#else
- PyObject *zero = PyInt_FromLong(0);
- UNARY_LOOP {
- PyObject *in1 = *(PyObject **)ip1;
- PyObject **out = (PyObject **)op1;
- PyObject *ret = PyInt_FromLong(
- PyObject_Compare(in1 ? in1 : Py_None, zero));
- if (PyErr_Occurred()) {
- Py_DECREF(zero);
- return;
+ else if (v == 0 &&
+ (v = PyObject_RichCompareBool(in1, zero, Py_GT)) == 1) {
+ ret = PyLong_FromLong(1);
+ }
+ else if (v == 0 &&
+ (v = PyObject_RichCompareBool(in1, zero, Py_EQ)) == 1) {
+ ret = PyLong_FromLong(0);
+ }
+ else if (v == 0) {
+ /* in1 is NaN */
+ PyErr_SetString(PyExc_TypeError,
+ "unorderable types for comparison");
+ }
+
+ if (ret == NULL) {
+ break;
}
Py_XDECREF(*out);
*out = ret;
}
- Py_DECREF(zero);
-#endif
+ Py_XDECREF(zero);
}
/*
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 8ec0f5e7f..e3aea7efb 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -115,237 +115,6 @@ class _DeprecationTestCase(object):
exceptions=tuple(), args=args, kwargs=kwargs)
-class TestFloatNonIntegerArgumentDeprecation(_DeprecationTestCase):
- """
- These test that ``DeprecationWarning`` is given when you try to use
- non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
- and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
-
- After deprecation, changes need to be done inside conversion_utils.c
- in PyArray_PyIntAsIntp and possibly PyArray_IntpConverter.
- In iterators.c the function slice_GetIndices could be removed in favor
- of its python equivalent and in mapping.c the function _tuple_of_integers
- can be simplified (if ``np.array([1]).__index__()`` is also deprecated).
-
- As for the deprecation time-frame: via Ralf Gommers,
-
- "Hard to put that as a version number, since we don't know if the
- version after 1.8 will be 6 months or 2 years after. I'd say 2
- years is reasonable."
-
- I interpret this to mean 2 years after the 1.8 release. Possibly
- giving a PendingDeprecationWarning before that (which is visible
- by default)
-
- """
- message = "using a non-integer number instead of an integer " \
- "will result in an error in the future"
-
- def test_indexing(self):
- a = np.array([[[5]]])
-
- def assert_deprecated(*args, **kwargs):
- self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
-
- assert_deprecated(lambda: a[0.0])
- assert_deprecated(lambda: a[0, 0.0])
- assert_deprecated(lambda: a[0.0, 0])
- assert_deprecated(lambda: a[0.0,:])
- assert_deprecated(lambda: a[:, 0.0])
- assert_deprecated(lambda: a[:, 0.0,:])
- assert_deprecated(lambda: a[0.0,:,:])
- assert_deprecated(lambda: a[0, 0, 0.0])
- assert_deprecated(lambda: a[0.0, 0, 0])
- assert_deprecated(lambda: a[0, 0.0, 0])
- assert_deprecated(lambda: a[-1.4])
- assert_deprecated(lambda: a[0, -1.4])
- assert_deprecated(lambda: a[-1.4, 0])
- assert_deprecated(lambda: a[-1.4,:])
- assert_deprecated(lambda: a[:, -1.4])
- assert_deprecated(lambda: a[:, -1.4,:])
- assert_deprecated(lambda: a[-1.4,:,:])
- assert_deprecated(lambda: a[0, 0, -1.4])
- assert_deprecated(lambda: a[-1.4, 0, 0])
- assert_deprecated(lambda: a[0, -1.4, 0])
-
- # Test that the slice parameter deprecation warning doesn't mask
- # the scalar index warning.
- assert_deprecated(lambda: a[0.0:, 0.0], num=2)
- assert_deprecated(lambda: a[0.0:, 0.0,:], num=2)
-
- def test_valid_indexing(self):
- a = np.array([[[5]]])
- assert_not_deprecated = self.assert_not_deprecated
-
- assert_not_deprecated(lambda: a[np.array([0])])
- assert_not_deprecated(lambda: a[[0, 0]])
- assert_not_deprecated(lambda: a[:, [0, 0]])
- assert_not_deprecated(lambda: a[:, 0,:])
- assert_not_deprecated(lambda: a[:,:,:])
-
- def test_slicing(self):
- a = np.array([[5]])
-
- def assert_deprecated(*args, **kwargs):
- self.assert_deprecated(*args, exceptions=(IndexError,), **kwargs)
-
- # start as float.
- assert_deprecated(lambda: a[0.0:])
- assert_deprecated(lambda: a[0:, 0.0:2])
- assert_deprecated(lambda: a[0.0::2, :0])
- assert_deprecated(lambda: a[0.0:1:2,:])
- assert_deprecated(lambda: a[:, 0.0:])
- # stop as float.
- assert_deprecated(lambda: a[:0.0])
- assert_deprecated(lambda: a[:0, 1:2.0])
- assert_deprecated(lambda: a[:0.0:2, :0])
- assert_deprecated(lambda: a[:0.0,:])
- assert_deprecated(lambda: a[:, 0:4.0:2])
- # step as float.
- assert_deprecated(lambda: a[::1.0])
- assert_deprecated(lambda: a[0:, :2:2.0])
- assert_deprecated(lambda: a[1::4.0, :0])
- assert_deprecated(lambda: a[::5.0,:])
- assert_deprecated(lambda: a[:, 0:4:2.0])
- # mixed.
- assert_deprecated(lambda: a[1.0:2:2.0], num=2)
- assert_deprecated(lambda: a[1.0::2.0], num=2)
- assert_deprecated(lambda: a[0:, :2.0:2.0], num=2)
- assert_deprecated(lambda: a[1.0:1:4.0, :0], num=2)
- assert_deprecated(lambda: a[1.0:5.0:5.0,:], num=3)
- assert_deprecated(lambda: a[:, 0.4:4.0:2.0], num=3)
- # should still get the DeprecationWarning if step = 0.
- assert_deprecated(lambda: a[::0.0], function_fails=True)
-
- def test_valid_slicing(self):
- a = np.array([[[5]]])
- assert_not_deprecated = self.assert_not_deprecated
-
- assert_not_deprecated(lambda: a[::])
- assert_not_deprecated(lambda: a[0:])
- assert_not_deprecated(lambda: a[:2])
- assert_not_deprecated(lambda: a[0:2])
- assert_not_deprecated(lambda: a[::2])
- assert_not_deprecated(lambda: a[1::2])
- assert_not_deprecated(lambda: a[:2:2])
- assert_not_deprecated(lambda: a[1:2:2])
-
- def test_non_integer_argument_deprecations(self):
- a = np.array([[5]])
-
- self.assert_deprecated(np.reshape, args=(a, (1., 1., -1)), num=2)
- self.assert_deprecated(np.reshape, args=(a, (np.array(1.), -1)))
- self.assert_deprecated(np.take, args=(a, [0], 1.))
- self.assert_deprecated(np.take, args=(a, [0], np.float64(1.)))
-
- def test_non_integer_sequence_multiplication(self):
- # Numpy scalar sequence multiply should not work with non-integers
- def mult(a, b):
- return a * b
-
- self.assert_deprecated(mult, args=([1], np.float_(3)))
- self.assert_not_deprecated(mult, args=([1], np.int_(3)))
-
- def test_reduce_axis_float_index(self):
- d = np.zeros((3,3,3))
- self.assert_deprecated(np.min, args=(d, 0.5))
- self.assert_deprecated(np.min, num=1, args=(d, (0.5, 1)))
- self.assert_deprecated(np.min, num=1, args=(d, (1, 2.2)))
- self.assert_deprecated(np.min, num=2, args=(d, (.2, 1.2)))
-
-
-class TestBooleanArgumentDeprecation(_DeprecationTestCase):
- """This tests that using a boolean as integer argument/indexing is
- deprecated.
-
- This should be kept in sync with TestFloatNonIntegerArgumentDeprecation
- and like it is handled in PyArray_PyIntAsIntp.
- """
- message = "using a boolean instead of an integer " \
- "will result in an error in the future"
-
- def test_bool_as_int_argument(self):
- a = np.array([[[1]]])
-
- self.assert_deprecated(np.reshape, args=(a, (True, -1)))
- self.assert_deprecated(np.reshape, args=(a, (np.bool_(True), -1)))
- # Note that operator.index(np.array(True)) does not work, a boolean
- # array is thus also deprecated, but not with the same message:
- assert_raises(TypeError, operator.index, np.array(True))
- self.assert_deprecated(np.take, args=(a, [0], False))
- self.assert_deprecated(lambda: a[False:True:True], exceptions=IndexError, num=3)
- self.assert_deprecated(lambda: a[False, 0], exceptions=IndexError)
- self.assert_deprecated(lambda: a[False, 0, 0], exceptions=IndexError)
-
-
-class TestArrayToIndexDeprecation(_DeprecationTestCase):
- """This tests that creating an an index from an array is deprecated
- if the array is not 0d.
-
- This can probably be deprecated somewhat faster then the integer
- deprecations. The deprecation period started with NumPy 1.8.
- For deprecation this needs changing of array_index in number.c
- """
- message = "converting an array with ndim \> 0 to an index will result " \
- "in an error in the future"
-
- def test_array_to_index_deprecation(self):
- # This drops into the non-integer deprecation, which is ignored here,
- # so no exception is expected. The raising is effectively tested above.
- a = np.array([[[1]]])
-
- self.assert_deprecated(operator.index, args=(np.array([1]),))
- self.assert_deprecated(np.reshape, args=(a, (a, -1)), exceptions=())
- self.assert_deprecated(np.take, args=(a, [0], a), exceptions=())
- # Check slicing. Normal indexing checks arrays specifically.
- self.assert_deprecated(lambda: a[a:a:a], exceptions=(), num=3)
-
-class TestNonIntegerArrayLike(_DeprecationTestCase):
- """Tests that array likes, i.e. lists give a deprecation warning
- when they cannot be safely cast to an integer.
- """
- message = "non integer \(and non boolean\) array-likes will not be " \
- "accepted as indices in the future"
-
- def test_basic(self):
- a = np.arange(10)
- self.assert_deprecated(a.__getitem__, args=([0.5, 1.5],),
- exceptions=IndexError)
- self.assert_deprecated(a.__getitem__, args=((['1', '2'],),),
- exceptions=IndexError)
-
- self.assert_not_deprecated(a.__getitem__, ([],))
-
- def test_boolean_futurewarning(self):
- a = np.arange(10)
- with warnings.catch_warnings():
- warnings.filterwarnings('always')
- assert_warns(FutureWarning, a.__getitem__, [True])
- # Unfortunatly, the deprecation warning takes precedence:
- #assert_warns(FutureWarning, a.__getitem__, True)
-
- with warnings.catch_warnings():
- warnings.filterwarnings('error')
- assert_raises(FutureWarning, a.__getitem__, [True])
- #assert_raises(FutureWarning, a.__getitem__, True)
-
-
-class TestMultipleEllipsisDeprecation(_DeprecationTestCase):
- message = "an index can only have a single Ellipsis \(`...`\); replace " \
- "all but one with slices \(`:`\)."
-
- def test_basic(self):
- a = np.arange(10)
- self.assert_deprecated(a.__getitem__, args=((Ellipsis, Ellipsis),))
-
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- # Just check that this works:
- b = a[...,...]
- assert_array_equal(a, b)
- assert_raises(IndexError, a.__getitem__, ((Ellipsis, ) * 3,))
-
-
class TestBooleanUnaryMinusDeprecation(_DeprecationTestCase):
"""Test deprecation of unary boolean `-`. While + and * are well
defined, unary - is not and even a corrected form seems to have
@@ -605,5 +374,15 @@ class TestBooleanIndexShapeMismatchDeprecation():
arr.__getitem__, (slice(None), index))
+class TestFullDefaultDtype:
+ """np.full defaults to float when dtype is not set. In the future, it will
+ use the fill value's dtype.
+ """
+
+ def test_full_default_dtype(self):
+ assert_warns(FutureWarning, np.full, 1, 1)
+ assert_warns(FutureWarning, np.full, 1, None)
+
+
if __name__ == "__main__":
run_module_suite()
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 496664622..29f2ee7bd 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -535,7 +535,7 @@ class TestString(TestCase):
# Pull request #4722
np.array(["", ""]).astype(object)
-class TestDtypeAttributeDeletion(object):
+class TestDtypeAttributeDeletion(TestCase):
def test_dtype_non_writable_attributes_deletion(self):
dt = np.dtype(np.double)
@@ -552,6 +552,19 @@ class TestDtypeAttributeDeletion(object):
for s in attr:
assert_raises(AttributeError, delattr, dt, s)
+
+class TestDtypeAttributes(TestCase):
+ def test_descr_has_trailing_void(self):
+ # see gh-6359
+ dtype = np.dtype({
+ 'names': ['A', 'B'],
+ 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8],
+ 'itemsize': 16})
+ new_dtype = np.dtype(dtype.descr)
+ assert_equal(new_dtype.itemsize, 16)
+
+
class TestDtypeAttributes(TestCase):
def test_name_builtin(self):
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 4bc937e0b..38280d05e 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -3,6 +3,7 @@ from __future__ import division, absolute_import, print_function
import sys
import warnings
import functools
+import operator
import numpy as np
from numpy.core.multiarray_tests import array_indexing
@@ -21,6 +22,69 @@ except ImportError:
class TestIndexing(TestCase):
+ def test_index_no_floats(self):
+ a = np.array([[[5]]])
+
+ assert_raises(IndexError, lambda: a[0.0])
+ assert_raises(IndexError, lambda: a[0, 0.0])
+ assert_raises(IndexError, lambda: a[0.0, 0])
+ assert_raises(IndexError, lambda: a[0.0,:])
+ assert_raises(IndexError, lambda: a[:, 0.0])
+ assert_raises(IndexError, lambda: a[:, 0.0,:])
+ assert_raises(IndexError, lambda: a[0.0,:,:])
+ assert_raises(IndexError, lambda: a[0, 0, 0.0])
+ assert_raises(IndexError, lambda: a[0.0, 0, 0])
+ assert_raises(IndexError, lambda: a[0, 0.0, 0])
+ assert_raises(IndexError, lambda: a[-1.4])
+ assert_raises(IndexError, lambda: a[0, -1.4])
+ assert_raises(IndexError, lambda: a[-1.4, 0])
+ assert_raises(IndexError, lambda: a[-1.4,:])
+ assert_raises(IndexError, lambda: a[:, -1.4])
+ assert_raises(IndexError, lambda: a[:, -1.4,:])
+ assert_raises(IndexError, lambda: a[-1.4,:,:])
+ assert_raises(IndexError, lambda: a[0, 0, -1.4])
+ assert_raises(IndexError, lambda: a[-1.4, 0, 0])
+ assert_raises(IndexError, lambda: a[0, -1.4, 0])
+ assert_raises(IndexError, lambda: a[0.0:, 0.0])
+ assert_raises(IndexError, lambda: a[0.0:, 0.0,:])
+
+ def test_slicing_no_floats(self):
+ a = np.array([[5]])
+
+ # start as float.
+ assert_raises(IndexError, lambda: a[0.0:])
+ assert_raises(IndexError, lambda: a[0:, 0.0:2])
+ assert_raises(IndexError, lambda: a[0.0::2, :0])
+ assert_raises(IndexError, lambda: a[0.0:1:2,:])
+ assert_raises(IndexError, lambda: a[:, 0.0:])
+ # stop as float.
+ assert_raises(IndexError, lambda: a[:0.0])
+ assert_raises(IndexError, lambda: a[:0, 1:2.0])
+ assert_raises(IndexError, lambda: a[:0.0:2, :0])
+ assert_raises(IndexError, lambda: a[:0.0,:])
+ assert_raises(IndexError, lambda: a[:, 0:4.0:2])
+ # step as float.
+ assert_raises(IndexError, lambda: a[::1.0])
+ assert_raises(IndexError, lambda: a[0:, :2:2.0])
+ assert_raises(IndexError, lambda: a[1::4.0, :0])
+ assert_raises(IndexError, lambda: a[::5.0,:])
+ assert_raises(IndexError, lambda: a[:, 0:4:2.0])
+ # mixed.
+ assert_raises(IndexError, lambda: a[1.0:2:2.0])
+ assert_raises(IndexError, lambda: a[1.0::2.0])
+ assert_raises(IndexError, lambda: a[0:, :2.0:2.0])
+ assert_raises(IndexError, lambda: a[1.0:1:4.0, :0])
+ assert_raises(IndexError, lambda: a[1.0:5.0:5.0,:])
+ assert_raises(IndexError, lambda: a[:, 0.4:4.0:2.0])
+ # should still get the DeprecationWarning if step = 0.
+ assert_raises(IndexError, lambda: a[::0.0])
+
+ def test_index_no_array_to_index(self):
+ # No non-scalar arrays.
+ a = np.array([[[1]]])
+
+ assert_raises(IndexError, lambda: a[a:a:a])
+
def test_none_index(self):
# `None` index adds newaxis
a = np.array([1, 2, 3])
@@ -35,19 +99,9 @@ class TestIndexing(TestCase):
a = np.array(0)
assert_(isinstance(a[()], np.int_))
- # Regression, it needs to fall through integer and fancy indexing
- # cases, so need the with statement to ignore the non-integer error.
- with warnings.catch_warnings():
- warnings.filterwarnings('ignore', '', DeprecationWarning)
- a = np.array([1.])
- assert_(isinstance(a[0.], np.float_))
-
- a = np.array([np.array(1)], dtype=object)
- assert_(isinstance(a[0.], np.ndarray))
-
def test_same_kind_index_casting(self):
- # Indexes should be cast with same-kind and not safe, even if
- # that is somewhat unsafe. So test various different code paths.
+ # Indexes should be cast with same-kind and not safe, even if that
+ # is somewhat unsafe. So test various different code paths.
index = np.arange(5)
u_index = index.astype(np.uintp)
arr = np.arange(10)
@@ -85,7 +139,8 @@ class TestIndexing(TestCase):
[4, 5, 6],
[7, 8, 9]])
assert_equal(a[...], a)
- assert_(a[...].base is a) # `a[...]` was `a` in numpy <1.9.)
+ # `a[...]` was `a` in numpy <1.9.
+ assert_(a[...].base is a)
# Slicing with ellipsis can skip an
# arbitrary number of dimensions
@@ -645,7 +700,8 @@ class TestMultiIndexingAutomated(TestCase):
np.zeros([1]*31, dtype=int), # trigger too large array.
np.array([0., 1.])] # invalid datatype
# Some simpler indices that still cover a bit more
- self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]), 'skip']
+ self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
+ 'skip']
# Very simple ones to fill the rest:
self.fill_indices = [slice(None, None), 0]
@@ -719,16 +775,18 @@ class TestMultiIndexingAutomated(TestCase):
indx = np.array(indx, dtype=np.intp)
in_indices[i] = indx
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
- raise IndexError('arrays used as indices must be of integer (or boolean) type')
+ raise IndexError('arrays used as indices must be of '
+ 'integer (or boolean) type')
if indx.ndim != 0:
no_copy = False
ndim += 1
fancy_dim += 1
if arr.ndim - ndim < 0:
- # we can't take more dimensions then we have, not even for 0-d arrays.
- # since a[()] makes sense, but not a[(),]. We will raise an error
- # later on, unless a broadcasting error occurs first.
+ # we can't take more dimensions then we have, not even for 0-d
+ # arrays. since a[()] makes sense, but not a[(),]. We will
+ # raise an error later on, unless a broadcasting error occurs
+ # first.
raise IndexError
if ndim == 0 and None not in in_indices:
@@ -736,7 +794,8 @@ class TestMultiIndexingAutomated(TestCase):
return arr.copy(), no_copy
if ellipsis_pos is not None:
- in_indices[ellipsis_pos:ellipsis_pos+1] = [slice(None, None)] * (arr.ndim - ndim)
+ in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
+ (arr.ndim - ndim))
for ax, indx in enumerate(in_indices):
if isinstance(indx, slice):
@@ -779,21 +838,23 @@ class TestMultiIndexingAutomated(TestCase):
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
raise IndexError
if indx.ndim == 0:
- # The index is a scalar. This used to be two fold, but if fancy
- # indexing was active, the check was done later, possibly
- # after broadcasting it away (1.7. or earlier). Now it is always
- # done.
+ # The index is a scalar. This used to be two fold, but if
+ # fancy indexing was active, the check was done later,
+ # possibly after broadcasting it away (1.7. or earlier).
+ # Now it is always done.
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
raise IndexError
- if len(indices) > 0 and indices[-1][0] == 'f' and ax != ellipsis_pos:
+ if (len(indices) > 0 and
+ indices[-1][0] == 'f' and
+ ax != ellipsis_pos):
# NOTE: There could still have been a 0-sized Ellipsis
# between them. Checked that with ellipsis_pos.
indices[-1].append(indx)
else:
# We have a fancy index that is not after an existing one.
- # NOTE: A 0-d array triggers this as well, while
- # one may expect it to not trigger it, since a scalar
- # would not be considered fancy indexing.
+ # NOTE: A 0-d array triggers this as well, while one may
+ # expect it to not trigger it, since a scalar would not be
+ # considered fancy indexing.
num_fancy += 1
indices.append(['f', indx])
@@ -854,13 +915,15 @@ class TestMultiIndexingAutomated(TestCase):
# Work around for a crash or IndexError with 'wrap'
# in some 0-sized cases.
try:
- mi = np.ravel_multi_index(indx[1:], orig_slice, mode='raise')
+ mi = np.ravel_multi_index(indx[1:], orig_slice,
+ mode='raise')
except:
# This happens with 0-sized orig_slice (sometimes?)
# here it is a ValueError, but indexing gives a:
raise IndexError('invalid index into 0-sized')
else:
- mi = np.ravel_multi_index(indx[1:], orig_slice, mode='wrap')
+ mi = np.ravel_multi_index(indx[1:], orig_slice,
+ mode='wrap')
else:
# Maybe never happens...
raise ValueError
@@ -962,9 +1025,12 @@ class TestMultiIndexingAutomated(TestCase):
# it is aligned to the left. This is probably correct for
# consistency with arr[boolean_array,] also no broadcasting
# is done at all
- self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool),))
- self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
- self._check_multi_index(self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool),))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
+ self._check_multi_index(
+ self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
def test_multidim(self):
# Automatically test combinations with complex indexes on 2nd (or 1st)
@@ -1003,6 +1069,119 @@ class TestMultiIndexingAutomated(TestCase):
for index in self.complex_indices:
self._check_single_index(a, index)
+class TestFloatNonIntegerArgument(TestCase):
+ """
+ These test that ``TypeError`` is raised when you try to use
+ non-integers as arguments for indexing and slicing e.g. ``a[0.0:5]``
+ and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
+
+ """
+ def test_valid_indexing(self):
+ # These should raise no errors.
+ a = np.array([[[5]]])
+
+ a[np.array([0])]
+ a[[0, 0]]
+ a[:, [0, 0]]
+ a[:, 0,:]
+ a[:,:,:]
+
+ def test_valid_slicing(self):
+ # These should raise no errors.
+ a = np.array([[[5]]])
+
+ a[::]
+ a[0:]
+ a[:2]
+ a[0:2]
+ a[::2]
+ a[1::2]
+ a[:2:2]
+ a[1:2:2]
+
+ def test_non_integer_argument_errors(self):
+ a = np.array([[5]])
+
+ assert_raises(TypeError, np.reshape, a, (1., 1., -1))
+ assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
+ assert_raises(TypeError, np.take, a, [0], 1.)
+ assert_raises(TypeError, np.take, a, [0], np.float64(1.))
+
+ def test_non_integer_sequence_multiplication(self):
+ # Numpy scalar sequence multiply should not work with non-integers
+ def mult(a, b):
+ return a * b
+
+ assert_raises(TypeError, mult, [1], np.float_(3))
+ # following should be OK
+ mult([1], np.int_(3))
+
+ def test_reduce_axis_float_index(self):
+ d = np.zeros((3,3,3))
+ assert_raises(TypeError, np.min, d, 0.5)
+ assert_raises(TypeError, np.min, d, (0.5, 1))
+ assert_raises(TypeError, np.min, d, (1, 2.2))
+ assert_raises(TypeError, np.min, d, (.2, 1.2))
+
+
+class TestBooleanArgumentErrors(TestCase):
+ """Using a boolean as integer argument/indexing is an error.
+
+ """
+ def test_bool_as_int_argument(self):
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, np.reshape, a, (True, -1))
+ assert_raises(TypeError, np.reshape, a, (np.bool_(True), -1))
+ # Note that operator.index(np.array(True)) does not work, a boolean
+ # array is thus also deprecated, but not with the same message:
+ assert_raises(TypeError, operator.index, np.array(True))
+ assert_raises(TypeError, np.take, args=(a, [0], False))
+ assert_raises(IndexError, lambda: a[False:True:True])
+ assert_raises(IndexError, lambda: a[False, 0])
+ assert_raises(IndexError, lambda: a[False, 0, 0])
+
+
+class TestArrayToIndexDeprecation(TestCase):
+ """Creating an index from an array that is not 0-D is an error.
+
+ """
+ def test_array_to_index_error(self):
+ # so no exception is expected. The raising is effectively tested above.
+ a = np.array([[[1]]])
+
+ assert_raises(TypeError, operator.index, np.array([1]))
+ assert_raises(TypeError, np.reshape, a, (a, -1))
+ assert_raises(TypeError, np.take, a, [0], a)
+
+
+class TestNonIntegerArrayLike(TestCase):
+ """Tests that array_likes only valid if can safely cast to integer.
+
+ For instance, lists give IndexError when they cannot be safely cast to
+ an integer.
+
+ """
+ def test_basic(self):
+ a = np.arange(10)
+
+ assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
+ assert_raises(IndexError, a.__getitem__, (['1', '2'],))
+
+ # The following is valid
+ a.__getitem__([])
+
+
+class TestMultipleEllipsisError(TestCase):
+ """An index can only have a single ellipsis.
+
+ """
+ def test_basic(self):
+ a = np.arange(10)
+ assert_raises(IndexError, lambda: a[..., ...])
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
+ assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
+
class TestCApiAccess(TestCase):
def test_getitem(self):
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index a2667172c..872f9bde4 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -2075,41 +2075,37 @@ class TestMethods(TestCase):
assert_equal(a.ravel(order='K'), [2, 3, 0, 1])
assert_(a.ravel(order='K').flags.owndata)
+ # Test simple 1-d copy behaviour:
+ a = np.arange(10)[::2]
+ assert_(a.ravel('K').flags.owndata)
+ assert_(a.ravel('C').flags.owndata)
+ assert_(a.ravel('F').flags.owndata)
+
# Not contiguous and 1-sized axis with non matching stride
a = np.arange(2**3 * 2)[::2]
a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
strides = list(a.strides)
strides[1] = 123
a.strides = strides
- assert_(np.may_share_memory(a.ravel(order='K'), a))
+ assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('K'), np.arange(0, 15, 2))
- # General case of possible ravel that is not contiguous but
- # works and includes a 1-sized axis with non matching stride
- a = a.swapaxes(-1, -2) # swap back to C-order
- assert_(np.may_share_memory(a.ravel(order='C'), a))
- assert_(np.may_share_memory(a.ravel(order='K'), a))
-
- a = a.T # swap all to Fortran order
- assert_(np.may_share_memory(a.ravel(order='F'), a))
+ # contiguous and 1-sized axis with non matching stride works:
+ a = np.arange(2**3)
+ a = a.reshape(2, 1, 2, 2).swapaxes(-1, -2)
+ strides = list(a.strides)
+ strides[1] = 123
+ a.strides = strides
assert_(np.may_share_memory(a.ravel(order='K'), a))
+ assert_equal(a.ravel(order='K'), np.arange(2**3))
- # Test negative strides:
+ # Test negative strides (not very interesting since non-contiguous):
a = np.arange(4)[::-1].reshape(2, 2)
- assert_(np.may_share_memory(a.ravel(order='C'), a))
- assert_(np.may_share_memory(a.ravel(order='K'), a))
+ assert_(a.ravel(order='C').flags.owndata)
+ assert_(a.ravel(order='K').flags.owndata)
assert_equal(a.ravel('C'), [3, 2, 1, 0])
assert_equal(a.ravel('K'), [3, 2, 1, 0])
- # Test keeporder with weirdly strided 1-sized dims (1-d first stride)
- a = np.arange(8)[::2].reshape(1, 2, 2, 1) # neither C, nor F order
- strides = list(a.strides)
- strides[0] = -12
- strides[-1] = 0
- a.strides = strides
- assert_(np.may_share_memory(a.ravel(order='K'), a))
- assert_equal(a.ravel('K'), a.ravel('C'))
-
# 1-element tidy strides test (NPY_RELAXED_STRIDES_CHECKING):
a = np.array([[1]])
a.strides = (123, 432)
@@ -2125,7 +2121,7 @@ class TestMethods(TestCase):
assert_equal(a.ravel(order), [0])
assert_(np.may_share_memory(a.ravel(order), a))
- #Test that certain non-inplace ravels work right (mostly) for 'K':
+ # Test that certain non-inplace ravels work right (mostly) for 'K':
b = np.arange(2**4 * 2)[::2].reshape(2, 2, 2, 2)
a = b[..., ::2]
assert_equal(a.ravel('K'), [0, 4, 8, 12, 16, 20, 24, 28])
@@ -2139,6 +2135,22 @@ class TestMethods(TestCase):
assert_equal(a.ravel('A'), [0, 2, 4, 6, 8, 10, 12, 14])
assert_equal(a.ravel('F'), [0, 8, 4, 12, 2, 10, 6, 14])
+ def test_ravel_subclass(self):
+ class ArraySubclass(np.ndarray):
+ pass
+
+ a = np.arange(10).view(ArraySubclass)
+ assert_(isinstance(a.ravel('C'), ArraySubclass))
+ assert_(isinstance(a.ravel('F'), ArraySubclass))
+ assert_(isinstance(a.ravel('A'), ArraySubclass))
+ assert_(isinstance(a.ravel('K'), ArraySubclass))
+
+ a = np.arange(10)[::2].view(ArraySubclass)
+ assert_(isinstance(a.ravel('C'), ArraySubclass))
+ assert_(isinstance(a.ravel('F'), ArraySubclass))
+ assert_(isinstance(a.ravel('A'), ArraySubclass))
+ assert_(isinstance(a.ravel('K'), ArraySubclass))
+
def test_swapaxes(self):
a = np.arange(1*2*3*4).reshape(1, 2, 3, 4).copy()
idx = np.indices(a.shape)
@@ -3997,6 +4009,28 @@ class TestVdot(TestCase):
assert_equal(np.vdot(b, a), res)
assert_equal(np.vdot(b, b), res)
+ def test_vdot_uncontiguous(self):
+ for size in [2, 1000]:
+ # Different sizes match different branches in vdot.
+ a = np.zeros((size, 2, 2))
+ b = np.zeros((size, 2, 2))
+ a[:, 0, 0] = np.arange(size)
+ b[:, 0, 0] = np.arange(size) + 1
+ # Make a and b uncontiguous:
+ a = a[..., 0]
+ b = b[..., 0]
+
+ assert_equal(np.vdot(a, b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a, b.copy()),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a.copy(), b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a.copy('F'), b),
+ np.vdot(a.flatten(), b.flatten()))
+ assert_equal(np.vdot(a, b.copy('F')),
+ np.vdot(a.flatten(), b.flatten()))
+
class TestDot(TestCase):
def setUp(self):
@@ -5390,6 +5424,16 @@ def test_array_interface():
assert_equal(np.array(ArrayLike()), 1)
+def test_array_interface_itemsize():
+ # See gh-6361
+ my_dtype = np.dtype({'names': ['A', 'B'], 'formats': ['f4', 'f4'],
+ 'offsets': [0, 8], 'itemsize': 16})
+ a = np.ones(10, dtype=my_dtype)
+ descr_t = np.dtype(a.__array_interface__['descr'])
+ typestr_t = np.dtype(a.__array_interface__['typestr'])
+ assert_equal(descr_t.itemsize, typestr_t.itemsize)
+
+
def test_flat_element_deletion():
it = np.ones(3).flat
try:
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 033fac37d..ebf8e0380 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -935,6 +935,22 @@ class TestSign(TestCase):
assert_equal(res, tgt)
assert_equal(out, tgt)
+ def test_sign_dtype_object(self):
+ # In reference to github issue #6229
+
+ foo = np.array([-.1, 0, .1])
+ a = np.sign(foo.astype(np.object))
+ b = np.sign(foo)
+
+ assert_array_equal(a, b)
+
+ def test_sign_dtype_nan_object(self):
+ # In reference to github issue #6229
+ def test_nan():
+ foo = np.array([np.nan])
+ a = np.sign(foo.astype(np.object))
+
+ assert_raises(TypeError, test_nan)
class TestMinMax(TestCase):
def test_minmax_blocked(self):
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index f751a8ca3..9fa09cd51 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -441,8 +441,10 @@ def _exec_command( command, use_shell=None, use_tee = None, **env ):
se_flush()
if _so_has_fileno:
os.dup2(so_dup, so_fileno)
+ os.close(so_dup)
if _se_has_fileno:
os.dup2(se_dup, se_fileno)
+ os.close(se_dup)
fout.close()
fout = open_latin1(outfile, 'r')
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
index 5162b168c..2dd6c01e6 100644
--- a/numpy/distutils/fcompiler/compaq.py
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -74,7 +74,7 @@ class CompaqVisualFCompiler(FCompiler):
fc_exe = 'DF'
if sys.platform=='win32':
- from distutils.msvccompiler import MSVCCompiler
+ from numpy.distutils.msvccompiler import MSVCCompiler
try:
m = MSVCCompiler()
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index ef0bcc30b..28624918d 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -10,6 +10,7 @@ compilers = ['IntelFCompiler', 'IntelVisualFCompiler',
'IntelItaniumFCompiler', 'IntelItaniumVisualFCompiler',
'IntelEM64VisualFCompiler', 'IntelEM64TFCompiler']
+
def intel_version_match(type):
# Match against the important stuff in the version string
return simple_version_match(start=r'Intel.*?Fortran.*?(?:%s).*?Version' % (type,))
@@ -45,17 +46,16 @@ class IntelFCompiler(BaseIntelFCompiler):
}
pic_flags = ['-fPIC']
- module_dir_switch = '-module ' # Don't remove ending space!
+ module_dir_switch = '-module ' # Don't remove ending space!
module_include_switch = '-I'
def get_flags_free(self):
- return ["-FR"]
+ return ['-FR']
def get_flags(self):
return ['-fPIC']
def get_flags_opt(self):
- #return ['-i8 -xhost -openmp -fp-model strict']
return ['-xhost -openmp -fp-model strict']
def get_flags_arch(self):
@@ -120,11 +120,10 @@ class IntelEM64TFCompiler(IntelFCompiler):
return ['-fPIC']
def get_flags_opt(self):
- #return ['-i8 -xhost -openmp -fp-model strict']
- return ['-xhost -openmp -fp-model strict']
+ return ['-openmp -fp-model strict']
def get_flags_arch(self):
- return []
+ return ['-xSSE4.2']
# Is there no difference in the version string between the above compilers
# and the Visual compilers?
@@ -145,18 +144,18 @@ class IntelVisualFCompiler(BaseIntelFCompiler):
executables = {
'version_cmd' : None,
- 'compiler_f77' : [None, "-FI", "-w90", "-w95"],
- 'compiler_fix' : [None, "-FI", "-4L72", "-w"],
+ 'compiler_f77' : [None],
+ 'compiler_fix' : [None],
'compiler_f90' : [None],
- 'linker_so' : ['<F90>', "-shared"],
+ 'linker_so' : [None],
'archiver' : [ar_exe, "/verbose", "/OUT:"],
'ranlib' : None
}
compile_switch = '/c '
- object_switch = '/Fo' #No space after /Fo!
- library_switch = '/OUT:' #No space after /OUT:!
- module_dir_switch = '/module:' #No space after /module:
+ object_switch = '/Fo' # No space after /Fo!
+ library_switch = '/OUT:' # No space after /OUT:!
+ module_dir_switch = '/module:' # No space after /module:
module_include_switch = '/I'
def get_flags(self):
@@ -164,7 +163,7 @@ class IntelVisualFCompiler(BaseIntelFCompiler):
return opt
def get_flags_free(self):
- return ["-FR"]
+ return []
def get_flags_debug(self):
return ['/4Yb', '/d2']
@@ -185,7 +184,7 @@ class IntelItaniumVisualFCompiler(IntelVisualFCompiler):
version_match = intel_version_match('Itanium')
- possible_executables = ['efl'] # XXX this is a wild guess
+ possible_executables = ['efl'] # XXX this is a wild guess
ar_exe = IntelVisualFCompiler.ar_exe
executables = {
@@ -206,7 +205,7 @@ class IntelEM64VisualFCompiler(IntelVisualFCompiler):
version_match = simple_version_match(start='Intel\(R\).*?64,')
def get_flags_arch(self):
- return ["/arch:SSE2"]
+ return ['/QxSSE4.2']
if __name__ == '__main__':
diff --git a/numpy/distutils/intelccompiler.py b/numpy/distutils/intelccompiler.py
index db6ef80bd..a1f34e304 100644
--- a/numpy/distutils/intelccompiler.py
+++ b/numpy/distutils/intelccompiler.py
@@ -1,10 +1,12 @@
from __future__ import division, absolute_import, print_function
-import sys
+import platform
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.exec_command import find_executable
from numpy.distutils.ccompiler import simple_version_match
+if platform.system() == 'Windows':
+ from numpy.distutils.msvc9compiler import MSVCCompiler
class IntelCCompiler(UnixCCompiler):
@@ -15,14 +17,15 @@ class IntelCCompiler(UnixCCompiler):
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
- self.cc_exe = 'icc -fPIC'
+ self.cc_exe = ('icc -fPIC -fp-model strict -O3 '
+ '-fomit-frame-pointer -openmp')
compiler = self.cc_exe
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
archiver='xiar' + ' cru',
- linker_exe=compiler,
- linker_so=compiler + ' -shared')
+ linker_exe=compiler + ' -shared-intel',
+ linker_so=compiler + ' -shared -shared-intel')
class IntelItaniumCCompiler(IntelCCompiler):
@@ -40,24 +43,23 @@ class IntelEM64TCCompiler(UnixCCompiler):
A modified Intel x86_64 compiler compatible with a 64bit GCC-built Python.
"""
compiler_type = 'intelem'
- cc_exe = 'icc -m64 -fPIC'
- cc_args = "-fPIC"
+ cc_exe = 'icc -m64'
+ cc_args = '-fPIC'
def __init__(self, verbose=0, dry_run=0, force=0):
UnixCCompiler.__init__(self, verbose, dry_run, force)
- self.cc_exe = 'icc -m64 -fPIC'
+ self.cc_exe = ('icc -m64 -fPIC -fp-model strict -O3 '
+ '-fomit-frame-pointer -openmp -xSSE4.2')
compiler = self.cc_exe
self.set_executables(compiler=compiler,
compiler_so=compiler,
compiler_cxx=compiler,
archiver='xiar' + ' cru',
- linker_exe=compiler,
- linker_so=compiler + ' -shared')
+ linker_exe=compiler + ' -shared-intel',
+ linker_so=compiler + ' -shared -shared-intel')
-if sys.platform == 'win32':
- from distutils.msvc9compiler import MSVCCompiler
-
+if platform.system() == 'Windows':
class IntelCCompilerW(MSVCCompiler):
"""
A modified Intel compiler compatible with an MSVC-built Python.
@@ -72,11 +74,11 @@ if sys.platform == 'win32':
def initialize(self, plat_name=None):
MSVCCompiler.initialize(self, plat_name)
- self.cc = self.find_exe("icl.exe")
- self.lib = self.find_exe("xilib")
- self.linker = self.find_exe("xilink")
+ self.cc = self.find_exe('icl.exe')
+ self.lib = self.find_exe('xilib')
+ self.linker = self.find_exe('xilink')
self.compile_options = ['/nologo', '/O3', '/MD', '/W3',
- '/Qstd=c99']
+ '/Qstd=c99', '/QxSSE4.2']
self.compile_options_debug = ['/nologo', '/Od', '/MDd', '/W3',
'/Qstd=c99', '/Z7', '/D_DEBUG']
@@ -91,4 +93,3 @@ if sys.platform == 'win32':
MSVCCompiler.__init__(self, verbose, dry_run, force)
version_match = simple_version_match(start='Intel\(R\).*?64,')
self.__version = version_match
-
diff --git a/numpy/distutils/msvc9compiler.py b/numpy/distutils/msvc9compiler.py
new file mode 100644
index 000000000..636165bd5
--- /dev/null
+++ b/numpy/distutils/msvc9compiler.py
@@ -0,0 +1,23 @@
+import os
+import distutils.msvc9compiler
+from distutils.msvc9compiler import *
+
+
+class MSVCCompiler(distutils.msvc9compiler.MSVCCompiler):
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ distutils.msvc9compiler.MSVCCompiler.__init__(self, verbose, dry_run, force)
+
+ def initialize(self, plat_name=None):
+ environ_lib = os.getenv('lib')
+ environ_include = os.getenv('include')
+ distutils.msvc9compiler.MSVCCompiler.initialize(self, plat_name)
+ if environ_lib is not None:
+ os.environ['lib'] = environ_lib + os.environ['lib']
+ if environ_include is not None:
+ os.environ['include'] = environ_include + os.environ['include']
+
+ def manifest_setup_ldargs(self, output_filename, build_temp, ld_args):
+ ld_args.append('/MANIFEST')
+ distutils.msvc9compiler.MSVCCompiler.manifest_setup_ldargs(self,
+ output_filename,
+ build_temp, ld_args)
diff --git a/numpy/distutils/msvccompiler.py b/numpy/distutils/msvccompiler.py
new file mode 100644
index 000000000..0d28f6b9f
--- /dev/null
+++ b/numpy/distutils/msvccompiler.py
@@ -0,0 +1,17 @@
+import os
+import distutils.msvccompiler
+from distutils.msvccompiler import *
+
+
+class MSVCCompiler(distutils.msvccompiler.MSVCCompiler):
+ def __init__(self, verbose=0, dry_run=0, force=0):
+ distutils.msvccompiler.MSVCCompiler.__init__(self, verbose, dry_run, force)
+
+ def initialize(self, plat_name=None):
+ environ_lib = os.getenv('lib')
+ environ_include = os.getenv('include')
+ distutils.msvccompiler.MSVCCompiler.initialize(self, plat_name)
+ if environ_lib is not None:
+ os.environ['lib'] = environ_lib + os.environ['lib']
+ if environ_include is not None:
+ os.environ['include'] = environ_include + os.environ['include']
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index ceab906a4..6156439e1 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -3,7 +3,6 @@ from __future__ import division, absolute_import, print_function
import sys
import re
import os
-import shlex
if sys.version_info[0] < 3:
from ConfigParser import SafeConfigParser, NoOptionError
@@ -56,35 +55,23 @@ def parse_flags(line):
* 'ignored'
"""
- lexer = shlex.shlex(line)
- lexer.whitespace_split = True
-
d = {'include_dirs': [], 'library_dirs': [], 'libraries': [],
- 'macros': [], 'ignored': []}
- def next_token(t):
- if t.startswith('-I'):
- if len(t) > 2:
- d['include_dirs'].append(t[2:])
- else:
- t = lexer.get_token()
- d['include_dirs'].append(t)
- elif t.startswith('-L'):
- if len(t) > 2:
- d['library_dirs'].append(t[2:])
+ 'macros': [], 'ignored': []}
+
+ flags = (' ' + line).split(' -')
+ for flag in flags:
+ flag = '-' + flag
+ if len(flag) > 0:
+ if flag.startswith('-I'):
+ d['include_dirs'].append(flag[2:].strip())
+ elif flag.startswith('-L'):
+ d['library_dirs'].append(flag[2:].strip())
+ elif flag.startswith('-l'):
+ d['libraries'].append(flag[2:].strip())
+ elif flag.startswith('-D'):
+ d['macros'].append(flag[2:].strip())
else:
- t = lexer.get_token()
- d['library_dirs'].append(t)
- elif t.startswith('-l'):
- d['libraries'].append(t[2:])
- elif t.startswith('-D'):
- d['macros'].append(t[2:])
- else:
- d['ignored'].append(t)
- return lexer.get_token()
-
- t = lexer.get_token()
- while t:
- t = next_token(t)
+ d['ignored'].append(flag)
return d
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 90c053298..9dd48e2dc 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -999,8 +999,8 @@ class mkl_info(system_info):
plt = '64'
#l = 'mkl_ipf'
elif cpu.is_Xeon():
- plt = 'em64t'
- #l = 'mkl_em64t'
+ plt = 'intel64'
+ #l = 'mkl_intel64'
else:
plt = '32'
#l = 'mkl_ia32'
@@ -1703,6 +1703,10 @@ class openblas_info(blas_info):
if info is None:
return
+ # Add extra info for OpenBLAS
+ extra_info = self.calc_extra_info()
+ dict_append(info, **extra_info)
+
if not self.check_embedded_lapack(info):
return
@@ -1729,13 +1733,19 @@ class openblas_lapack_info(openblas_info):
}"""
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
+ # Add the additional "extra" arguments
+ try:
+ extra_args = info['extra_link_args']
+ except:
+ extra_args = []
try:
with open(src, 'wt') as f:
f.write(s)
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
- library_dirs=info['library_dirs'])
+ library_dirs=info['library_dirs'],
+ extra_postargs=extra_args)
res = True
except distutils.ccompiler.LinkError:
res = False
@@ -1752,7 +1762,8 @@ class openblas_lapack_info(openblas_info):
obj = c.compile([src], output_dir=tmpdir)
try:
c.link_executable(obj, out, libraries=info['libraries'],
- library_dirs=info['library_dirs'])
+ library_dirs=info['library_dirs'],
+ extra_postargs=extra_args)
res = True
except distutils.ccompiler.LinkError:
res = False
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index 86a3984c2..745bff15a 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -142,5 +142,44 @@ identical behaviour between arrays and scalars, irrespective of whether the
value is inside an array or not. NumPy scalars also have many of the same
methods arrays do.
+Extended Precision
+==================
+
+Python's floating-point numbers are usually 64-bit floating-point numbers,
+nearly equivalent to ``np.float64``. In some unusual situations it may be
+useful to use floating-point numbers with more precision. Whether this
+is possible in numpy depends on the hardware and on the development
+environment: specifically, x86 machines provide hardware floating-point
+with 80-bit precision, and while most C compilers provide this as their
+``long double`` type, MSVC (standard for Windows builds) makes
+``long double`` identical to ``double`` (64 bits). Numpy makes the
+compiler's ``long double`` available as ``np.longdouble`` (and
+``np.clongdouble`` for the complex numbers). You can find out what your
+numpy provides with ``np.finfo(np.longdouble)``.
+
+Numpy does not provide a dtype with more precision than C
+``long double``s; in particular, the 128-bit IEEE quad precision
+data type (FORTRAN's ``REAL*16``) is not available.
+
+For efficient memory alignment, ``np.longdouble`` is usually stored
+padded with zero bits, either to 96 or 128 bits. Which is more efficient
+depends on hardware and development environment; typically on 32-bit
+systems they are padded to 96 bits, while on 64-bit systems they are
+typically padded to 128 bits. ``np.longdouble`` is padded to the system
+default; ``np.float96`` and ``np.float128`` are provided for users who
+want specific padding. In spite of the names, ``np.float96`` and
+``np.float128`` provide only as much precision as ``np.longdouble``,
+that is, 80 bits on most x86 machines and 64 bits in standard
+Windows builds.
+
+Be warned that even if ``np.longdouble`` offers more precision than
+python ``float``, it is easy to lose that extra precision, since
+python often forces values to pass through ``float``. For example,
+the ``%`` formatting operator requires its arguments to be converted
+to standard python types, and it is therefore impossible to preserve
+extended precision even if many decimal places are requested. It can
+be useful to test your code with the value
+``1 + np.finfo(np.longdouble).eps``.
+
"""
from __future__ import division, absolute_import, print_function
diff --git a/numpy/fft/bento.info b/numpy/fft/bento.info
deleted file mode 100644
index 7627b319e..000000000
--- a/numpy/fft/bento.info
+++ /dev/null
@@ -1,6 +0,0 @@
-HookFile: bscript
-
-Library:
- Extension: fftpack_lite
- Sources:
- fftpack_litemodule.c, fftpack.c
diff --git a/numpy/fft/bscript b/numpy/fft/bscript
deleted file mode 100644
index ac1506496..000000000
--- a/numpy/fft/bscript
+++ /dev/null
@@ -1,7 +0,0 @@
-from bento.commands import hooks
-
-@hooks.pre_build
-def build(context):
- context.tweak_extension("fftpack_lite",
- includes=["../core/include", "../core/include/numpy",
- "../core", "../core/src/private"])
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 820168663..007ff42a4 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1122,19 +1122,28 @@ def gradient(f, *varargs, **kwargs):
----------
f : array_like
An N-dimensional array containing samples of a scalar function.
- varargs : list of scalar, optional
+ varargs : scalar or list of scalar, optional
N scalars specifying the sample distances for each dimension,
i.e. `dx`, `dy`, `dz`, ... Default distance: 1.
+ single scalar specifies sample distance for all dimensions.
+ if `axis` is given, the number of varargs must equal the number of axes.
edge_order : {1, 2}, optional
Gradient is calculated using N\ :sup:`th` order accurate differences
at the boundaries. Default: 1.
.. versionadded:: 1.9.1
+ axis : None or int or tuple of ints, optional
+ Gradient is calculated only along the given axis or axes
+ The default (axis = None) is to calculate the gradient for all the axes of the input array.
+ axis may be negative, in which case it counts from the last to the first axis.
+
+ .. versionadded:: 1.11.0
+
Returns
-------
gradient : list of ndarray
- Each element of `list` has the same shape as `f` giving the derivative
+ Each element of `list` has the same shape as `f` giving the derivative
of `f` with respect to each dimension.
Examples
@@ -1145,10 +1154,10 @@ def gradient(f, *varargs, **kwargs):
>>> np.gradient(x, 2)
array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
- For two dimensional arrays, the return will be two arrays ordered by
- axis. In this example the first array stands for the gradient in
+ For two dimensional arrays, the return will be two arrays ordered by
+ axis. In this example the first array stands for the gradient in
rows and the second one in columns direction:
-
+
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float))
[array([[ 2., 2., -1.],
[ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
@@ -1159,15 +1168,38 @@ def gradient(f, *varargs, **kwargs):
>>> y = x**2
>>> np.gradient(y, dx, edge_order=2)
array([-0., 2., 4., 6., 8.])
+
+ The axis keyword can be used to specify a subset of axes of which the gradient is calculated
+ >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
+ array([[ 2., 2., -1.],
+ [ 2., 2., -1.]])
"""
f = np.asanyarray(f)
N = len(f.shape) # number of dimensions
+
+ axes = kwargs.pop('axis', None)
+ if axes is None:
+ axes = tuple(range(N))
+ # check axes to have correct type and no duplicate entries
+ if isinstance(axes, int):
+ axes = (axes,)
+ if not isinstance(axes, tuple):
+ raise TypeError("A tuple of integers or a single integer is required")
+
+ # normalize axis values:
+ axes = tuple(x + N if x < 0 else x for x in axes)
+ if max(axes) >= N or min(axes) < 0:
+ raise ValueError("'axis' entry is out of bounds")
+
+ if len(set(axes)) != len(axes):
+ raise ValueError("duplicate value in 'axis'")
+
n = len(varargs)
if n == 0:
dx = [1.0]*N
elif n == 1:
dx = [varargs[0]]*N
- elif n == N:
+ elif n == len(axes):
dx = list(varargs)
else:
raise SyntaxError(
@@ -1211,7 +1243,7 @@ def gradient(f, *varargs, **kwargs):
else:
y = f
- for axis in range(N):
+ for i, axis in enumerate(axes):
if y.shape[axis] < 2:
raise ValueError(
@@ -1267,7 +1299,7 @@ def gradient(f, *varargs, **kwargs):
out[slice1] = (3.0*y[slice2] - 4.0*y[slice3] + y[slice4])/2.0
# divide by step size
- out /= dx[axis]
+ out /= dx[i]
outvals.append(out)
# reset the slice object in this dimension to ":"
@@ -1276,7 +1308,7 @@ def gradient(f, *varargs, **kwargs):
slice3[axis] = slice(None)
slice4[axis] = slice(None)
- if N == 1:
+ if len(axes) == 1:
return outvals[0]
else:
return outvals
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 26c2aab04..b2beef0a8 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -424,7 +424,7 @@ def array_split(ary, indices_or_sections, axis=0):
# This "kludge" was introduced here to replace arrays shaped (0, 10)
# or similar with an array shaped (0,).
# There seems no need for this, so give a FutureWarning to remove later.
- if sub_arys[-1].size == 0 and sub_arys[-1].ndim != 1:
+ if any(arr.size == 0 and arr.ndim != 1 for arr in sub_arys):
warnings.warn("in the future np.array_split will retain the shape of "
"arrays with a zero size, instead of replacing them by "
"`array([])`, which always has a shape of (0,).",
diff --git a/numpy/lib/tests/test__version.py b/numpy/lib/tests/test__version.py
index bbafe68eb..993c9d507 100644
--- a/numpy/lib/tests/test__version.py
+++ b/numpy/lib/tests/test__version.py
@@ -48,6 +48,19 @@ def test_dev_a_b_rc_mixed():
assert_(NumpyVersion('1.9.0a2.dev-6acvda54') < '1.9.0a2')
+def test_dev0_version():
+ assert_(NumpyVersion('1.9.0.dev0+Unknown') < '1.9.0')
+ for ver in ['1.9.0', '1.9.0a1', '1.9.0b2', '1.9.0b2.dev0+ffffffff']:
+ assert_(NumpyVersion('1.9.0.dev0+f16acvda') < ver)
+
+ assert_(NumpyVersion('1.9.0.dev0+f16acvda') == '1.9.0.dev0+11111111')
+
+
+def test_dev0_a_b_rc_mixed():
+ assert_(NumpyVersion('1.9.0a2.dev0+f16acvda') == '1.9.0a2.dev0+11111111')
+ assert_(NumpyVersion('1.9.0a2.dev0+6acvda54') < '1.9.0a2')
+
+
def test_raises():
for ver in ['1.9', '1,9.0', '1.7.x']:
assert_raises(ValueError, NumpyVersion, ver)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index af9315d83..5e758fb89 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -601,6 +601,31 @@ class TestGradient(TestCase):
num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
assert_(np.all(num_error < 0.03) == True)
+ def test_specific_axes(self):
+ # Testing that gradient can work on a given axis only
+ v = [[1, 1], [3, 4]]
+ x = np.array(v)
+ dx = [np.array([[2., 3.], [2., 3.]]),
+ np.array([[0., 0.], [1., 1.]])]
+ assert_array_equal(gradient(x, axis=0), dx[0])
+ assert_array_equal(gradient(x, axis=1), dx[1])
+ assert_array_equal(gradient(x, axis=-1), dx[1])
+ assert_array_equal(gradient(x, axis=(1,0)), [dx[1], dx[0]])
+
+ # test axis=None which means all axes
+ assert_almost_equal(gradient(x, axis=None), [dx[0], dx[1]])
+ # and is the same as no axis keyword given
+ assert_almost_equal(gradient(x, axis=None), gradient(x))
+
+ # test vararg order
+ assert_array_equal(gradient(x, 2, 3, axis=(1,0)), [dx[1]/2.0, dx[0]/3.0])
+ # test maximal number of varargs
+ assert_raises(SyntaxError, gradient, x, 1, 2, axis=1)
+
+ assert_raises(ValueError, gradient, x, axis=3)
+ assert_raises(ValueError, gradient, x, axis=-3)
+ assert_raises(TypeError, gradient, x, axis=[1,])
+
class TestAngle(TestCase):
@@ -1888,19 +1913,25 @@ class TestBincount(TestCase):
def test_with_incorrect_minlength(self):
x = np.array([], dtype=int)
- assert_raises_regex(TypeError, "an integer is required",
+ assert_raises_regex(TypeError,
+ "'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
- assert_raises_regex(ValueError, "must be positive",
+ assert_raises_regex(ValueError,
+ "must be positive",
lambda: np.bincount(x, minlength=-1))
- assert_raises_regex(ValueError, "must be positive",
+ assert_raises_regex(ValueError,
+ "must be positive",
lambda: np.bincount(x, minlength=0))
x = np.arange(5)
- assert_raises_regex(TypeError, "an integer is required",
+ assert_raises_regex(TypeError,
+ "'str' object cannot be interpreted",
lambda: np.bincount(x, minlength="foobar"))
- assert_raises_regex(ValueError, "minlength must be positive",
+ assert_raises_regex(ValueError,
+ "minlength must be positive",
lambda: np.bincount(x, minlength=-1))
- assert_raises_regex(ValueError, "minlength must be positive",
+ assert_raises_regex(ValueError,
+ "minlength must be positive",
lambda: np.bincount(x, minlength=0))
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 3f2d8d5b4..8ab72b9f9 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -111,6 +111,15 @@ class TestArraySplit(TestCase):
compare_results(res, desired)
assert_(a.dtype.type is res[-1].dtype.type)
+ # Same thing for manual splits:
+ res = assert_warns(FutureWarning, array_split, a, [0, 1, 2], axis=0)
+
+ # After removing the FutureWarning, the last should be zeros((0, 10))
+ desired = [np.array([]), np.array([np.arange(10)]),
+ np.array([np.arange(10)])]
+ compare_results(res, desired)
+ assert_(a.dtype.type is res[-1].dtype.type)
+
def test_integer_split_2D_cols(self):
a = np.array([np.arange(10), np.arange(10)])
res = array_split(a, 3, axis=-1)
diff --git a/numpy/linalg/bento.info b/numpy/linalg/bento.info
deleted file mode 100644
index 52d036753..000000000
--- a/numpy/linalg/bento.info
+++ /dev/null
@@ -1,21 +0,0 @@
-HookFile: bscript
-
-Library:
- Extension: _umath_linalg
- Sources:
- umath_linalg.c.src,
- lapack_lite/blas_lite.c,
- lapack_lite/dlamch.c,
- lapack_lite/dlapack_lite.c,
- lapack_lite/f2c_lite.c,
- lapack_lite/python_xerbla.c,
- lapack_lite/zlapack_lite.c
- Extension: lapack_lite
- Sources:
- lapack_lite/blas_lite.c,
- lapack_lite/dlamch.c,
- lapack_lite/dlapack_lite.c,
- lapack_lite/f2c_lite.c,
- lapack_litemodule.c,
- lapack_lite/python_xerbla.c,
- lapack_lite/zlapack_lite.c
diff --git a/numpy/linalg/bscript b/numpy/linalg/bscript
deleted file mode 100644
index 70fdd9de3..000000000
--- a/numpy/linalg/bscript
+++ /dev/null
@@ -1,26 +0,0 @@
-from bento.commands.hooks \
- import \
- pre_build
-
-@pre_build
-def pbuild(context):
- bld = context.waf_context
-
- def build_lapack_lite(extension):
- kw = {}
- kw["use"] = "npymath"
- if bld.env.HAS_LAPACK:
- for s in ['python_xerbla.c', 'zlapack_lite.c', 'dlapack_lite.c',
- 'blas_lite.c', 'dlamch.c', 'f2c_lite.c']:
- extension.sources.pop(extension.sources.index('lapack_lite/' + s))
- kw["use"] = "npymath LAPACK"
-
- includes = ["../core/include", "../core/include/numpy", "../core",
- "../core/src/private"]
- return context.default_builder(extension,
- includes=includes,
- **kw)
-
- context.register_builder("lapack_lite", build_lapack_lite)
- context.register_builder("_umath_linalg", build_lapack_lite)
-
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index cf5b314ac..f5cb3cb77 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -1012,9 +1012,10 @@ def eig(a):
w : (..., M) array
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered. The resulting
- array will be always be of complex type. When `a` is real
- the resulting eigenvalues will be real (0 imaginary part) or
- occur in conjugate pairs
+ array will be of complex type, unless the imaginary part is
+ zero in which case it will be cast to a real type. When `a`
+ is real the resulting eigenvalues will be real (0 imaginary
+ part) or occur in conjugate pairs
v : (..., M, M) array
The normalized (unit "length") eigenvectors, such that the
@@ -1381,7 +1382,7 @@ def cond(x, p=None):
Parameters
----------
- x : (M, N) array_like
+ x : (..., M, N) array_like
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
@@ -1450,12 +1451,12 @@ def cond(x, p=None):
0.70710678118654746
"""
- x = asarray(x) # in case we have a matrix
+ x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x, compute_uv=False)
- return s[0]/s[-1]
+ return s[..., 0]/s[..., -1]
else:
- return norm(x, p)*norm(inv(x), p)
+ return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index aedcc6a95..7c577d86f 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -556,7 +556,12 @@ class TestCondSVD(LinalgTestCase, LinalgGeneralizedTestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
- old_assert_almost_equal(s[0] / s[-1], linalg.cond(a), decimal=5)
+ old_assert_almost_equal(
+ s[..., 0] / s[..., -1], linalg.cond(a), decimal=5)
+
+ def test_stacked_arrays_explicitly(self):
+ A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]])
+ assert_equal(linalg.cond(A), linalg.cond(A[None, ...])[0])
class TestCond2(LinalgTestCase):
@@ -564,7 +569,12 @@ class TestCond2(LinalgTestCase):
def do(self, a, b):
c = asarray(a) # a might be a matrix
s = linalg.svd(c, compute_uv=False)
- old_assert_almost_equal(s[0] / s[-1], linalg.cond(a, 2), decimal=5)
+ old_assert_almost_equal(
+ s[..., 0] / s[..., -1], linalg.cond(a, 2), decimal=5)
+
+ def test_stacked_arrays_explicitly(self):
+ A = np.array([[1., 2., 1.], [0, -2., 0], [6., 2., 3.]])
+ assert_equal(linalg.cond(A, 2), linalg.cond(A[None, ...], 2)[0])
class TestCondInf(object):
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index ca6698492..61f0c12a8 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -7536,7 +7536,7 @@ class _convert2ma:
doc = sig + doc
return doc
- def __call__(self, a, *args, **params):
+ def __call__(self, *args, **params):
# Find the common parameters to the call and the definition
_extras = self._extras
common_params = set(params).intersection(_extras)
@@ -7544,7 +7544,7 @@ class _convert2ma:
for p in common_params:
_extras[p] = params.pop(p)
# Get the result
- result = self._func.__call__(a, *args, **params).view(MaskedArray)
+ result = self._func.__call__(*args, **params).view(MaskedArray)
if "fill_value" in common_params:
result.fill_value = _extras.get("fill_value", None)
if "hardmask" in common_params:
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index d2b984084..aa6ce5db9 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -1670,6 +1670,18 @@ class TestFillingValues(TestCase):
a = identity(3, fill_value=0., dtype=complex)
assert_equal(a.fill_value, 0.)
+ def test_shape_argument(self):
+ # Test that shape can be provided as an argument
+ # GH issue 6106
+ a = empty(shape=(3, ))
+ assert_equal(a.shape, (3, ))
+
+ a = ones(shape=(3, ), dtype=float)
+ assert_equal(a.shape, (3, ))
+
+ a = zeros(shape=(3, ), dtype=complex)
+ assert_equal(a.shape, (3, ))
+
def test_fillvalue_in_view(self):
# Test the behavior of fill_value in view
diff --git a/numpy/random/bento.info b/numpy/random/bento.info
deleted file mode 100644
index f51da0131..000000000
--- a/numpy/random/bento.info
+++ /dev/null
@@ -1,9 +0,0 @@
-HookFile: bscript
-
-Library:
- Extension: mtrand
- Sources:
- mtrand/mtrand.c,
- mtrand/randomkit.c,
- mtrand/initarray.c,
- mtrand/distributions.c
diff --git a/numpy/random/bscript b/numpy/random/bscript
deleted file mode 100644
index cecc65e33..000000000
--- a/numpy/random/bscript
+++ /dev/null
@@ -1,38 +0,0 @@
-import os
-import sys
-
-from bento.commands import hooks
-import waflib
-
-@hooks.post_configure
-def configure(context):
- conf = context.waf_context
-
- conf.env.USE_WINCRYPT = False
- if conf.check_declaration("_WIN32", mandatory=False):
- conf.env.USE_WINCRYPT = True
-
- conf.env.NEEDS_MINGW32_WORKAROUND = False
- if sys.platform == "win32" and conf.check_declaration("__GNUC__", mandatory=False):
- conf.env.NEEDS_MINGW32_WORKAROUND = True
-
-@hooks.pre_build
-def build(context):
- bld = context.waf_context
-
- if bld.env.NEEDS_MINGW32_WORKAROUND:
- raise NotImplementedError("Check for mingw time workaround stuff")
-
- def builder(extension):
- includes = ["../core/include", "../core/include/numpy", "../core",
- "../core/src/private"]
- kw = {}
- # enable unix large file support on 32 bit systems
- # (64 bit off_t, lseek -> lseek64 etc.)
- kw['defines'] = ['_FILE_OFFSET_BITS=64',
- '_LARGEFILE_SOURCE=1',
- '_LARGEFILE64_SOURCE=1']
- if bld.env.USE_WINCRYPT:
- kw["lib"] = "ADVAPI32"
- return context.default_builder(extension, includes=includes, **kw)
- context.register_builder("mtrand", builder)
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand/mtrand.pyx
index 9861204d9..59f9dcd6b 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand/mtrand.pyx
@@ -3520,7 +3520,7 @@ cdef class RandomState:
.. math:: P(x;l, m, r) = \\begin{cases}
\\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\
- \\frac{2(m-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\
+ \\frac{2(r-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\
0& \\text{otherwise}.
\\end{cases}
diff --git a/numpy/setup.py b/numpy/setup.py
index 58bb7dc93..4ccdaeea5 100644
--- a/numpy/setup.py
+++ b/numpy/setup.py
@@ -6,7 +6,6 @@ def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('numpy', parent_package, top_path)
- # If you update this list, then also update the file numpy/bento.info
config.add_subpackage('compat')
config.add_subpackage('core')
config.add_subpackage('distutils')