Diffstat (limited to 'numpy')
-rw-r--r--  numpy/__init__.py                          |  87
-rw-r--r--  numpy/__init__.pyi                         | 107
-rw-r--r--  numpy/core/_add_newdocs.py                 |  19
-rw-r--r--  numpy/core/_type_aliases.py                |  59
-rw-r--r--  numpy/core/fromnumeric.py                  |   4
-rw-r--r--  numpy/core/function_base.py                |  32
-rw-r--r--  numpy/core/numerictypes.py                 |   4
-rw-r--r--  numpy/core/src/multiarray/ctors.c          |   2
-rw-r--r--  numpy/core/src/multiarray/descriptor.c     |   6
-rw-r--r--  numpy/core/src/multiarray/einsum.c.src     | 159
-rw-r--r--  numpy/core/src/multiarray/mapping.c        |   4
-rw-r--r--  numpy/core/src/umath/simd.inc.src          |   8
-rw-r--r--  numpy/core/tests/test_api.py               |   4
-rw-r--r--  numpy/core/tests/test_deprecations.py      |  45
-rw-r--r--  numpy/core/tests/test_dtype.py             |  15
-rw-r--r--  numpy/core/tests/test_function_base.py     |  36
-rw-r--r--  numpy/core/tests/test_multiarray.py        |   4
-rw-r--r--  numpy/core/tests/test_regression.py        |   7
-rw-r--r--  numpy/core/tests/test_scalar_ctors.py      |   2
-rw-r--r--  numpy/core/tests/test_umath.py             |  10
-rw-r--r--  numpy/core/tests/test_umath_accuracy.py    |   6
-rw-r--r--  numpy/core/tests/test_umath_complex.py     |  36
-rw-r--r--  numpy/lib/tests/test_nanfunctions.py       |   2
-rw-r--r--  numpy/ma/extras.py                         |  11
-rw-r--r--  numpy/polynomial/_polybase.py              |   6
-rw-r--r--  numpy/random/bit_generator.pyx             |  11
-rw-r--r--  numpy/random/tests/test_seed_sequence.py   |  28
-rw-r--r--  numpy/testing/tests/test_utils.py          |  33
-rw-r--r--  numpy/tests/test_public_api.py             |  28
-rw-r--r--  numpy/tests/typing/fail/fromnumeric.py     |  10
-rw-r--r--  numpy/tests/typing/fail/scalars.py         |  14
-rw-r--r--  numpy/tests/typing/pass/dtype.py           |   3
-rw-r--r--  numpy/tests/typing/pass/scalars.py         |   4
33 files changed, 543 insertions, 263 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index e6a24f0d1..550fb1772 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -136,6 +136,9 @@ else:
__all__ = ['ModuleDeprecationWarning',
'VisibleDeprecationWarning']
+ # mapping of {name: (value, deprecation_msg)}
+ __deprecated_attrs__ = {}
+
# Allow distributors to run custom init code
from . import _distributor_init
@@ -156,11 +159,35 @@ else:
from . import matrixlib as _mat
from .matrixlib import *
- # Make these accessible from numpy name-space
- # but not imported in from numpy import *
- # TODO[gh-6103]: Deprecate these
- from builtins import bool, int, float, complex, object, str
- from .compat import long, unicode
+ # Deprecations introduced in NumPy 1.20.0, 2020-06-06
+ import builtins as _builtins
+ __deprecated_attrs__.update({
+ n: (
+ getattr(_builtins, n),
+ "`np.{n}` is a deprecated alias for the builtin `{n}`. "
+ "Use `{n}` by itself, which is identical in behavior, to silence "
+ "this warning. "
+ "If you specifically wanted the numpy scalar type, use `np.{n}_` "
+ "here."
+ .format(n=n)
+ )
+ for n in ["bool", "int", "float", "complex", "object", "str"]
+ })
+ __deprecated_attrs__.update({
+ n: (
+ getattr(compat, n),
+ "`np.{n}` is a deprecated alias for `np.compat.{n}`. "
+ "Use `np.compat.{n}` by itself, which is identical in behavior, "
+ "to silence this warning. "
+ "In the likely event your code does not need to work on Python 2 "
+ "you can use the builtin ``{n2}`` for which ``np.compat.{n}`` is "
+ "itself an alias. "
+ "If you specifically wanted the numpy scalar type, use `np.{n2}_` "
+ "here."
+ .format(n=n, n2=n2)
+ )
+ for n, n2 in [("long", "int"), ("unicode", "str")]
+ })
from .core import round, abs, max, min
# now that numpy modules are imported, can initialize limits
@@ -172,8 +199,10 @@ else:
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
- # These are added by `from .core import *` and `core.__all__`, but we
- # overwrite them above with builtins we do _not_ want to export.
+ # These are exported by np.core, but are replaced by the builtins below;
+ # remove them to ensure that we don't end up with `np.long == np.int_`,
+ # which would be a breaking change.
+ del long, unicode
__all__.remove('long')
__all__.remove('unicode')
@@ -196,25 +225,33 @@ else:
numarray = 'removed'
if sys.version_info[:2] >= (3, 7):
- # Importing Tester requires importing all of UnitTest which is not a
- # cheap import Since it is mainly used in test suits, we lazy import it
- # here to save on the order of 10 ms of import time for most users
- #
- # The previous way Tester was imported also had a side effect of adding
- # the full `numpy.testing` namespace
- #
# module level getattr is only supported in 3.7 onwards
# https://www.python.org/dev/peps/pep-0562/
def __getattr__(attr):
+ # Emit warnings for deprecated attributes
+ try:
+ val, msg = __deprecated_attrs__[attr]
+ except KeyError:
+ pass
+ else:
+ warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ return val
+
+ # Importing Tester requires importing all of UnitTest which is not a
+ # cheap import. Since it is mainly used in test suites, we lazy import it
+ # here to save on the order of 10 ms of import time for most users
+ #
+ # The previous way Tester was imported also had a side effect of adding
+ # the full `numpy.testing` namespace
if attr == 'testing':
import numpy.testing as testing
return testing
elif attr == 'Tester':
from .testing import Tester
return Tester
- else:
- raise AttributeError("module {!r} has no attribute "
- "{!r}".format(__name__, attr))
+
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
def __dir__():
return list(globals().keys() | {'Tester', 'testing'})
@@ -224,6 +261,13 @@ else:
# no-one else in the world is using it (though I hope not)
from .testing import Tester
+ # We weren't able to emit a warning about these, so keep them around
+ globals().update({
+ k: v
+ for k, (v, msg) in __deprecated_attrs__.items()
+ })
+
+
# Pytest testing
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
@@ -279,12 +323,11 @@ else:
error_message = "{}: {}".format(w[-1].category.__name__, str(w[-1].message))
msg = (
"Polyfit sanity test emitted a warning, most likely due "
- "to using a buggy Accelerate backend. "
- "If you compiled yourself, "
- "see site.cfg.example for information. "
+ "to using a buggy Accelerate backend. If you compiled "
+ "yourself, more information is available at "
+ "https://numpy.org/doc/stable/user/building.html#accelerated-blas-lapack-libraries "
"Otherwise report this to the vendor "
- "that provided NumPy.\n{}\n".format(
- error_message))
+ "that provided NumPy.\n{}\n".format(error_message))
raise RuntimeError(msg)
del _mac_os_check
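For illustration, a minimal sketch (with a hypothetical module and attribute name) of the PEP 562 module-level `__getattr__` pattern used above, which lets an attribute keep working while emitting a DeprecationWarning on access:

    # hypothetical mylib/__init__.py -- not part of this patch
    import warnings

    __deprecated_attrs__ = {
        # name: (value, deprecation message)
        "old_name": (42, "`mylib.old_name` is deprecated; use `mylib.new_name`."),
    }

    def __getattr__(attr):
        try:
            val, msg = __deprecated_attrs__[attr]
        except KeyError:
            raise AttributeError(
                "module {!r} has no attribute {!r}".format(__name__, attr))
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        return val

Accessing `mylib.old_name` then still returns 42 but warns on Python 3.7+; this is the same mechanism the aliases `np.bool`, `np.int`, etc. now go through.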
diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi
index 5031893ed..f9218391e 100644
--- a/numpy/__init__.pyi
+++ b/numpy/__init__.pyi
@@ -51,7 +51,12 @@ _NdArraySubClass = TypeVar("_NdArraySubClass", bound=ndarray)
class dtype:
names: Optional[Tuple[str, ...]]
- def __init__(self, obj: DtypeLike, align: bool = ..., copy: bool = ...) -> None: ...
+ def __init__(
+ self,
+ dtype: DtypeLike,
+ align: bool = ...,
+ copy: bool = ...,
+ ) -> None: ...
def __eq__(self, other: DtypeLike) -> bool: ...
def __ne__(self, other: DtypeLike) -> bool: ...
def __gt__(self, other: DtypeLike) -> bool: ...
@@ -382,18 +387,18 @@ class _real_generic(generic): # type: ignore
class number(generic): ... # type: ignore
class bool_(_real_generic):
- def __init__(self, value: object = ...) -> None: ...
+ def __init__(self, __value: object = ...) -> None: ...
class object_(generic):
- def __init__(self, value: object = ...) -> None: ...
+ def __init__(self, __value: object = ...) -> None: ...
class datetime64:
@overload
def __init__(
- self, _data: Union[datetime64, str, dt.datetime] = ..., _format: str = ...
+ self, __value: Union[datetime64, str, dt.datetime] = ..., __format: str = ...
) -> None: ...
@overload
- def __init__(self, _data: int, _format: str) -> None: ...
+ def __init__(self, __value: int, __format: str) -> None: ...
def __add__(self, other: Union[timedelta64, int]) -> datetime64: ...
def __sub__(self, other: Union[timedelta64, datetime64, int]) -> timedelta64: ...
@@ -401,19 +406,19 @@ class integer(number, _real_generic): ... # type: ignore
class signedinteger(integer): ... # type: ignore
class int8(signedinteger):
- def __init__(self, value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: SupportsInt = ...) -> None: ...
class int16(signedinteger):
- def __init__(self, value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: SupportsInt = ...) -> None: ...
class int32(signedinteger):
- def __init__(self, value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: SupportsInt = ...) -> None: ...
class int64(signedinteger):
- def __init__(self, value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: SupportsInt = ...) -> None: ...
class timedelta64(signedinteger):
- def __init__(self, _data: Any = ..., _format: str = ...) -> None: ...
+ def __init__(self, __value: Any = ..., __format: str = ...) -> None: ...
@overload
def __add__(self, other: Union[timedelta64, int]) -> timedelta64: ...
@overload
@@ -433,34 +438,34 @@ class timedelta64(signedinteger):
class unsignedinteger(integer): ... # type: ignore
class uint8(unsignedinteger):
- def __init__(self, value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: SupportsInt = ...) -> None: ...
class uint16(unsignedinteger):
- def __init__(self, value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: SupportsInt = ...) -> None: ...
class uint32(unsignedinteger):
- def __init__(self, value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: SupportsInt = ...) -> None: ...
class uint64(unsignedinteger):
- def __init__(self, value: SupportsInt = ...) -> None: ...
+ def __init__(self, __value: SupportsInt = ...) -> None: ...
class inexact(number): ... # type: ignore
class floating(inexact, _real_generic): ... # type: ignore
class float16(floating):
- def __init__(self, value: SupportsFloat = ...) -> None: ...
+ def __init__(self, __value: SupportsFloat = ...) -> None: ...
class float32(floating):
- def __init__(self, value: SupportsFloat = ...) -> None: ...
+ def __init__(self, __value: SupportsFloat = ...) -> None: ...
class float64(floating):
- def __init__(self, value: SupportsFloat = ...) -> None: ...
+ def __init__(self, __value: SupportsFloat = ...) -> None: ...
class complexfloating(inexact): ... # type: ignore
class complex64(complexfloating):
def __init__(
- self, value: Union[SupportsInt, SupportsFloat, SupportsComplex] = ...
+ self, __value: Union[SupportsInt, SupportsFloat, SupportsComplex] = ...
) -> None: ...
@property
def real(self) -> float32: ...
@@ -469,7 +474,7 @@ class complex64(complexfloating):
class complex128(complexfloating):
def __init__(
- self, value: Union[SupportsInt, SupportsFloat, SupportsComplex] = ...
+ self, __value: Union[SupportsInt, SupportsFloat, SupportsComplex] = ...
) -> None: ...
@property
def real(self) -> float64: ...
@@ -479,24 +484,24 @@ class complex128(complexfloating):
class flexible(_real_generic): ... # type: ignore
class void(flexible):
- def __init__(self, value: Union[int, integer, bool_, bytes, bytes_]): ...
+ def __init__(self, __value: Union[int, integer, bool_, bytes, bytes_]): ...
class character(_real_generic): ... # type: ignore
class bytes_(character):
@overload
- def __init__(self, value: object = ...) -> None: ...
+ def __init__(self, __value: object = ...) -> None: ...
@overload
def __init__(
- self, value: object, encoding: str = ..., errors: str = ...
+ self, __value: Union[str, str_], encoding: str = ..., errors: str = ...
) -> None: ...
class str_(character):
@overload
- def __init__(self, value: object = ...) -> None: ...
+ def __init__(self, __value: object = ...) -> None: ...
@overload
def __init__(
- self, value: object, encoding: str = ..., errors: str = ...
+ self, __value: Union[bytes, bytes_], encoding: str = ..., errors: str = ...
) -> None: ...
# TODO(alan): Platform dependent types
@@ -936,21 +941,18 @@ def reshape(a: ArrayLike, newshape: _ShapeLike, order: _Order = ...) -> ndarray:
@overload
def choose(
a: _ScalarIntOrBool,
- choices: Union[Sequence[ArrayLike], ndarray],
+ choices: ArrayLike,
out: Optional[ndarray] = ...,
mode: _Mode = ...,
) -> _ScalarIntOrBool: ...
@overload
def choose(
- a: _IntOrBool,
- choices: Union[Sequence[ArrayLike], ndarray],
- out: Optional[ndarray] = ...,
- mode: _Mode = ...,
+ a: _IntOrBool, choices: ArrayLike, out: Optional[ndarray] = ..., mode: _Mode = ...
) -> Union[integer, bool_]: ...
@overload
def choose(
a: _ArrayLikeIntOrBool,
- choices: Union[Sequence[ArrayLike], ndarray],
+ choices: ArrayLike,
out: Optional[ndarray] = ...,
mode: _Mode = ...,
) -> ndarray: ...
@@ -960,9 +962,7 @@ def repeat(
def put(
a: ndarray, ind: _ArrayLikeIntOrBool, v: ArrayLike, mode: _Mode = ...
) -> None: ...
-def swapaxes(
- a: Union[Sequence[ArrayLike], ndarray], axis1: int, axis2: int
-) -> ndarray: ...
+def swapaxes(a: ArrayLike, axis1: int, axis2: int) -> ndarray: ...
def transpose(
a: ArrayLike, axes: Union[None, Sequence[int], ndarray] = ...
) -> ndarray: ...
@@ -998,54 +998,42 @@ def argpartition(
order: Union[None, str, Sequence[str]] = ...,
) -> ndarray: ...
def sort(
- a: Union[Sequence[ArrayLike], ndarray],
+ a: ArrayLike,
axis: Optional[int] = ...,
kind: Optional[_SortKind] = ...,
order: Union[None, str, Sequence[str]] = ...,
) -> ndarray: ...
def argsort(
- a: Union[Sequence[ArrayLike], ndarray],
+ a: ArrayLike,
axis: Optional[int] = ...,
kind: Optional[_SortKind] = ...,
order: Union[None, str, Sequence[str]] = ...,
) -> ndarray: ...
@overload
-def argmax(
- a: Union[Sequence[ArrayLike], ndarray],
- axis: None = ...,
- out: Optional[ndarray] = ...,
-) -> integer: ...
+def argmax(a: ArrayLike, axis: None = ..., out: Optional[ndarray] = ...) -> integer: ...
@overload
def argmax(
- a: Union[Sequence[ArrayLike], ndarray],
- axis: int = ...,
- out: Optional[ndarray] = ...,
+ a: ArrayLike, axis: int = ..., out: Optional[ndarray] = ...
) -> Union[integer, ndarray]: ...
@overload
-def argmin(
- a: Union[Sequence[ArrayLike], ndarray],
- axis: None = ...,
- out: Optional[ndarray] = ...,
-) -> integer: ...
+def argmin(a: ArrayLike, axis: None = ..., out: Optional[ndarray] = ...) -> integer: ...
@overload
def argmin(
- a: Union[Sequence[ArrayLike], ndarray],
- axis: int = ...,
- out: Optional[ndarray] = ...,
+ a: ArrayLike, axis: int = ..., out: Optional[ndarray] = ...
) -> Union[integer, ndarray]: ...
@overload
def searchsorted(
- a: Union[Sequence[ArrayLike], ndarray],
+ a: ArrayLike,
v: _Scalar,
side: _Side = ...,
- sorter: Union[None, Sequence[_IntOrBool], ndarray] = ..., # 1D int array
+ sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array
) -> integer: ...
@overload
def searchsorted(
- a: Union[Sequence[ArrayLike], ndarray],
+ a: ArrayLike,
v: ArrayLike,
side: _Side = ...,
- sorter: Union[None, Sequence[_IntOrBool], ndarray] = ..., # 1D int array
+ sorter: Optional[_ArrayLikeIntOrBool] = ..., # 1D int array
) -> ndarray: ...
def resize(a: ArrayLike, new_shape: _ShapeLike) -> ndarray: ...
@overload
@@ -1053,13 +1041,10 @@ def squeeze(a: _ScalarGeneric, axis: Optional[_ShapeLike] = ...) -> _ScalarGener
@overload
def squeeze(a: ArrayLike, axis: Optional[_ShapeLike] = ...) -> ndarray: ...
def diagonal(
- a: Union[Sequence[Sequence[ArrayLike]], ndarray], # >= 2D array
- offset: int = ...,
- axis1: int = ...,
- axis2: int = ...,
+ a: ArrayLike, offset: int = ..., axis1: int = ..., axis2: int = ... # >= 2D array
) -> ndarray: ...
def trace(
- a: Union[Sequence[Sequence[ArrayLike]], ndarray], # >= 2D array
+ a: ArrayLike, # >= 2D array
offset: int = ...,
axis1: int = ...,
axis2: int = ...,
@@ -1070,7 +1055,7 @@ def ravel(a: ArrayLike, order: _Order = ...) -> ndarray: ...
def nonzero(a: ArrayLike) -> Tuple[ndarray, ...]: ...
def shape(a: ArrayLike) -> _Shape: ...
def compress(
- condition: Union[Sequence[_Bool], ndarray], # 1D bool array
+ condition: ArrayLike, # 1D bool array
a: ArrayLike,
axis: Optional[int] = ...,
out: Optional[ndarray] = ...,
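A sketch of why the stub parameters above were renamed to `__value`: type checkers treat double-underscore parameter names in stubs as positional-only, so keyword use of the scalar constructors is rejected while positional use still checks (assuming mypy with these stubs; see the typing tests at the end of this diff):

    import numpy as np

    np.float64(0.0)        # accepted: positional argument
    np.float64(value=0.0)  # flagged by the type checker: __value is positional-only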
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 688238af3..d0ed3d381 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1525,7 +1525,7 @@ add_newdoc('numpy.core.multiarray', 'c_einsum',
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
- 'K' means it should be as close to the layout as the inputs as
+ 'K' means it should be as close to the layout of the inputs as
is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
@@ -3936,18 +3936,17 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tobytes', """
Construct Python bytes containing the raw data bytes in the array.
Constructs Python bytes showing a copy of the raw contents of
- data memory. The bytes object can be produced in either 'C' or 'Fortran',
- or 'Any' order (the default is 'C'-order). 'Any' order means C-order
- unless the F_CONTIGUOUS flag in the array is set, in which case it
- means 'Fortran' order.
+ data memory. The bytes object is produced in C-order by default.
+ This behavior is controlled by the ``order`` parameter.
.. versionadded:: 1.9.0
Parameters
----------
- order : {'C', 'F', None}, optional
- Order of the data for multidimensional arrays:
- C, Fortran, or the same as for the original array.
+ order : {'C', 'F', 'A'}, optional
+ Controls the memory layout of the bytes object. 'C' means C-order,
+ 'F' means F-order, 'A' (short for *Any*) means 'F' if `a` is
+ Fortran contiguous, 'C' otherwise. Default is 'C'.
Returns
-------
@@ -5142,7 +5141,7 @@ add_newdoc('numpy.core', 'ufunc', ('at',
add_newdoc('numpy.core.multiarray', 'dtype',
"""
- dtype(obj, align=False, copy=False)
+ dtype(dtype, align=False, copy=False)
Create a data type object.
@@ -5152,7 +5151,7 @@ add_newdoc('numpy.core.multiarray', 'dtype',
Parameters
----------
- obj
+ dtype
Object to be converted to a data type object.
align : bool, optional
Add padding to the fields to match what a C compiler would output
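A short sketch of the `tobytes` ordering behavior documented above:

    import numpy as np

    a = np.arange(6).reshape(2, 3)
    a.tobytes()            # C-order copy of the data (the default)
    a.tobytes(order='F')   # Fortran-order copy
    a.tobytes(order='A')   # 'F' if a is Fortran contiguous, 'C' otherwise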
diff --git a/numpy/core/_type_aliases.py b/numpy/core/_type_aliases.py
index c26431443..de90fd818 100644
--- a/numpy/core/_type_aliases.py
+++ b/numpy/core/_type_aliases.py
@@ -11,40 +11,19 @@ and sometimes other mappings too.
.. data:: sctypeDict
Similar to `allTypes`, but maps a broader set of aliases to their types.
-.. data:: sctypeNA
- NumArray-compatible names for the scalar types. Contains not only
- ``name: type`` mappings, but ``char: name`` mappings too.
-
- .. deprecated:: 1.16
-
.. data:: sctypes
A dictionary keyed by a "type group" string, providing a list of types
under that group.
"""
-import warnings
from numpy.compat import unicode
-from numpy._globals import VisibleDeprecationWarning
-from numpy.core._string_helpers import english_lower, english_capitalize
+from numpy.core._string_helpers import english_lower
from numpy.core.multiarray import typeinfo, dtype
from numpy.core._dtype import _kind_name
sctypeDict = {} # Contains all leaf-node scalar types with aliases
-class TypeNADict(dict):
- def __getitem__(self, key):
- # 2018-06-24, 1.16
- warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
- 'of numpy', VisibleDeprecationWarning, stacklevel=2)
- return dict.__getitem__(self, key)
- def get(self, key, default=None):
- # 2018-06-24, 1.16
- warnings.warn('sctypeNA and typeNA will be removed in v1.18 '
- 'of numpy', VisibleDeprecationWarning, stacklevel=2)
- return dict.get(self, key, default)
-
-sctypeNA = TypeNADict() # Contails all leaf-node types -> numarray type equivalences
allTypes = {} # Collect the types we will add to the module
@@ -127,27 +106,24 @@ def _add_aliases():
if name in ('longdouble', 'clongdouble') and myname in allTypes:
continue
- base_capitalize = english_capitalize(base)
- if base == 'complex':
- na_name = '%s%d' % (base_capitalize, bit//2)
- elif base == 'bool':
- na_name = base_capitalize
- else:
- na_name = "%s%d" % (base_capitalize, bit)
-
allTypes[myname] = info.type
# add mapping for both the bit name and the numarray name
sctypeDict[myname] = info.type
- sctypeDict[na_name] = info.type
# add forward, reverse, and string mapping to numarray
- sctypeNA[na_name] = info.type
- sctypeNA[info.type] = na_name
- sctypeNA[info.char] = na_name
-
sctypeDict[char] = info.type
- sctypeNA[char] = na_name
+
+ # Add deprecated numeric-style type aliases manually; at some point
+ # we may want to deprecate the lower case "bytes0" version as well.
+ for name in ["Bytes0", "Datetime64", "Str0", "Uint32", "Uint64"]:
+ if english_lower(name) not in allTypes:
+ # Only one of Uint32 or Uint64, aliases of `np.uintp`, was (and is) defined;
+ # note that this is not UInt32/UInt64 (capital i), which is removed.
+ continue
+ allTypes[name] = allTypes[english_lower(name)]
+ sctypeDict[name] = sctypeDict[english_lower(name)]
+
_add_aliases()
def _add_integer_aliases():
@@ -157,20 +133,15 @@ def _add_integer_aliases():
u_info = _concrete_typeinfo[u_ctype]
bits = i_info.bits # same for both
- for info, charname, intname, Intname in [
- (i_info,'i%d' % (bits//8,), 'int%d' % bits, 'Int%d' % bits),
- (u_info,'u%d' % (bits//8,), 'uint%d' % bits, 'UInt%d' % bits)]:
+ for info, charname, intname in [
+ (i_info,'i%d' % (bits//8,), 'int%d' % bits),
+ (u_info,'u%d' % (bits//8,), 'uint%d' % bits)]:
if bits not in seen_bits:
# sometimes two different types have the same number of bits
# if so, the one iterated over first takes precedence
allTypes[intname] = info.type
sctypeDict[intname] = info.type
- sctypeDict[Intname] = info.type
sctypeDict[charname] = info.type
- sctypeNA[Intname] = info.type
- sctypeNA[charname] = info.type
- sctypeNA[info.type] = Intname
- sctypeNA[info.char] = Intname
seen_bits.add(bits)
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 2b88ccedf..412d9fe6a 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -649,6 +649,10 @@ def transpose(a, axes=None):
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
+ >>> x = np.ones((2, 3, 4, 5))
+ >>> np.transpose(x).shape
+ (5, 4, 3, 2)
+
"""
return _wrapfunc(a, 'transpose', axes)
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 9e46f0ea5..f57e95742 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -52,8 +52,10 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
If True, return (`samples`, `step`), where `step` is the spacing
between samples.
dtype : dtype, optional
- The type of the output array. If `dtype` is not given, infer the data
- type from the other input arguments.
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred dtype will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
.. versionadded:: 1.9.0
@@ -202,8 +204,10 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
- The type of the output array. If `dtype` is not given, infer the data
- type from the other input arguments.
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred type will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
@@ -297,8 +301,10 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
dtype : dtype
- The type of the output array. If `dtype` is not given, infer the data
- type from the other input arguments.
+ The type of the output array. If `dtype` is not given, the data type
+ is inferred from `start` and `stop`. The inferred dtype will never be
+ an integer; `float` is chosen even if the arguments would produce an
+ array of integers.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
@@ -408,8 +414,18 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
log_start = _nx.log10(start)
log_stop = _nx.log10(stop)
- result = out_sign * logspace(log_start, log_stop, num=num,
- endpoint=endpoint, base=10.0, dtype=dtype)
+ result = logspace(log_start, log_stop, num=num,
+ endpoint=endpoint, base=10.0, dtype=dtype)
+
+ # Make sure the endpoints match the start and stop arguments. This is
+ # necessary because np.exp(np.log(x)) is not necessarily equal to x.
+ if num > 0:
+ result[0] = start
+ if num > 1 and endpoint:
+ result[-1] = stop
+
+ result = out_sign * result
+
if axis != 0:
result = _nx.moveaxis(result, 0, axis)
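A small sketch of the effect of the endpoint fix-up and of applying the sign after it:

    import numpy as np

    y = np.geomspace(0.3, 20.3, num=3)
    assert y[0] == 0.3 and y[-1] == 20.3   # endpoints now match start/stop exactly

    np.geomspace(-100, -1, num=3)          # array([-100., -10., -1.])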
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index aac741612..2a015f48f 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -91,7 +91,7 @@ from numpy.core.multiarray import (
from numpy.core.overrides import set_module
# we add more at the bottom
-__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
+__all__ = ['sctypeDict', 'typeDict', 'sctypes',
'ScalarType', 'obj2sctype', 'cast', 'nbytes', 'sctype2char',
'maximum_sctype', 'issctype', 'typecodes', 'find_common_type',
'issubdtype', 'datetime_data', 'datetime_as_string',
@@ -106,7 +106,6 @@ from ._string_helpers import (
from ._type_aliases import (
sctypeDict,
- sctypeNA,
allTypes,
bitname,
sctypes,
@@ -512,7 +511,6 @@ typecodes = {'Character':'c',
# backwards compatibility --- deprecated name
typeDict = sctypeDict
-typeNA = sctypeNA
# b -> boolean
# u -> unsigned integer
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 3c3bcb387..c7d149577 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -3580,7 +3580,7 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char const *sep, size_t *nre
npy_intp i;
char *dptr, *clean_sep, *tmp;
int err = 0;
- int stop_reading_flag; /* -1 indicates end reached; -2 a parsing error */
+ int stop_reading_flag = 0; /* -1 means end reached; -2 a parsing error */
npy_intp thisbuf = 0;
npy_intp size;
npy_intp bytes, totalbytes;
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 8d884bc00..4ebd5640d 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -1678,14 +1678,14 @@ _convert_from_str(PyObject *obj, int align)
}
/* Check for a deprecated Numeric-style typecode */
- char *dep_tps[] = {"Bool", "Complex", "Float", "Int",
- "Object0", "String0", "Timedelta64",
- "Unicode0", "UInt", "Void0"};
+ /* `Uint` has deliberately weird uppercasing */
+ char *dep_tps[] = {"Bytes", "Datetime64", "Str", "Uint"};
int ndep_tps = sizeof(dep_tps) / sizeof(dep_tps[0]);
for (int i = 0; i < ndep_tps; ++i) {
char *dep_tp = dep_tps[i];
if (strncmp(type, dep_tp, strlen(dep_tp)) == 0) {
+ /* Deprecated 2020-06-09, NumPy 1.20 */
if (DEPRECATE("Numeric-style type codes are "
"deprecated and will result in "
"an error in the future.") < 0) {
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index b914e5bb3..2538e05c6 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -31,9 +31,6 @@
#define EINSUM_USE_SSE1 0
#endif
-/*
- * TODO: Only some SSE2 for float64 is implemented.
- */
#ifdef NPY_HAVE_SSE2_INTRINSICS
#define EINSUM_USE_SSE2 1
#else
@@ -276,6 +273,8 @@ static void
#if EINSUM_USE_SSE1 && @float32@
__m128 a, b;
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, b;
#endif
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_two (%d)\n",
@@ -319,6 +318,29 @@ finish_after_unrolled_loop:
/* Finish off the loop */
goto finish_after_unrolled_loop;
}
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data1) &&
+ EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_load_pd(data0+@i@), _mm_load_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
+ _mm_store_pd(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data1 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
#endif
/* Unroll the loop by 8 */
@@ -333,6 +355,14 @@ finish_after_unrolled_loop:
b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
_mm_storeu_ps(data_out+@i@, b);
/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), _mm_loadu_pd(data1+@i@));
+ b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
+ _mm_storeu_pd(data_out+@i@, b);
+/**end repeat2**/
#else
/**begin repeat2
* #i = 0, 1, 2, 3, 4, 5, 6, 7#
@@ -491,6 +521,8 @@ static void
#if EINSUM_USE_SSE1 && @float32@
__m128 a, b, value1_sse;
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, b, value1_sse;
#endif
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outcontig_two (%d)\n",
@@ -534,6 +566,29 @@ finish_after_unrolled_loop:
/* Finish off the loop */
goto finish_after_unrolled_loop;
}
+#elif EINSUM_USE_SSE2 && @float64@
+ value1_sse = _mm_set1_pd(value1);
+
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0) && EINSUM_IS_SSE_ALIGNED(data_out)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_load_pd(data0+@i@), value1_sse);
+ b = _mm_add_pd(a, _mm_load_pd(data_out+@i@));
+ _mm_store_pd(data_out+@i@, b);
+/**end repeat2**/
+ data0 += 8;
+ data_out += 8;
+ }
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
#endif
/* Unroll the loop by 8 */
@@ -548,6 +603,14 @@ finish_after_unrolled_loop:
b = _mm_add_ps(a, _mm_loadu_ps(data_out+@i@));
_mm_storeu_ps(data_out+@i@, b);
/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ a = _mm_mul_pd(_mm_loadu_pd(data0+@i@), value1_sse);
+ b = _mm_add_pd(a, _mm_loadu_pd(data_out+@i@));
+ _mm_storeu_pd(data_out+@i@, b);
+/**end repeat2**/
#else
/**begin repeat2
* #i = 0, 1, 2, 3, 4, 5, 6, 7#
@@ -735,6 +798,8 @@ static void
#if EINSUM_USE_SSE1 && @float32@
__m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
#endif
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_stride0_contig_outstride0_two (%d)\n",
@@ -772,15 +837,38 @@ finish_after_unrolled_loop:
/**end repeat2**/
data1 += 8;
}
-
-#if EINSUM_USE_SSE1 && @float32@
/* Add the four SSE values and put in accum */
a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
accum_sse = _mm_add_ps(a, accum_sse);
a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
accum_sse = _mm_add_ps(a, accum_sse);
_mm_store_ss(&accum, accum_sse);
-#endif
+
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data1)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data1+@i@));
+/**end repeat2**/
+ data1 += 8;
+ }
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
/* Finish off the loop */
goto finish_after_unrolled_loop;
@@ -801,6 +889,16 @@ finish_after_unrolled_loop:
*/
accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data1+@i@));
/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data1+@i@));
+/**end repeat2**/
#else
/**begin repeat2
* #i = 0, 1, 2, 3, 4, 5, 6, 7#
@@ -818,6 +916,11 @@ finish_after_unrolled_loop:
a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
accum_sse = _mm_add_ps(a, accum_sse);
_mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
#endif
/* Finish off the loop */
@@ -834,6 +937,8 @@ static void
#if EINSUM_USE_SSE1 && @float32@
__m128 a, accum_sse = _mm_setzero_ps();
+#elif EINSUM_USE_SSE2 && @float64@
+ __m128d a, accum_sse = _mm_setzero_pd();
#endif
NPY_EINSUM_DBG_PRINT1("@name@_sum_of_products_contig_stride0_outstride0_two (%d)\n",
@@ -871,16 +976,37 @@ finish_after_unrolled_loop:
/**end repeat2**/
data0 += 8;
}
-
-#if EINSUM_USE_SSE1 && @float32@
/* Add the four SSE values and put in accum */
a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(2,3,0,1));
accum_sse = _mm_add_ps(a, accum_sse);
a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
accum_sse = _mm_add_ps(a, accum_sse);
_mm_store_ss(&accum, accum_sse);
-#endif
+ /* Finish off the loop */
+ goto finish_after_unrolled_loop;
+ }
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Use aligned instructions if possible */
+ if (EINSUM_IS_SSE_ALIGNED(data0)) {
+ /* Unroll the loop by 8 */
+ while (count >= 8) {
+ count -= 8;
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_load_pd(data0+@i@));
+/**end repeat2**/
+ data0 += 8;
+ }
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
/* Finish off the loop */
goto finish_after_unrolled_loop;
}
@@ -900,6 +1026,16 @@ finish_after_unrolled_loop:
*/
accum_sse = _mm_add_ps(accum_sse, _mm_loadu_ps(data0+@i@));
/**end repeat2**/
+#elif EINSUM_USE_SSE2 && @float64@
+/**begin repeat2
+ * #i = 0, 2, 4, 6#
+ */
+ /*
+ * NOTE: This accumulation changes the order, so will likely
+ * produce slightly different results.
+ */
+ accum_sse = _mm_add_pd(accum_sse, _mm_loadu_pd(data0+@i@));
+/**end repeat2**/
#else
/**begin repeat2
* #i = 0, 1, 2, 3, 4, 5, 6, 7#
@@ -917,6 +1053,11 @@ finish_after_unrolled_loop:
a = _mm_shuffle_ps(accum_sse, accum_sse, _MM_SHUFFLE(1,0,3,2));
accum_sse = _mm_add_ps(a, accum_sse);
_mm_store_ss(&accum, accum_sse);
+#elif EINSUM_USE_SSE2 && @float64@
+ /* Add the two SSE2 values and put in accum */
+ a = _mm_shuffle_pd(accum_sse, accum_sse, _MM_SHUFFLE2(0,1));
+ accum_sse = _mm_add_pd(a, accum_sse);
+ _mm_store_sd(&accum, accum_sse);
#endif
/* Finish off the loop */
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 7aefbfc38..f73cb48d9 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -2480,8 +2480,6 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit)
int i;
NPY_BEGIN_THREADS_DEF;
- intp_type = PyArray_DescrFromType(NPY_INTP);
-
if (NpyIter_GetIterSize(mit->outer) == 0) {
/*
* When the outer iteration is empty, the indices broadcast to an
@@ -2493,6 +2491,8 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit)
return 0;
}
+ intp_type = PyArray_DescrFromType(NPY_INTP);
+
NPY_BEGIN_THREADS;
for (i=0; i < mit->numiter; i++) {
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 48e89915c..e6414e29e 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -2698,17 +2698,17 @@ static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
/* process elements using glibc for large elements */
if (my_trig_op == npy_compute_cos) {
- for (int ii = 0; iglibc_mask != 0; ii++) {
+ for (int ii = 0, jj = 0; iglibc_mask != 0; ii++, jj += stride) {
if (iglibc_mask & 0x01) {
- op[ii] = npy_cosf(ip[ii]);
+ op[ii] = npy_cosf(ip[jj]);
}
iglibc_mask = iglibc_mask >> 1;
}
}
else {
- for (int ii = 0; iglibc_mask != 0; ii++) {
+ for (int ii = 0, jj = 0; iglibc_mask != 0; ii++, jj += stride) {
if (iglibc_mask & 0x01) {
- op[ii] = npy_sinf(ip[ii]);
+ op[ii] = npy_sinf(ip[jj]);
}
iglibc_mask = iglibc_mask >> 1;
}
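The fix above matters when the input is strided and large values push elements onto the glibc fallback: previously the fallback read the input with the packed output index instead of the strided one. A small Python sketch of the affected case (mirroring the test change further below):

    import numpy as np

    x = np.float32(np.random.uniform(0.01, 88.1, size=64))
    x[3::4] = 120000.0                     # large values force the glibc fallback
    for stride in (-2, 3, 5):
        assert np.allclose(np.sin(x[::stride]), np.sin(x)[::stride])
        assert np.allclose(np.cos(x[::stride]), np.cos(x)[::stride])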
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 2600d409a..5d079d9d2 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -291,7 +291,7 @@ def test_array_astype_warning(t):
@pytest.mark.parametrize(["dtype", "out_dtype"],
[(np.bytes_, np.bool_),
- (np.unicode, np.bool_),
+ (np.unicode_, np.bool_),
(np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast(dtype, out_dtype):
"""
@@ -305,7 +305,7 @@ def test_string_to_boolean_cast(dtype, out_dtype):
@pytest.mark.parametrize(["dtype", "out_dtype"],
[(np.bytes_, np.bool_),
- (np.unicode, np.bool_),
+ (np.unicode_, np.bool_),
(np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast_errors(dtype, out_dtype):
"""
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 01924410f..239d20c9d 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -9,6 +9,7 @@ import warnings
import pytest
import tempfile
import re
+import sys
import numpy as np
from numpy.testing import (
@@ -313,19 +314,14 @@ class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTest
class TestNumericStyleTypecodes(_DeprecationTestCase):
"""
- Deprecate the old numeric-style dtypes, which are especially
- confusing for complex types, e.g. Complex32 -> complex64. When the
- deprecation cycle is complete, the check for the strings should be
- removed from PyArray_DescrConverter in descriptor.c, and the
- deprecated keys should not be added as capitalized aliases in
- _add_aliases in numerictypes.py.
+ Most numeric style typecodes were previously deprecated (and removed)
+ in 1.20. This also deprecates the remaining ones.
"""
+ # 2020-06-09, NumPy 1.20
def test_all_dtypes(self):
- deprecated_types = [
- 'Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
- 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
- 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0'
- ]
+ deprecated_types = ['Bytes0', 'Datetime64', 'Str0']
+ # Depending on intp size, either Uint32 or Uint64 is defined:
+ deprecated_types.append(f"U{np.dtype(np.intp).name}")
for dt in deprecated_types:
self.assert_deprecated(np.dtype, exceptions=(TypeError,),
args=(dt,))
@@ -438,14 +434,6 @@ class TestGeneratorSum(_DeprecationTestCase):
self.assert_deprecated(np.sum, args=((i for i in range(5)),))
-class TestSctypeNA(_VisibleDeprecationTestCase):
- # 2018-06-24, 1.16
- def test_sctypeNA(self):
- self.assert_deprecated(lambda: np.sctypeNA['?'])
- self.assert_deprecated(lambda: np.typeNA['?'])
- self.assert_deprecated(lambda: np.typeNA.get('?'))
-
-
class TestPositiveOnNonNumerical(_DeprecationTestCase):
# 2018-06-28, 1.16.0
def test_positive_on_non_number(self):
@@ -655,3 +643,22 @@ class TestNonExactMatchDeprecation(_DeprecationTestCase):
self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp'))
# using completely different word with first character as R
self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random'))
+
+
+class TestDeprecatedGlobals(_DeprecationTestCase):
+ # 2020-06-06
+ @pytest.mark.skipif(
+ sys.version_info < (3, 7),
+ reason='module-level __getattr__ not supported')
+ def test_type_aliases(self):
+ # from builtins
+ self.assert_deprecated(lambda: np.bool)
+ self.assert_deprecated(lambda: np.int)
+ self.assert_deprecated(lambda: np.float)
+ self.assert_deprecated(lambda: np.complex)
+ self.assert_deprecated(lambda: np.object)
+ self.assert_deprecated(lambda: np.str)
+
+ # from np.compat
+ self.assert_deprecated(lambda: np.long)
+ self.assert_deprecated(lambda: np.unicode)
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index 73aa01de6..2e2b0dbe2 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -86,6 +86,15 @@ class TestBuiltin:
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
+ @pytest.mark.parametrize("dtype",
+ ['Bool', 'Complex32', 'Complex64', 'Float16', 'Float32', 'Float64',
+ 'Int8', 'Int16', 'Int32', 'Int64', 'Object0', 'Timedelta64',
+ 'UInt8', 'UInt16', 'UInt32', 'UInt64', 'Void0',
+ "Float128", "Complex128"])
+ def test_numeric_style_types_are_invalid(self, dtype):
+ with assert_raises(TypeError):
+ np.dtype(dtype)
+
@pytest.mark.parametrize(
'value',
['m8', 'M8', 'datetime64', 'timedelta64',
@@ -1047,6 +1056,11 @@ def test_invalid_dtype_string():
assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
+def test_keyword_argument():
+ # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
+ assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
+
+
class TestFromDTypeAttribute:
def test_simple(self):
class dt:
@@ -1324,4 +1338,3 @@ class TestFromCTypes:
pair_type = np.dtype('{},{}'.format(*pair))
expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
assert_equal(pair_type, expected)
-
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 2197ef0cd..62a9772c8 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -1,6 +1,6 @@
from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
- ndarray, sqrt, nextafter, stack
+ ndarray, sqrt, nextafter, stack, errstate
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
@@ -113,6 +113,40 @@ class TestGeomspace:
assert_array_equal(y, [-100, -10, -1])
assert_array_equal(y.imag, 0)
+ def test_boundaries_match_start_and_stop_exactly(self):
+ # make sure that the boundaries of the returned array exactly
+ # equal 'start' and 'stop' - this isn't obvious because
+ # np.exp(np.log(x)) isn't necessarily exactly equal to x
+ start = 0.3
+ stop = 20.3
+
+ y = geomspace(start, stop, num=1)
+ assert_equal(y[0], start)
+
+ y = geomspace(start, stop, num=1, endpoint=False)
+ assert_equal(y[0], start)
+
+ y = geomspace(start, stop, num=3)
+ assert_equal(y[0], start)
+ assert_equal(y[-1], stop)
+
+ y = geomspace(start, stop, num=3, endpoint=False)
+ assert_equal(y[0], start)
+
+ def test_nan_interior(self):
+ with errstate(invalid='ignore'):
+ y = geomspace(-3, 3, num=4)
+
+ assert_equal(y[0], -3.0)
+ assert_(isnan(y[1:-1]).all())
+ assert_equal(y[3], 3.0)
+
+ with errstate(invalid='ignore'):
+ y = geomspace(-3, 3, num=4, endpoint=False)
+
+ assert_equal(y[0], -3.0)
+ assert_(isnan(y[1:]).all())
+
def test_complex(self):
# Purely imaginary
y = geomspace(1j, 16j, num=5)
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index e116077f9..09adddf6d 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -4702,6 +4702,10 @@ class TestIO:
e = np.array([-25041670086757, 104783749223640], dtype=np.int64)
assert_array_equal(d, e)
+ def test_fromstring_count0(self):
+ d = np.fromstring("1,2", sep=",", dtype=np.int64, count=0)
+ assert d.shape == (0,)
+
def test_empty_files_binary(self):
with open(self.filename, 'w') as f:
pass
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 96a6d810f..cf18a5d93 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -42,13 +42,6 @@ class TestRegression:
b = pickle.load(f)
assert_array_equal(a, b)
- def test_typeNA(self):
- # Issue gh-515
- with suppress_warnings() as sup:
- sup.filter(np.VisibleDeprecationWarning)
- assert_equal(np.typeNA[np.int64], 'Int64')
- assert_equal(np.typeNA[np.uint64], 'UInt64')
-
def test_dtype_names(self):
# Ticket #35
# Should succeed
diff --git a/numpy/core/tests/test_scalar_ctors.py b/numpy/core/tests/test_scalar_ctors.py
index 7645a0853..cd518274a 100644
--- a/numpy/core/tests/test_scalar_ctors.py
+++ b/numpy/core/tests/test_scalar_ctors.py
@@ -65,7 +65,7 @@ class TestExtraArgs:
def test_bool(self):
with pytest.raises(TypeError):
- np.bool(False, garbage=True)
+ np.bool_(False, garbage=True)
def test_void(self):
with pytest.raises(TypeError):
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 91acd6ac3..f836af168 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -890,15 +890,17 @@ class TestAVXFloat32Transcendental:
sizes = np.arange(2,100)
for ii in sizes:
x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
+ x_f32_large = x_f32.copy()
+ x_f32_large[3:-1:4] = 120000.0
exp_true = np.exp(x_f32)
log_true = np.log(x_f32)
- sin_true = np.sin(x_f32)
- cos_true = np.cos(x_f32)
+ sin_true = np.sin(x_f32_large)
+ cos_true = np.cos(x_f32_large)
for jj in strides:
assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
- assert_array_almost_equal_nulp(np.sin(x_f32[::jj]), sin_true[::jj], nulp=2)
- assert_array_almost_equal_nulp(np.cos(x_f32[::jj]), cos_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.sin(x_f32_large[::jj]), sin_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.cos(x_f32_large[::jj]), cos_true[::jj], nulp=2)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py
index e3c2eb025..33080edbb 100644
--- a/numpy/core/tests/test_umath_accuracy.py
+++ b/numpy/core/tests/test_umath_accuracy.py
@@ -57,9 +57,3 @@ class TestAccuracy:
outval = outval[perm]
maxulperr = data_subset['ulperr'].max()
assert_array_max_ulp(npfunc(inval), outval, maxulperr)
-
- def test_ignore_nan_ulperror(self):
- # Ignore ULP differences between various NAN's
- nan1_f32 = np.array(str_to_float('0xffffffff'), dtype=np.float32)
- nan2_f32 = np.array(str_to_float('0x7fddbfbf'), dtype=np.float32)
- assert_array_max_ulp(nan1_f32, nan2_f32, 0)
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index a21158420..a626219c5 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -545,25 +545,25 @@ class TestSpecialComplexAVX(object):
@pytest.mark.parametrize("stride", [-4,-2,-1,1,2,4])
@pytest.mark.parametrize("astype", [np.complex64, np.complex128])
def test_array(self, stride, astype):
- arr = np.array([np.complex(np.nan , np.nan),
- np.complex(np.nan , np.inf),
- np.complex(np.inf , np.nan),
- np.complex(np.inf , np.inf),
- np.complex(0. , np.inf),
- np.complex(np.inf , 0.),
- np.complex(0. , 0.),
- np.complex(0. , np.nan),
- np.complex(np.nan , 0.)], dtype=astype)
+ arr = np.array([complex(np.nan , np.nan),
+ complex(np.nan , np.inf),
+ complex(np.inf , np.nan),
+ complex(np.inf , np.inf),
+ complex(0. , np.inf),
+ complex(np.inf , 0.),
+ complex(0. , 0.),
+ complex(0. , np.nan),
+ complex(np.nan , 0.)], dtype=astype)
abs_true = np.array([np.nan, np.inf, np.inf, np.inf, np.inf, np.inf, 0., np.nan, np.nan], dtype=arr.real.dtype)
- sq_true = np.array([np.complex(np.nan, np.nan),
- np.complex(np.nan, np.nan),
- np.complex(np.nan, np.nan),
- np.complex(np.nan, np.inf),
- np.complex(-np.inf, np.nan),
- np.complex(np.inf, np.nan),
- np.complex(0., 0.),
- np.complex(np.nan, np.nan),
- np.complex(np.nan, np.nan)], dtype=astype)
+ sq_true = np.array([complex(np.nan, np.nan),
+ complex(np.nan, np.nan),
+ complex(np.nan, np.nan),
+ complex(np.nan, np.inf),
+ complex(-np.inf, np.nan),
+ complex(np.inf, np.nan),
+ complex(0., 0.),
+ complex(np.nan, np.nan),
+ complex(np.nan, np.nan)], dtype=astype)
assert_equal(np.abs(arr[::stride]), abs_true[::stride])
with np.errstate(invalid='ignore'):
assert_equal(np.square(arr[::stride]), sq_true[::stride])
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index db563e30c..e0f723a3c 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -957,7 +957,7 @@ def test__replace_nan():
""" Test that _replace_nan returns the original array if there are no
NaNs, not a copy.
"""
- for dtype in [np.bool, np.int32, np.int64]:
+ for dtype in [np.bool_, np.int32, np.int64]:
arr = np.array([0, 1], dtype=dtype)
result, mask = _replace_nan(arr, 0)
assert mask is None
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index f86ebf551..8ede29da1 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -244,11 +244,6 @@ class _fromnxfunction:
the new masked array version of the function. A note on application
of the function to the mask is appended.
- .. warning::
- If the function docstring already contained a Notes section, the
- new docstring will have two Notes sections instead of appending a note
- to the existing section.
-
Parameters
----------
None
@@ -258,9 +253,9 @@ class _fromnxfunction:
doc = getattr(npfunc, '__doc__', None)
if doc:
sig = self.__name__ + ma.get_object_signature(npfunc)
- locdoc = "Notes\n-----\nThe function is applied to both the _data"\
- " and the _mask, if any."
- return '\n'.join((sig, doc, locdoc))
+ doc = ma.doc_note(doc, "The function is applied to both the _data "
+ "and the _mask, if any.")
+ return '\n\n'.join((sig, doc))
return
def __call__(self, *args, **params):
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index 30887b670..b5341ba37 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -919,10 +919,8 @@ class ABCPolyBase(abc.ABC):
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
- y : array_like, shape (M,) or (M, K)
- y-coordinates of the sample points. Several data sets of sample
- points sharing the same x-coordinates can be fitted at once by
- passing in a 2D-array that contains one dataset per column.
+ y : array_like, shape (M,)
+ y-coordinates of the M sample points ``(x[i], y[i])``.
deg : int or 1-D array_like
Degree(s) of the fitting polynomials. If `deg` is a single integer
all terms up to and including the `deg`'th term are included in the
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
index f145ec13d..3c52a9933 100644
--- a/numpy/random/bit_generator.pyx
+++ b/numpy/random/bit_generator.pyx
@@ -382,13 +382,22 @@ cdef class SeedSequence():
-------
entropy_array : 1D uint32 array
"""
- # Convert run-entropy, program-entropy, and the spawn key into uint32
+ # Convert run-entropy and the spawn key into uint32
# arrays and concatenate them.
# We MUST have at least some run-entropy. The others are optional.
assert self.entropy is not None
run_entropy = _coerce_to_uint32_array(self.entropy)
spawn_entropy = _coerce_to_uint32_array(self.spawn_key)
+ if len(spawn_entropy) > 0 and len(run_entropy) < self.pool_size:
+ # Explicitly fill out the entropy with 0s to the pool size to avoid
+ # conflict with spawn keys. We changed this in 1.19.0 to fix
+ # gh-16539. In order to preserve stream-compatibility with
+ # unspawned SeedSequences with small entropy inputs, we only do
+ # this when a spawn_key is specified.
+ diff = self.pool_size - len(run_entropy)
+ run_entropy = np.concatenate(
+ [run_entropy, np.zeros(diff, dtype=np.uint32)])
entropy_array = np.concatenate([run_entropy, spawn_entropy])
return entropy_array
diff --git a/numpy/random/tests/test_seed_sequence.py b/numpy/random/tests/test_seed_sequence.py
index fe23680ed..f08cf80fa 100644
--- a/numpy/random/tests/test_seed_sequence.py
+++ b/numpy/random/tests/test_seed_sequence.py
@@ -1,5 +1,5 @@
import numpy as np
-from numpy.testing import assert_array_equal
+from numpy.testing import assert_array_equal, assert_array_compare
from numpy.random import SeedSequence
@@ -52,3 +52,29 @@ def test_reference_data():
assert_array_equal(state, expected)
state64 = ss.generate_state(len(expected64), dtype=np.uint64)
assert_array_equal(state64, expected64)
+
+
+def test_zero_padding():
+ """ Ensure that the implicit zero-padding does not cause problems.
+ """
+ # Ensure that large integers are inserted in little-endian fashion to avoid
+ # trailing 0s.
+ ss0 = SeedSequence(42)
+ ss1 = SeedSequence(42 << 32)
+ assert_array_compare(
+ np.not_equal,
+ ss0.generate_state(4),
+ ss1.generate_state(4))
+
+ # Ensure backwards compatibility with the original 0.17 release for small
+ # integers and no spawn key.
+ expected42 = np.array([3444837047, 2669555309, 2046530742, 3581440988],
+ dtype=np.uint32)
+ assert_array_equal(SeedSequence(42).generate_state(4), expected42)
+
+ # Regression test for gh-16539 to ensure that the implicit 0s don't
+ # conflict with spawn keys.
+ assert_array_compare(
+ np.not_equal,
+ SeedSequence(42, spawn_key=(0,)).generate_state(4),
+ expected42)
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index b899e94f4..6a6cc664a 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -941,6 +941,17 @@ class TestArrayAlmostEqualNulp:
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
+ def test_float64_ignore_nan(self):
+ # Ignore ULP differences between various NAN's
+ # Note that MIPS may reverse quiet and signaling nans
+ # so we use the builtin version as a base.
+ offset = np.uint64(0xffffffff)
+ nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
+ nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones.
+ nan1_f64 = nan1_i64.view(np.float64)
+ nan2_f64 = nan2_i64.view(np.float64)
+ assert_array_max_ulp(nan1_f64, nan2_f64, 0)
+
def test_float32_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
@@ -971,6 +982,17 @@ class TestArrayAlmostEqualNulp:
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
+ def test_float32_ignore_nan(self):
+ # Ignore ULP differences between various NAN's
+ # Note that MIPS may reverse quiet and signaling nans
+ # so we use the builtin version as a base.
+ offset = np.uint32(0xffff)
+ nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
+ nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones.
+ nan1_f32 = nan1_i32.view(np.float32)
+ nan2_f32 = nan2_i32.view(np.float32)
+ assert_array_max_ulp(nan1_f32, nan2_f32, 0)
+
def test_float16_pass(self):
nulp = 5
x = np.linspace(-4, 4, 10, dtype=np.float16)
@@ -1001,6 +1023,17 @@ class TestArrayAlmostEqualNulp:
assert_raises(AssertionError, assert_array_almost_equal_nulp,
x, y, nulp)
+ def test_float16_ignore_nan(self):
+ # Ignore ULP differences between various NAN's
+ # Note that MIPS may reverse quiet and signaling nans
+ # so we use the builtin version as a base.
+ offset = np.uint16(0xff)
+ nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
+ nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones.
+ nan1_f16 = nan1_i16.view(np.float16)
+ nan2_f16 = nan2_i16.view(np.float16)
+ assert_array_max_ulp(nan1_f16, nan2_f16, 0)
+
def test_complex128_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index beaf38e5a..df0e04285 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -54,18 +54,22 @@ def test_numpy_namespace():
'show_config': 'numpy.__config__.show',
'who': 'numpy.lib.utils.who',
}
- # These built-in types are re-exported by numpy.
- builtins = {
- 'bool': 'builtins.bool',
- 'complex': 'builtins.complex',
- 'float': 'builtins.float',
- 'int': 'builtins.int',
- 'long': 'builtins.int',
- 'object': 'builtins.object',
- 'str': 'builtins.str',
- 'unicode': 'builtins.str',
- }
- whitelist = dict(undocumented, **builtins)
+ if sys.version_info < (3, 7):
+ # These built-in types are re-exported by numpy.
+ builtins = {
+ 'bool': 'builtins.bool',
+ 'complex': 'builtins.complex',
+ 'float': 'builtins.float',
+ 'int': 'builtins.int',
+ 'long': 'builtins.int',
+ 'object': 'builtins.object',
+ 'str': 'builtins.str',
+ 'unicode': 'builtins.str',
+ }
+ whitelist = dict(undocumented, **builtins)
+ else:
+ # after 3.7, we override dir to not show these members
+ whitelist = undocumented
bad_results = check_dir(np)
# pytest gives better error messages with the builtin assert than with
# assert_equal
diff --git a/numpy/tests/typing/fail/fromnumeric.py b/numpy/tests/typing/fail/fromnumeric.py
index f158a1071..7455ce722 100644
--- a/numpy/tests/typing/fail/fromnumeric.py
+++ b/numpy/tests/typing/fail/fromnumeric.py
@@ -22,11 +22,9 @@ np.choose(A, mode="bob") # E: No overload variant of "choose" matches argument
np.repeat(a, None) # E: Argument 2 to "repeat" has incompatible type
np.repeat(A, 1, axis=1.0) # E: Argument "axis" to "repeat" has incompatible type
-np.swapaxes(a, 0, 0) # E: Argument 1 to "swapaxes" has incompatible type
np.swapaxes(A, None, 1) # E: Argument 2 to "swapaxes" has incompatible type
np.swapaxes(A, 1, [0]) # E: Argument 3 to "swapaxes" has incompatible type
-np.transpose(a, axes=1) # E: Argument "axes" to "transpose" has incompatible type
np.transpose(A, axes=1.0) # E: Argument "axes" to "transpose" has incompatible type
np.partition(a, None) # E: Argument 2 to "partition" has incompatible type
@@ -53,25 +51,20 @@ np.argpartition(
A, 0, order=range(5) # E: Argument "order" to "argpartition" has incompatible type
)
-np.sort(a) # E: Argument 1 to "sort" has incompatible type
np.sort(A, axis="bob") # E: Argument "axis" to "sort" has incompatible type
np.sort(A, kind="bob") # E: Argument "kind" to "sort" has incompatible type
np.sort(A, order=range(5)) # E: Argument "order" to "sort" has incompatible type
-np.argsort(a) # E: Argument 1 to "argsort" has incompatible type
np.argsort(A, axis="bob") # E: Argument "axis" to "argsort" has incompatible type
np.argsort(A, kind="bob") # E: Argument "kind" to "argsort" has incompatible type
np.argsort(A, order=range(5)) # E: Argument "order" to "argsort" has incompatible type
-np.argmax(a) # E: No overload variant of "argmax" matches argument type
np.argmax(A, axis="bob") # E: No overload variant of "argmax" matches argument type
np.argmax(A, kind="bob") # E: No overload variant of "argmax" matches argument type
-np.argmin(a) # E: No overload variant of "argmin" matches argument type
np.argmin(A, axis="bob") # E: No overload variant of "argmin" matches argument type
np.argmin(A, kind="bob") # E: No overload variant of "argmin" matches argument type
-np.searchsorted(a, 0) # E: No overload variant of "searchsorted" matches argument type
np.searchsorted( # E: No overload variant of "searchsorted" matches argument type
A[0], 0, side="bob"
)
@@ -83,19 +76,16 @@ np.resize(A, 1.0) # E: Argument 2 to "resize" has incompatible type
np.squeeze(A, 1.0) # E: No overload variant of "squeeze" matches argument type
-np.diagonal(a) # E: Argument 1 to "diagonal" has incompatible type
np.diagonal(A, offset=None) # E: Argument "offset" to "diagonal" has incompatible type
np.diagonal(A, axis1="bob") # E: Argument "axis1" to "diagonal" has incompatible type
np.diagonal(A, axis2=[]) # E: Argument "axis2" to "diagonal" has incompatible type
-np.trace(a) # E: Argument 1 to "trace" has incompatible type
np.trace(A, offset=None) # E: Argument "offset" to "trace" has incompatible type
np.trace(A, axis1="bob") # E: Argument "axis1" to "trace" has incompatible type
np.trace(A, axis2=[]) # E: Argument "axis2" to "trace" has incompatible type
np.ravel(a, order="bob") # E: Argument "order" to "ravel" has incompatible type
-np.compress(True, A) # E: Argument 1 to "compress" has incompatible type
np.compress(
[True], A, axis=1.0 # E: Argument "axis" to "compress" has incompatible type
)
diff --git a/numpy/tests/typing/fail/scalars.py b/numpy/tests/typing/fail/scalars.py
index 0dfc55124..5d7221895 100644
--- a/numpy/tests/typing/fail/scalars.py
+++ b/numpy/tests/typing/fail/scalars.py
@@ -65,3 +65,17 @@ np.floating(1) # E: Cannot instantiate abstract class
np.complexfloating(1) # E: Cannot instantiate abstract class
np.character("test") # E: Cannot instantiate abstract class
np.flexible(b"test") # E: Cannot instantiate abstract class
+
+np.float64(value=0.0) # E: Unexpected keyword argument
+np.int64(value=0) # E: Unexpected keyword argument
+np.uint64(value=0) # E: Unexpected keyword argument
+np.complex128(value=0.0j) # E: Unexpected keyword argument
+np.str_(value='bob') # E: No overload variant
+np.bytes_(value=b'test') # E: No overload variant
+np.void(value=b'test') # E: Unexpected keyword argument
+np.bool_(value=True) # E: Unexpected keyword argument
+np.datetime64(value="2019") # E: No overload variant
+np.timedelta64(value=0) # E: Unexpected keyword argument
+
+np.bytes_(b"hello", encoding='utf-8') # E: No overload variant
+np.str_("hello", encoding='utf-8') # E: No overload variant
diff --git a/numpy/tests/typing/pass/dtype.py b/numpy/tests/typing/pass/dtype.py
new file mode 100644
index 000000000..f954fdd44
--- /dev/null
+++ b/numpy/tests/typing/pass/dtype.py
@@ -0,0 +1,3 @@
+import numpy as np
+
+np.dtype(dtype=np.int64)
diff --git a/numpy/tests/typing/pass/scalars.py b/numpy/tests/typing/pass/scalars.py
index bd055673b..7de182626 100644
--- a/numpy/tests/typing/pass/scalars.py
+++ b/numpy/tests/typing/pass/scalars.py
@@ -34,7 +34,11 @@ np.float32(16)
np.float64(3.0)
np.bytes_(b"hello")
+np.bytes_("hello", 'utf-8')
+np.bytes_("hello", encoding='utf-8')
np.str_("hello")
+np.str_(b"hello", 'utf-8')
+np.str_(b"hello", encoding='utf-8')
# Protocols
float(np.int8(4))