h"]
+_IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
+_IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
+_IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
+_LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
+
+_UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
+_UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
+_UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
+_UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
+_UIntCodes = Literal["ulong", "uint", "L", "=L", "<L", ">L"]
+_ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
+
+_HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
+_SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
+_DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
+_LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
+
+_CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
+_CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
+_CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
+
+_StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
+_BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
+_VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
+_ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
+
+_DT64Codes = Literal[
+    "datetime64", "=datetime64", "<datetime64", ">datetime64",
+    "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
+    "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
+    "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
+    "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
+    "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
+    "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
+    "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
+    "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
+    "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
+    "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
+    "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
+    "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
+    "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
+    "M", "=M", "<M", ">M",
+    "M8", "=M8", "<M8", ">M8",
+    "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
+    "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
+    "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
+    "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
+    "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
+    "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
+    "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
+    "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
+    "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
+    "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
+    "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
+    "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
+    "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
+]
+_TD64Codes = Literal[
+    "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
+    "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
+    "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
+    "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
+    "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
+    "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
+    "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
+    "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
+    "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
+    "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
+    "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
+    "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
+    "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
+    "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
+    "m", "=m", "<m", ">m",
+    "m8", "=m8", "<m8", ">m8",
+    "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
+    "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
+    "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
+    "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
+    "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
+    "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
+    "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
+    "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
+    "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
+    "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
+    "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
+    "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
+    "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
+]
diff --git a/Lib/numpy/_typing/_dtype_like.py b/Lib/numpy/_typing/_dtype_like.py
new file mode 100644
index 00000000000000..e92e17dd20cf58
--- /dev/null
+++ b/Lib/numpy/_typing/_dtype_like.py
@@ -0,0 +1,249 @@
+from typing import (
+ Any,
+ List,
+ Sequence,
+ Tuple,
+ Union,
+ Type,
+ TypeVar,
+ Protocol,
+ TypedDict,
+ runtime_checkable,
+)
+
+import numpy as np
+
+from ._shape import _ShapeLike
+from ._generic_alias import _DType as DType
+
+from ._char_codes import (
+ _BoolCodes,
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _Float16Codes,
+ _Float32Codes,
+ _Float64Codes,
+ _Complex64Codes,
+ _Complex128Codes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntPCodes,
+ _IntCodes,
+ _LongLongCodes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntPCodes,
+ _UIntCodes,
+ _ULongLongCodes,
+ _HalfCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+ _CSingleCodes,
+ _CDoubleCodes,
+ _CLongDoubleCodes,
+ _DT64Codes,
+ _TD64Codes,
+ _StrCodes,
+ _BytesCodes,
+ _VoidCodes,
+ _ObjectCodes,
+)
+
+_SCT = TypeVar("_SCT", bound=np.generic)
+_DType_co = TypeVar("_DType_co", covariant=True, bound=DType[Any])
+
+_DTypeLikeNested = Any # TODO: wait for support for recursive types
+
+
+# Mandatory keys
+class _DTypeDictBase(TypedDict):
+ names: Sequence[str]
+ formats: Sequence[_DTypeLikeNested]
+
+
+# Mandatory + optional keys
+class _DTypeDict(_DTypeDictBase, total=False):
+ # Only `str` elements are usable as indexing aliases,
+ # but `titles` can in principle accept any object
+ offsets: Sequence[int]
+ titles: Sequence[Any]
+ itemsize: int
+ aligned: bool
+
+
+# A protocol for anything with the dtype attribute
+@runtime_checkable
+class _SupportsDType(Protocol[_DType_co]):
+ @property
+ def dtype(self) -> _DType_co: ...
+
+
+# A subset of `npt.DTypeLike` that can be parametrized w.r.t. `np.generic`
+_DTypeLike = Union[
+ "np.dtype[_SCT]",
+ Type[_SCT],
+ _SupportsDType["np.dtype[_SCT]"],
+]
+
+
+# Would create a dtype[np.void]
+_VoidDTypeLike = Union[
+ # (flexible_dtype, itemsize)
+ Tuple[_DTypeLikeNested, int],
+ # (fixed_dtype, shape)
+ Tuple[_DTypeLikeNested, _ShapeLike],
+ # [(field_name, field_dtype, field_shape), ...]
+ #
+ # The type here is quite broad because NumPy accepts quite a wide
+ # range of inputs inside the list; see the tests for some
+ # examples.
+ List[Any],
+ # {'names': ..., 'formats': ..., 'offsets': ..., 'titles': ...,
+ # 'itemsize': ...}
+ _DTypeDict,
+ # (base_dtype, new_dtype)
+ Tuple[_DTypeLikeNested, _DTypeLikeNested],
+]
+
+# Anything that can be coerced into numpy.dtype.
+# Reference: https://docs.scipy.org/doc/numpy/reference/arrays.dtypes.html
+DTypeLike = Union[
+ DType[Any],
+ # default data type (float64)
+ None,
+ # array-scalar types and generic types
+ Type[Any], # NOTE: We're stuck with `Type[Any]` due to object dtypes
+ # anything with a dtype attribute
+ _SupportsDType[DType[Any]],
+ # character codes, type strings or comma-separated fields, e.g., 'float64'
+ str,
+ _VoidDTypeLike,
+]
+
+# NOTE: while it is possible to provide the dtype as a dict of
+# dtype-like objects (e.g. `{'field1': ..., 'field2': ..., ...}`),
+# this syntax is officially discouraged and
+# therefore not included in the Union defining `DTypeLike`.
+#
+# See https://github.com/numpy/numpy/issues/16891 for more details.
+
+# Aliases for commonly used dtype-like objects.
+# Note that the precision of `np.number` subclasses is ignored herein.
+_DTypeLikeBool = Union[
+ Type[bool],
+ Type[np.bool_],
+ DType[np.bool_],
+ _SupportsDType[DType[np.bool_]],
+ _BoolCodes,
+]
+_DTypeLikeUInt = Union[
+ Type[np.unsignedinteger],
+ DType[np.unsignedinteger],
+ _SupportsDType[DType[np.unsignedinteger]],
+ _UInt8Codes,
+ _UInt16Codes,
+ _UInt32Codes,
+ _UInt64Codes,
+ _UByteCodes,
+ _UShortCodes,
+ _UIntCCodes,
+ _UIntPCodes,
+ _UIntCodes,
+ _ULongLongCodes,
+]
+_DTypeLikeInt = Union[
+ Type[int],
+ Type[np.signedinteger],
+ DType[np.signedinteger],
+ _SupportsDType[DType[np.signedinteger]],
+ _Int8Codes,
+ _Int16Codes,
+ _Int32Codes,
+ _Int64Codes,
+ _ByteCodes,
+ _ShortCodes,
+ _IntCCodes,
+ _IntPCodes,
+ _IntCodes,
+ _LongLongCodes,
+]
+_DTypeLikeFloat = Union[
+ Type[float],
+ Type[np.floating],
+ DType[np.floating],
+ _SupportsDType[DType[np.floating]],
+ _Float16Codes,
+ _Float32Codes,
+ _Float64Codes,
+ _HalfCodes,
+ _SingleCodes,
+ _DoubleCodes,
+ _LongDoubleCodes,
+]
+_DTypeLikeComplex = Union[
+ Type[complex],
+ Type[np.complexfloating],
+ DType[np.complexfloating],
+ _SupportsDType[DType[np.complexfloating]],
+ _Complex64Codes,
+ _Complex128Codes,
+ _CSingleCodes,
+ _CDoubleCodes,
+ _CLongDoubleCodes,
+]
+_DTypeLikeDT64 = Union[
+    Type[np.datetime64],
+    DType[np.datetime64],
+    _SupportsDType[DType[np.datetime64]],
+    _DT64Codes,
+]
+_DTypeLikeTD64 = Union[
+    Type[np.timedelta64],
+    DType[np.timedelta64],
+    _SupportsDType[DType[np.timedelta64]],
+    _TD64Codes,
+]
+_DTypeLikeStr = Union[
+ Type[str],
+ Type[np.str_],
+ DType[np.str_],
+ _SupportsDType[DType[np.str_]],
+ _StrCodes,
+]
+_DTypeLikeBytes = Union[
+ Type[bytes],
+ Type[np.bytes_],
+ DType[np.bytes_],
+ _SupportsDType[DType[np.bytes_]],
+ _BytesCodes,
+]
+_DTypeLikeVoid = Union[
+ Type[np.void],
+ DType[np.void],
+ _SupportsDType[DType[np.void]],
+ _VoidCodes,
+ _VoidDTypeLike,
+]
+_DTypeLikeObject = Union[
+ type,
+ DType[np.object_],
+ _SupportsDType[DType[np.object_]],
+ _ObjectCodes,
+]
+
+_DTypeLikeComplex_co = Union[
+ _DTypeLikeBool,
+ _DTypeLikeUInt,
+ _DTypeLikeInt,
+ _DTypeLikeFloat,
+ _DTypeLikeComplex,
+]
diff --git a/Lib/numpy/_typing/_extended_precision.py b/Lib/numpy/_typing/_extended_precision.py
new file mode 100644
index 00000000000000..edc1778ce16fed
--- /dev/null
+++ b/Lib/numpy/_typing/_extended_precision.py
@@ -0,0 +1,43 @@
+"""A module with platform-specific extended precision
+`numpy.number` subclasses.
+
+The subclasses are defined here (instead of ``__init__.pyi``) such
+that they can be imported conditionally via numpy's mypy plugin.
+"""
+
+from typing import TYPE_CHECKING, Any
+
+import numpy as np
+from . import (
+ _80Bit,
+ _96Bit,
+ _128Bit,
+ _256Bit,
+)
+
+if TYPE_CHECKING:
+ uint128 = np.unsignedinteger[_128Bit]
+ uint256 = np.unsignedinteger[_256Bit]
+ int128 = np.signedinteger[_128Bit]
+ int256 = np.signedinteger[_256Bit]
+ float80 = np.floating[_80Bit]
+ float96 = np.floating[_96Bit]
+ float128 = np.floating[_128Bit]
+ float256 = np.floating[_256Bit]
+ complex160 = np.complexfloating[_80Bit, _80Bit]
+ complex192 = np.complexfloating[_96Bit, _96Bit]
+ complex256 = np.complexfloating[_128Bit, _128Bit]
+ complex512 = np.complexfloating[_256Bit, _256Bit]
+else:
+ uint128 = Any
+ uint256 = Any
+ int128 = Any
+ int256 = Any
+ float80 = Any
+ float96 = Any
+ float128 = Any
+ float256 = Any
+ complex160 = Any
+ complex192 = Any
+ complex256 = Any
+ complex512 = Any
diff --git a/Lib/numpy/_typing/_generic_alias.py b/Lib/numpy/_typing/_generic_alias.py
new file mode 100644
index 00000000000000..01cd224adbe99e
--- /dev/null
+++ b/Lib/numpy/_typing/_generic_alias.py
@@ -0,0 +1,245 @@
+from __future__ import annotations
+
+import sys
+import types
+from collections.abc import Generator, Iterable, Iterator
+from typing import (
+ Any,
+ ClassVar,
+ NoReturn,
+ TypeVar,
+ TYPE_CHECKING,
+)
+
+import numpy as np
+
+__all__ = ["_GenericAlias", "NDArray"]
+
+_T = TypeVar("_T", bound="_GenericAlias")
+
+
+def _to_str(obj: object) -> str:
+ """Helper function for `_GenericAlias.__repr__`."""
+ if obj is Ellipsis:
+ return '...'
+ elif isinstance(obj, type) and not isinstance(obj, _GENERIC_ALIAS_TYPE):
+ if obj.__module__ == 'builtins':
+ return obj.__qualname__
+ else:
+ return f'{obj.__module__}.{obj.__qualname__}'
+ else:
+ return repr(obj)
+
+
+def _parse_parameters(args: Iterable[Any]) -> Generator[TypeVar, None, None]:
+ """Search for all typevars and typevar-containing objects in `args`.
+
+ Helper function for `_GenericAlias.__init__`.
+
+ """
+ for i in args:
+ if hasattr(i, "__parameters__"):
+ yield from i.__parameters__
+ elif isinstance(i, TypeVar):
+ yield i
+
+
+def _reconstruct_alias(alias: _T, parameters: Iterator[TypeVar]) -> _T:
+ """Recursively replace all typevars with those from `parameters`.
+
+ Helper function for `_GenericAlias.__getitem__`.
+
+ """
+ args = []
+ for i in alias.__args__:
+ if isinstance(i, TypeVar):
+ value: Any = next(parameters)
+ elif isinstance(i, _GenericAlias):
+ value = _reconstruct_alias(i, parameters)
+ elif hasattr(i, "__parameters__"):
+ prm_tup = tuple(next(parameters) for _ in i.__parameters__)
+ value = i[prm_tup]
+ else:
+ value = i
+ args.append(value)
+
+ cls = type(alias)
+ return cls(alias.__origin__, tuple(args), alias.__unpacked__)
+
+
+class _GenericAlias:
+ """A python-based backport of the `types.GenericAlias` class.
+
+ E.g. for ``t = list[int]``, ``t.__origin__`` is ``list`` and
+ ``t.__args__`` is ``(int,)``.
+
+ See Also
+ --------
+ :pep:`585`
+ The PEP responsible for introducing `types.GenericAlias`.
+
+ """
+
+ __slots__ = (
+ "__weakref__",
+ "_origin",
+ "_args",
+ "_parameters",
+ "_hash",
+ "_starred",
+ )
+
+ @property
+ def __origin__(self) -> type:
+ return super().__getattribute__("_origin")
+
+ @property
+ def __args__(self) -> tuple[object, ...]:
+ return super().__getattribute__("_args")
+
+ @property
+ def __parameters__(self) -> tuple[TypeVar, ...]:
+ """Type variables in the ``GenericAlias``."""
+ return super().__getattribute__("_parameters")
+
+ @property
+ def __unpacked__(self) -> bool:
+ return super().__getattribute__("_starred")
+
+ @property
+ def __typing_unpacked_tuple_args__(self) -> tuple[object, ...] | None:
+ # NOTE: This should return `__args__` if `__origin__` is a tuple,
+ # which should never be the case with how `_GenericAlias` is used
+ # within numpy
+ return None
+
+ def __init__(
+ self,
+ origin: type,
+ args: object | tuple[object, ...],
+ starred: bool = False,
+ ) -> None:
+ self._origin = origin
+ self._args = args if isinstance(args, tuple) else (args,)
+ self._parameters = tuple(_parse_parameters(self.__args__))
+ self._starred = starred
+
+ @property
+ def __call__(self) -> type[Any]:
+ return self.__origin__
+
+ def __reduce__(self: _T) -> tuple[
+ type[_T],
+ tuple[type[Any], tuple[object, ...], bool],
+ ]:
+ cls = type(self)
+ return cls, (self.__origin__, self.__args__, self.__unpacked__)
+
+ def __mro_entries__(self, bases: Iterable[object]) -> tuple[type[Any]]:
+ return (self.__origin__,)
+
+ def __dir__(self) -> list[str]:
+ """Implement ``dir(self)``."""
+ cls = type(self)
+ dir_origin = set(dir(self.__origin__))
+ return sorted(cls._ATTR_EXCEPTIONS | dir_origin)
+
+ def __hash__(self) -> int:
+ """Return ``hash(self)``."""
+ # Attempt to use the cached hash
+ try:
+ return super().__getattribute__("_hash")
+ except AttributeError:
+ self._hash: int = (
+ hash(self.__origin__) ^
+ hash(self.__args__) ^
+ hash(self.__unpacked__)
+ )
+ return super().__getattribute__("_hash")
+
+ def __instancecheck__(self, obj: object) -> NoReturn:
+ """Check if an `obj` is an instance."""
+ raise TypeError("isinstance() argument 2 cannot be a "
+ "parameterized generic")
+
+ def __subclasscheck__(self, cls: type) -> NoReturn:
+ """Check if a `cls` is a subclass."""
+ raise TypeError("issubclass() argument 2 cannot be a "
+ "parameterized generic")
+
+ def __repr__(self) -> str:
+ """Return ``repr(self)``."""
+ args = ", ".join(_to_str(i) for i in self.__args__)
+ origin = _to_str(self.__origin__)
+ prefix = "*" if self.__unpacked__ else ""
+ return f"{prefix}{origin}[{args}]"
+
+ def __getitem__(self: _T, key: object | tuple[object, ...]) -> _T:
+ """Return ``self[key]``."""
+ key_tup = key if isinstance(key, tuple) else (key,)
+
+ if len(self.__parameters__) == 0:
+ raise TypeError(f"There are no type variables left in {self}")
+ elif len(key_tup) > len(self.__parameters__):
+ raise TypeError(f"Too many arguments for {self}")
+ elif len(key_tup) < len(self.__parameters__):
+ raise TypeError(f"Too few arguments for {self}")
+
+ key_iter = iter(key_tup)
+ return _reconstruct_alias(self, key_iter)
+
+ def __eq__(self, value: object) -> bool:
+ """Return ``self == value``."""
+ if not isinstance(value, _GENERIC_ALIAS_TYPE):
+ return NotImplemented
+ return (
+ self.__origin__ == value.__origin__ and
+ self.__args__ == value.__args__ and
+ self.__unpacked__ == getattr(
+ value, "__unpacked__", self.__unpacked__
+ )
+ )
+
+ def __iter__(self: _T) -> Generator[_T, None, None]:
+ """Return ``iter(self)``."""
+ cls = type(self)
+ yield cls(self.__origin__, self.__args__, True)
+
+ _ATTR_EXCEPTIONS: ClassVar[frozenset[str]] = frozenset({
+ "__origin__",
+ "__args__",
+ "__parameters__",
+ "__mro_entries__",
+ "__reduce__",
+ "__reduce_ex__",
+ "__copy__",
+ "__deepcopy__",
+ "__unpacked__",
+ "__typing_unpacked_tuple_args__",
+ "__class__",
+ })
+
+ def __getattribute__(self, name: str) -> Any:
+ """Return ``getattr(self, name)``."""
+ # Pull the attribute from `__origin__` unless its
+ # name is in `_ATTR_EXCEPTIONS`
+ cls = type(self)
+ if name in cls._ATTR_EXCEPTIONS:
+ return super().__getattribute__(name)
+ return getattr(self.__origin__, name)
+
+
+# See `_GenericAlias.__eq__`
+if sys.version_info >= (3, 9):
+ _GENERIC_ALIAS_TYPE = (_GenericAlias, types.GenericAlias)
+else:
+ _GENERIC_ALIAS_TYPE = (_GenericAlias,)
+
+ScalarType = TypeVar("ScalarType", bound=np.generic, covariant=True)
+
+if TYPE_CHECKING or sys.version_info >= (3, 9):
+ _DType = np.dtype[ScalarType]
+ NDArray = np.ndarray[Any, np.dtype[ScalarType]]
+else:
+ _DType = _GenericAlias(np.dtype, (ScalarType,))
+ NDArray = _GenericAlias(np.ndarray, (Any, _DType))
diff --git a/Lib/numpy/_typing/_nbit.py b/Lib/numpy/_typing/_nbit.py
new file mode 100644
index 00000000000000..b8d35db4f5947f
--- /dev/null
+++ b/Lib/numpy/_typing/_nbit.py
@@ -0,0 +1,16 @@
+"""A module with the precisions of platform-specific `~numpy.number`s."""
+
+from typing import Any
+
+# To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin
+_NBitByte = Any
+_NBitShort = Any
+_NBitIntC = Any
+_NBitIntP = Any
+_NBitInt = Any
+_NBitLongLong = Any
+
+_NBitHalf = Any
+_NBitSingle = Any
+_NBitDouble = Any
+_NBitLongDouble = Any
diff --git a/Lib/numpy/_typing/_nested_sequence.py b/Lib/numpy/_typing/_nested_sequence.py
new file mode 100644
index 00000000000000..789bf3844437e4
--- /dev/null
+++ b/Lib/numpy/_typing/_nested_sequence.py
@@ -0,0 +1,92 @@
+"""A module containing the `_NestedSequence` protocol."""
+
+from __future__ import annotations
+
+from typing import (
+ Any,
+ Iterator,
+ overload,
+ TypeVar,
+ Protocol,
+ runtime_checkable,
+)
+
+__all__ = ["_NestedSequence"]
+
+_T_co = TypeVar("_T_co", covariant=True)
+
+
+@runtime_checkable
+class _NestedSequence(Protocol[_T_co]):
+ """A protocol for representing nested sequences.
+
+ Warning
+ -------
+ `_NestedSequence` currently does not work in combination with typevars,
+    *e.g.* ``def func(a: _NestedSequence[T]) -> T: ...``.
+
+ See Also
+ --------
+ collections.abc.Sequence
+ ABCs for read-only and mutable :term:`sequences`.
+
+ Examples
+ --------
+ .. code-block:: python
+
+ >>> from __future__ import annotations
+
+ >>> from typing import TYPE_CHECKING
+ >>> import numpy as np
+ >>> from numpy._typing import _NestedSequence
+
+ >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
+ ... return np.asarray(seq).dtype
+
+ >>> a = get_dtype([1.0])
+ >>> b = get_dtype([[1.0]])
+ >>> c = get_dtype([[[1.0]]])
+ >>> d = get_dtype([[[[1.0]]]])
+
+ >>> if TYPE_CHECKING:
+ ... reveal_locals()
+ ... # note: Revealed local types are:
+ ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+ ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
+
+ """
+
+ def __len__(self, /) -> int:
+ """Implement ``len(self)``."""
+ raise NotImplementedError
+
+ @overload
+ def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]: ...
+ @overload
+ def __getitem__(self, index: slice, /) -> _NestedSequence[_T_co]: ...
+
+ def __getitem__(self, index, /):
+ """Implement ``self[x]``."""
+ raise NotImplementedError
+
+ def __contains__(self, x: object, /) -> bool:
+ """Implement ``x in self``."""
+ raise NotImplementedError
+
+ def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
+ """Implement ``iter(self)``."""
+ raise NotImplementedError
+
+ def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
+ """Implement ``reversed(self)``."""
+ raise NotImplementedError
+
+ def count(self, value: Any, /) -> int:
+ """Return the number of occurrences of `value`."""
+ raise NotImplementedError
+
+ def index(self, value: Any, /) -> int:
+ """Return the first index of `value`."""
+ raise NotImplementedError
diff --git a/Lib/numpy/_typing/_scalars.py b/Lib/numpy/_typing/_scalars.py
new file mode 100644
index 00000000000000..516b996dc00740
--- /dev/null
+++ b/Lib/numpy/_typing/_scalars.py
@@ -0,0 +1,30 @@
+from typing import Union, Tuple, Any
+
+import numpy as np
+
+# NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
+# `np.bytes_` are already subclasses of their builtin counterpart
+
+_CharLike_co = Union[str, bytes]
+
+# The 6 `<X>Like_co` type-aliases below represent all scalars that can be
+# coerced into `<X>` (with the casting rule `same_kind`)
+_BoolLike_co = Union[bool, np.bool_]
+_UIntLike_co = Union[_BoolLike_co, np.unsignedinteger]
+_IntLike_co = Union[_BoolLike_co, int, np.integer]
+_FloatLike_co = Union[_IntLike_co, float, np.floating]
+_ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating]
+_TD64Like_co = Union[_IntLike_co, np.timedelta64]
+
+_NumberLike_co = Union[int, float, complex, np.number, np.bool_]
+_ScalarLike_co = Union[
+ int,
+ float,
+ complex,
+ str,
+ bytes,
+ np.generic,
+]
+
+# `_VoidLike_co` is technically not a scalar, but it's close enough
+_VoidLike_co = Union[Tuple[Any, ...], np.void]
diff --git a/Lib/numpy/_typing/_shape.py b/Lib/numpy/_typing/_shape.py
new file mode 100644
index 00000000000000..c28859b19bae50
--- /dev/null
+++ b/Lib/numpy/_typing/_shape.py
@@ -0,0 +1,6 @@
+from typing import Sequence, Tuple, Union, SupportsIndex
+
+_Shape = Tuple[int, ...]
+
+# Anything that can be coerced to a shape tuple
+_ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]]
diff --git a/Lib/numpy/_typing/_ufunc.pyi b/Lib/numpy/_typing/_ufunc.pyi
new file mode 100644
index 00000000000000..9f8e0d4edbfba4
--- /dev/null
+++ b/Lib/numpy/_typing/_ufunc.pyi
@@ -0,0 +1,445 @@
+"""A module with private type-check-only `numpy.ufunc` subclasses.
+
+The signatures of the ufuncs are too varied to reasonably type
+with a single class. So instead, `ufunc` has been expanded into
+four private subclasses, one for each combination of
+`~ufunc.nin` and `~ufunc.nout`.
+
+"""
+
+from typing import (
+ Any,
+ Generic,
+ overload,
+ TypeVar,
+ Literal,
+ SupportsIndex,
+ Protocol,
+)
+
+from numpy import ufunc, _CastingKind, _OrderKACF
+from numpy.typing import NDArray
+
+from ._shape import _ShapeLike
+from ._scalars import _ScalarLike_co
+from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co
+from ._dtype_like import DTypeLike
+
+_T = TypeVar("_T")
+_2Tuple = tuple[_T, _T]
+_3Tuple = tuple[_T, _T, _T]
+_4Tuple = tuple[_T, _T, _T, _T]
+
+_NTypes = TypeVar("_NTypes", bound=int)
+_IDType = TypeVar("_IDType", bound=Any)
+_NameType = TypeVar("_NameType", bound=str)
+
+
+class _SupportsArrayUFunc(Protocol):
+ def __array_ufunc__(
+ self,
+ ufunc: ufunc,
+ method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+ *inputs: Any,
+ **kwargs: Any,
+ ) -> Any: ...
+
+
+# NOTE: In reality `extobj` should be a list of length 3 containing an
+# int, an int, and a callable, but there's no way to properly express
+# non-homogeneous lists.
+# Use `Any` over `Union` to avoid issues related to lists invariance.
+
+# NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for
+# ufuncs that don't accept two input arguments and return one output argument.
+# In such cases the respective methods are simply typed as `None`.
+
+# NOTE: Similarly, `at` won't be defined for ufuncs that return
+# multiple outputs; in such cases `at` is typed as `None`
+
+# NOTE: If 2 output types are returned then `out` must be a
+# 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable
+
+class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[1]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[2]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ out: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: _SupportsArrayUFunc,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _2Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+
+ def at(
+ self,
+ a: _SupportsArrayUFunc,
+ indices: _ArrayLikeInt_co,
+ /,
+ ) -> None: ...
+
+class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+ @property
+ def signature(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __x2: _ScalarLike_co,
+ out: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ def at(
+ self,
+ a: NDArray[Any],
+ indices: _ArrayLikeInt_co,
+ b: ArrayLike,
+ /,
+ ) -> None: ...
+
+ def reduce(
+ self,
+ array: ArrayLike,
+ axis: None | _ShapeLike = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ keepdims: bool = ...,
+ initial: Any = ...,
+ where: _ArrayLikeBool_co = ...,
+ ) -> Any: ...
+
+ def accumulate(
+ self,
+ array: ArrayLike,
+ axis: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ def reduceat(
+ self,
+ array: ArrayLike,
+ indices: _ArrayLikeInt_co,
+ axis: SupportsIndex = ...,
+ dtype: DTypeLike = ...,
+ out: None | NDArray[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+ # Expand `**kwargs` into explicit keyword-only arguments
+ @overload
+ def outer(
+ self,
+ A: _ScalarLike_co,
+ B: _ScalarLike_co,
+ /, *,
+ out: None = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> Any: ...
+ @overload
+ def outer( # type: ignore[misc]
+ self,
+ A: ArrayLike,
+ B: ArrayLike,
+ /, *,
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> NDArray[Any]: ...
+
+class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[1]: ...
+ @property
+ def nout(self) -> Literal[2]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __out1: None = ...,
+ __out2: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[NDArray[Any]]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: _SupportsArrayUFunc,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+
+class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[2]: ...
+ @property
+ def nargs(self) -> Literal[4]: ...
+ @property
+ def signature(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+
+ @overload
+ def __call__(
+ self,
+ __x1: _ScalarLike_co,
+ __x2: _ScalarLike_co,
+ __out1: None = ...,
+ __out2: None = ...,
+ *,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _4Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ __out1: None | NDArray[Any] = ...,
+ __out2: None | NDArray[Any] = ...,
+ *,
+ out: _2Tuple[NDArray[Any]] = ...,
+ where: None | _ArrayLikeBool_co = ...,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _4Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ ) -> _2Tuple[NDArray[Any]]: ...
+
+class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
+ @property
+ def __name__(self) -> _NameType: ...
+ @property
+ def ntypes(self) -> _NTypes: ...
+ @property
+ def identity(self) -> _IDType: ...
+ @property
+ def nin(self) -> Literal[2]: ...
+ @property
+ def nout(self) -> Literal[1]: ...
+ @property
+ def nargs(self) -> Literal[3]: ...
+
+ # NOTE: In practice the only gufunc in the main namespace is `matmul`,
+ # so we can use its signature here
+ @property
+ def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ...
+ @property
+ def reduce(self) -> None: ...
+ @property
+ def accumulate(self) -> None: ...
+ @property
+ def reduceat(self) -> None: ...
+ @property
+ def outer(self) -> None: ...
+ @property
+ def at(self) -> None: ...
+
+ # Scalar for 1D array-likes; ndarray otherwise
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: None = ...,
+ *,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ axes: list[_2Tuple[SupportsIndex]] = ...,
+ ) -> Any: ...
+ @overload
+ def __call__(
+ self,
+ __x1: ArrayLike,
+ __x2: ArrayLike,
+ out: NDArray[Any] | tuple[NDArray[Any]],
+ *,
+ casting: _CastingKind = ...,
+ order: _OrderKACF = ...,
+ dtype: DTypeLike = ...,
+ subok: bool = ...,
+ signature: str | _3Tuple[None | str] = ...,
+ extobj: list[Any] = ...,
+ axes: list[_2Tuple[SupportsIndex]] = ...,
+ ) -> NDArray[Any]: ...
diff --git a/Lib/numpy/_typing/setup.py b/Lib/numpy/_typing/setup.py
new file mode 100644
index 00000000000000..24022fdaa32708
--- /dev/null
+++ b/Lib/numpy/_typing/setup.py
@@ -0,0 +1,10 @@
+def configuration(parent_package='', top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('_typing', parent_package, top_path)
+ config.add_data_files('*.pyi')
+ return config
+
+
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
diff --git a/Lib/numpy/_version.py b/Lib/numpy/_version.py
new file mode 100644
index 00000000000000..d0ec0c5829632d
--- /dev/null
+++ b/Lib/numpy/_version.py
@@ -0,0 +1,21 @@
+
+# This file was generated by 'versioneer.py' (0.26) from
+# revision-control system data, or from the parent directory name of an
+# unpacked source archive. Distribution tarballs contain a pre-generated copy
+# of this file.
+
+import json
+
+version_json = '''
+{
+ "date": "2023-02-05T11:25:52-0500",
+ "dirty": false,
+ "error": null,
+ "full-revisionid": "85f38ab180ece5290f64e8ddbd9cf06ad8fa4a5e",
+ "version": "1.24.2"
+}
+''' # END VERSION_JSON
+
+
+def get_versions():
+ return json.loads(version_json)
diff --git a/Lib/numpy/array_api/__init__.py b/Lib/numpy/array_api/__init__.py
new file mode 100644
index 00000000000000..5e58ee0a899872
--- /dev/null
+++ b/Lib/numpy/array_api/__init__.py
@@ -0,0 +1,377 @@
+"""
+A NumPy sub-namespace that conforms to the Python array API standard.
+
+This submodule accompanies NEP 47, which proposes its inclusion in NumPy. It
+is still considered experimental, and will issue a warning when imported.
+
+This is a proof-of-concept namespace that wraps the corresponding NumPy
+functions to give a conforming implementation of the Python array API standard
+(https://data-apis.github.io/array-api/latest/). The standard is currently in
+an RFC phase and comments on it are both welcome and encouraged. Comments
+should be made either at https://github.com/data-apis/array-api or at
+https://github.com/data-apis/consortium-feedback/discussions.
+
+NumPy already follows the proposed spec for the most part, so this module
+serves mostly as a thin wrapper around it. However, NumPy also implements a
+lot of behavior that is not included in the spec, so this serves as a
+restricted subset of the API. Only those functions that are part of the spec
+are included in this namespace, and all functions are given with the exact
+signature given in the spec, including the use of position-only arguments, and
+omitting any extra keyword arguments implemented by NumPy but not part of the
+spec. The behavior of some functions is also modified from the NumPy behavior
+to conform to the standard. Note that the underlying array object itself is
+wrapped in a wrapper Array() class, but is otherwise unchanged. This submodule
+is implemented in pure Python with no C extensions.
+
+The array API spec is designed as a "minimal API subset" and explicitly allows
+libraries to include behaviors not specified by it. But users of this module
+that intend to write portable code should be aware that only those behaviors
+that are listed in the spec are guaranteed to be implemented across libraries.
+Consequently, the NumPy implementation was chosen to be both conforming and
+minimal, so that users can use this implementation of the array API namespace
+and be sure that behaviors that it defines will be available in conforming
+namespaces from other libraries.
+
+A few notes about the current state of this submodule:
+
+- There is a test suite that tests modules against the array API standard at
+ https://github.com/data-apis/array-api-tests. The test suite is still a work
+ in progress, but the existing tests pass on this module, with a few
+ exceptions:
+
+ - DLPack support (see https://github.com/data-apis/array-api/pull/106) is
+ not included here, as it requires a full implementation in NumPy proper
+ first.
+
+ The test suite is not yet complete, and even the tests that exist are not
+ guaranteed to give a comprehensive coverage of the spec. Therefore, when
+ reviewing and using this submodule, you should refer to the standard
+ documents themselves. There are some tests in numpy.array_api.tests, but
+ they primarily focus on things that are not tested by the official array API
+ test suite.
+
+- There is a custom array object, numpy.array_api.Array, which is returned by
+ all functions in this module. All functions in the array API namespace
+ implicitly assume that they will only receive this object as input. The only
+ way to create instances of this object is to use one of the array creation
+ functions. It does not have a public constructor on the object itself. The
+ object is a small wrapper class around numpy.ndarray. The main purpose of it
+ is to restrict the namespace of the array object to only those dtypes and
+ only those methods that are required by the spec, as well as to limit/change
+ certain behavior that differs in the spec. In particular:
+
+ - The array API namespace does not have scalar objects, only 0-D arrays.
+ Operations on Array that would create a scalar in NumPy create a 0-D
+ array.
+
+ - Indexing: Only a subset of indices supported by NumPy are required by the
+ spec. The Array object restricts indexing to only allow those types of
+ indices that are required by the spec. See the docstring of the
+ numpy.array_api.Array._validate_indices helper function for more
+ information.
+
+ - Type promotion: Some type promotion rules are different in the spec. In
+ particular, the spec does not have any value-based casting. The spec also
+ does not require cross-kind casting, like integer -> floating-point. Only
+ those promotions that are explicitly required by the array API
+ specification are allowed in this module. See NEP 47 for more info.
+
+ - Functions do not automatically call asarray() on their input, and will not
+ work if the input type is not Array. The exception is array creation
+ functions, and Python operators on the Array object, which accept Python
+ scalars of the same type as the array dtype.
+
+- All functions include type annotations, corresponding to those given in the
+ spec (see _typing.py for definitions of some custom types). These do not
+ currently fully pass mypy due to some limitations in mypy.
+
+- Dtype objects are just the NumPy dtype objects, e.g., float64 =
+ np.dtype('float64'). The spec does not require any behavior on these dtype
+ objects other than that they be accessible by name and be comparable by
+ equality, but it was considered too much extra complexity to create custom
+ objects to represent dtypes.
+
+- All places where the implementations in this submodule are known to deviate
+ from their corresponding functions in NumPy are marked with "# Note:"
+ comments.
+
+Still TODO in this module are:
+
+- DLPack support for numpy.ndarray is still in progress. See
+ https://github.com/numpy/numpy/pull/19083.
+
+- The copy=False keyword argument to asarray() is not yet implemented. This
+ requires support in numpy.asarray() first.
+
+- Some functions are not yet fully tested in the array API test suite, and may
+ require updates that are not yet known until the tests are written.
+
+- The spec is still in an RFC phase and may still have minor updates, which
+ will need to be reflected here.
+
+- Complex number support in array API spec is planned but not yet finalized,
+ as are the fft extension and certain linear algebra functions such as eig
+ that require complex dtypes.
+
+"""
+
+import warnings
+
+warnings.warn(
+ "The numpy.array_api submodule is still experimental. See NEP 47.", stacklevel=2
+)
+
+__array_api_version__ = "2021.12"
+
+__all__ = ["__array_api_version__"]
+
+from ._constants import e, inf, nan, pi
+
+__all__ += ["e", "inf", "nan", "pi"]
+
+from ._creation_functions import (
+ asarray,
+ arange,
+ empty,
+ empty_like,
+ eye,
+ from_dlpack,
+ full,
+ full_like,
+ linspace,
+ meshgrid,
+ ones,
+ ones_like,
+ tril,
+ triu,
+ zeros,
+ zeros_like,
+)
+
+__all__ += [
+ "asarray",
+ "arange",
+ "empty",
+ "empty_like",
+ "eye",
+ "from_dlpack",
+ "full",
+ "full_like",
+ "linspace",
+ "meshgrid",
+ "ones",
+ "ones_like",
+ "tril",
+ "triu",
+ "zeros",
+ "zeros_like",
+]
+
+from ._data_type_functions import (
+ astype,
+ broadcast_arrays,
+ broadcast_to,
+ can_cast,
+ finfo,
+ iinfo,
+ result_type,
+)
+
+__all__ += [
+ "astype",
+ "broadcast_arrays",
+ "broadcast_to",
+ "can_cast",
+ "finfo",
+ "iinfo",
+ "result_type",
+]
+
+from ._dtypes import (
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ bool,
+)
+
+__all__ += [
+ "int8",
+ "int16",
+ "int32",
+ "int64",
+ "uint8",
+ "uint16",
+ "uint32",
+ "uint64",
+ "float32",
+ "float64",
+ "bool",
+]
+
+from ._elementwise_functions import (
+ abs,
+ acos,
+ acosh,
+ add,
+ asin,
+ asinh,
+ atan,
+ atan2,
+ atanh,
+ bitwise_and,
+ bitwise_left_shift,
+ bitwise_invert,
+ bitwise_or,
+ bitwise_right_shift,
+ bitwise_xor,
+ ceil,
+ cos,
+ cosh,
+ divide,
+ equal,
+ exp,
+ expm1,
+ floor,
+ floor_divide,
+ greater,
+ greater_equal,
+ isfinite,
+ isinf,
+ isnan,
+ less,
+ less_equal,
+ log,
+ log1p,
+ log2,
+ log10,
+ logaddexp,
+ logical_and,
+ logical_not,
+ logical_or,
+ logical_xor,
+ multiply,
+ negative,
+ not_equal,
+ positive,
+ pow,
+ remainder,
+ round,
+ sign,
+ sin,
+ sinh,
+ square,
+ sqrt,
+ subtract,
+ tan,
+ tanh,
+ trunc,
+)
+
+__all__ += [
+ "abs",
+ "acos",
+ "acosh",
+ "add",
+ "asin",
+ "asinh",
+ "atan",
+ "atan2",
+ "atanh",
+ "bitwise_and",
+ "bitwise_left_shift",
+ "bitwise_invert",
+ "bitwise_or",
+ "bitwise_right_shift",
+ "bitwise_xor",
+ "ceil",
+ "cos",
+ "cosh",
+ "divide",
+ "equal",
+ "exp",
+ "expm1",
+ "floor",
+ "floor_divide",
+ "greater",
+ "greater_equal",
+ "isfinite",
+ "isinf",
+ "isnan",
+ "less",
+ "less_equal",
+ "log",
+ "log1p",
+ "log2",
+ "log10",
+ "logaddexp",
+ "logical_and",
+ "logical_not",
+ "logical_or",
+ "logical_xor",
+ "multiply",
+ "negative",
+ "not_equal",
+ "positive",
+ "pow",
+ "remainder",
+ "round",
+ "sign",
+ "sin",
+ "sinh",
+ "square",
+ "sqrt",
+ "subtract",
+ "tan",
+ "tanh",
+ "trunc",
+]
+
+# linalg is an extension in the array API spec, which is a sub-namespace. Only
+# a subset of functions in it are imported into the top-level namespace.
+from . import linalg
+
+__all__ += ["linalg"]
+
+from .linalg import matmul, tensordot, matrix_transpose, vecdot
+
+__all__ += ["matmul", "tensordot", "matrix_transpose", "vecdot"]
+
+from ._manipulation_functions import (
+ concat,
+ expand_dims,
+ flip,
+ permute_dims,
+ reshape,
+ roll,
+ squeeze,
+ stack,
+)
+
+__all__ += ["concat", "expand_dims", "flip", "permute_dims", "reshape", "roll", "squeeze", "stack"]
+
+from ._searching_functions import argmax, argmin, nonzero, where
+
+__all__ += ["argmax", "argmin", "nonzero", "where"]
+
+from ._set_functions import unique_all, unique_counts, unique_inverse, unique_values
+
+__all__ += ["unique_all", "unique_counts", "unique_inverse", "unique_values"]
+
+from ._sorting_functions import argsort, sort
+
+__all__ += ["argsort", "sort"]
+
+from ._statistical_functions import max, mean, min, prod, std, sum, var
+
+__all__ += ["max", "mean", "min", "prod", "std", "sum", "var"]
+
+from ._utility_functions import all, any
+
+__all__ += ["all", "any"]
diff --git a/Lib/numpy/array_api/_array_object.py b/Lib/numpy/array_api/_array_object.py
new file mode 100644
index 00000000000000..c4746fad96e7c0
--- /dev/null
+++ b/Lib/numpy/array_api/_array_object.py
@@ -0,0 +1,1118 @@
+"""
+Wrapper class around the ndarray object for the array API standard.
+
+The array API standard defines some behaviors differently than ndarray, in
+particular, type promotion rules are different (the standard has no
+value-based casting). The standard also specifies a more limited subset of
+array methods and functionalities than are implemented on ndarray. Since the
+goal of the array_api namespace is to be a minimal implementation of the array
+API standard, we need to define a separate wrapper class for the array_api
+namespace.
+
+The standard compliant class is only a wrapper class. It is *not* a subclass
+of ndarray.
+"""
+
+from __future__ import annotations
+
+import operator
+from enum import IntEnum
+from ._creation_functions import asarray
+from ._dtypes import (
+ _all_dtypes,
+ _boolean_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _floating_dtypes,
+ _numeric_dtypes,
+ _result_type,
+ _dtype_categories,
+)
+
+from typing import TYPE_CHECKING, Optional, Tuple, Union, Any, SupportsIndex
+import types
+
+if TYPE_CHECKING:
+ from ._typing import Any, PyCapsule, Device, Dtype
+ import numpy.typing as npt
+
+import numpy as np
+
+from numpy import array_api
+
+
+class Array:
+ """
+ n-d array object for the array API namespace.
+
+ See the docstring of :py:obj:`np.ndarray <numpy.ndarray>` for more
+ information.
+
+ This is a wrapper around numpy.ndarray that restricts the usage to only
+ those things that are required by the array API namespace. Note,
+ attributes on this object that start with a single underscore are not part
+ of the API specification and should only be used internally. This object
+ should not be constructed directly. Rather, use one of the creation
+ functions, such as asarray().
+
+ """
+ _array: np.ndarray
+
+ # Use a custom constructor instead of __init__, as manually initializing
+ # this class is not supported API.
+ @classmethod
+ def _new(cls, x, /):
+ """
+ This is a private method for initializing the array API Array
+ object.
+
+ Functions outside of the array_api submodule should not use this
+ method. Use one of the creation functions instead, such as
+ ``asarray``.
+
+ """
+ obj = super().__new__(cls)
+ # Note: The spec does not have array scalars, only 0-D arrays.
+ if isinstance(x, np.generic):
+ # Convert the array scalar to a 0-D array
+ x = np.asarray(x)
+ if x.dtype not in _all_dtypes:
+ raise TypeError(
+ f"The array_api namespace does not support the dtype '{x.dtype}'"
+ )
+ obj._array = x
+ return obj
+
+ # Prevent Array() from working
+ def __new__(cls, *args, **kwargs):
+ raise TypeError(
+ "The array_api Array object should not be instantiated directly. Use an array creation function, such as asarray(), instead."
+ )
+
+ # These functions are not required by the spec, but are implemented for
+ # the sake of usability.
+
+ def __str__(self: Array, /) -> str:
+ """
+ Performs the operation __str__.
+ """
+ return self._array.__str__().replace("array", "Array")
+
+ def __repr__(self: Array, /) -> str:
+ """
+ Performs the operation __repr__.
+ """
+ suffix = f", dtype={self.dtype.name})"
+ if 0 in self.shape:
+ prefix = "empty("
+ mid = str(self.shape)
+ else:
+ prefix = "Array("
+ mid = np.array2string(self._array, separator=', ', prefix=prefix, suffix=suffix)
+ return prefix + mid + suffix
+
+ # This function is not required by the spec, but we implement it here for
+ # convenience so that np.asarray(np.array_api.Array) will work.
+ def __array__(self, dtype: None | np.dtype[Any] = None) -> npt.NDArray[Any]:
+ """
+ Warning: this method is NOT part of the array API spec. Implementers
+ of other libraries need not include it, and users should not assume it
+ will be present in other implementations.
+
+ """
+ return np.asarray(self._array, dtype=dtype)
+
+ # These are various helper functions to make the array behavior match the
+ # spec in places where it either deviates from or is more strict than
+ # NumPy behavior
+
+ def _check_allowed_dtypes(self, other: bool | int | float | Array, dtype_category: str, op: str) -> Array:
+ """
+ Helper function for operators to only allow specific input dtypes
+
+ Use like
+
+ other = self._check_allowed_dtypes(other, 'numeric', '__add__')
+ if other is NotImplemented:
+ return other
+ """
+
+ if self.dtype not in _dtype_categories[dtype_category]:
+ raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+ if isinstance(other, (int, float, bool)):
+ other = self._promote_scalar(other)
+ elif isinstance(other, Array):
+ if other.dtype not in _dtype_categories[dtype_category]:
+ raise TypeError(f"Only {dtype_category} dtypes are allowed in {op}")
+ else:
+ return NotImplemented
+
+ # This will raise TypeError for type combinations that are not allowed
+ # to promote in the spec (even if the NumPy array operator would
+ # promote them).
+ res_dtype = _result_type(self.dtype, other.dtype)
+ if op.startswith("__i"):
+ # Note: NumPy will allow in-place operators in some cases where
+ # the type promoted operator does not match the left-hand side
+ # operand. For example,
+
+ # >>> a = np.array(1, dtype=np.int8)
+ # >>> a += np.array(1, dtype=np.int16)
+
+ # The spec explicitly disallows this.
+ if res_dtype != self.dtype:
+ raise TypeError(
+ f"Cannot perform {op} with dtypes {self.dtype} and {other.dtype}"
+ )
+
+ return other
+
+ # Helper function to match the type promotion rules in the spec
+ def _promote_scalar(self, scalar):
+ """
+ Returns a promoted version of a Python scalar appropriate for use with
+ operations on self.
+
+ This may raise an OverflowError in cases where the scalar is an
+ integer that is too large to fit in a NumPy integer dtype, or
+ TypeError when the scalar type is incompatible with the dtype of self.
+ """
+ # Note: Only Python scalar types that match the array dtype are
+ # allowed.
+ if isinstance(scalar, bool):
+ if self.dtype not in _boolean_dtypes:
+ raise TypeError(
+ "Python bool scalars can only be promoted with bool arrays"
+ )
+ elif isinstance(scalar, int):
+ if self.dtype in _boolean_dtypes:
+ raise TypeError(
+ "Python int scalars cannot be promoted with bool arrays"
+ )
+ elif isinstance(scalar, float):
+ if self.dtype not in _floating_dtypes:
+ raise TypeError(
+ "Python float scalars can only be promoted with floating-point arrays."
+ )
+ else:
+ raise TypeError("'scalar' must be a Python scalar")
+
+ # Note: scalars are unconditionally cast to the same dtype as the
+ # array.
+
+ # Note: the spec only specifies integer-dtype/int promotion
+ # behavior for integers within the bounds of the integer dtype.
+ # Outside of those bounds we use the default NumPy behavior (either
+ # cast or raise OverflowError).
+ return Array._new(np.array(scalar, self.dtype))
+
+ @staticmethod
+ def _normalize_two_args(x1, x2) -> Tuple[Array, Array]:
+ """
+ Normalize inputs to two arg functions to fix type promotion rules
+
+ NumPy deviates from the spec type promotion rules in cases where one
+ argument is 0-dimensional and the other is not. For example:
+
+ >>> import numpy as np
+ >>> a = np.array([1.0], dtype=np.float32)
+ >>> b = np.array(1.0, dtype=np.float64)
+ >>> np.add(a, b) # The spec says this should be float64
+ array([2.], dtype=float32)
+
+ To fix this, we add a dimension to the 0-dimension array before passing it
+ through. This works because a dimension would be added anyway from
+ broadcasting, so the resulting shape is the same, but this prevents NumPy
+ from not promoting the dtype.
+ """
+ # Another option would be to use signature=(x1.dtype, x2.dtype, None),
+ # but that only works for ufuncs, so we would have to call the ufuncs
+ # directly in the operator methods. One should also note that this
+ # sort of trick wouldn't work for functions like searchsorted, which
+ # don't do normal broadcasting, but there aren't any functions like
+ # that in the array API namespace.
+ if x1.ndim == 0 and x2.ndim != 0:
+ # The _array[None] workaround was chosen because it is relatively
+ # performant. broadcast_to(x1._array, x2.shape) is much slower. We
+ # could also manually type promote x2, but that is more complicated
+ # and about the same performance as this.
+ x1 = Array._new(x1._array[None])
+ elif x2.ndim == 0 and x1.ndim != 0:
+ x2 = Array._new(x2._array[None])
+ return (x1, x2)
+
+ # Note: A large fraction of allowed indices are disallowed here (see the
+ # docstring below)
+ def _validate_index(self, key):
+ """
+ Validate an index according to the array API.
+
+ The array API specification only requires a subset of indices that are
+ supported by NumPy. This function will reject any index that is
+ allowed by NumPy but not required by the array API specification. We
+ always raise ``IndexError`` on such indices (the spec does not require
+ any specific behavior on them, but this makes the NumPy array API
+ namespace a minimal implementation of the spec). See
+ https://data-apis.org/array-api/latest/API_specification/indexing.html
+ for the full list of required indexing behavior
+
+ This function raises IndexError if the index ``key`` is invalid. It
+ only raises ``IndexError`` on indices that are not already rejected by
+ NumPy, as NumPy will already raise the appropriate error on such
+ indices. ``shape`` may be None, in which case, only cases that are
+ independent of the array shape are checked.
+
+ The following cases are allowed by NumPy, but not specified by the array
+ API specification:
+
+ - Indices do not include an implicit ellipsis at the end. That is,
+ every axis of an array must be explicitly indexed or an ellipsis
+ included. This behaviour is sometimes referred to as flat indexing.
+
+ - The start and stop of a slice may not be out of bounds. In
+ particular, for a slice ``i:j:k`` on an axis of size ``n``, only the
+ following are allowed:
+
+ - ``i`` or ``j`` omitted (``None``).
+ - ``-n <= i <= max(0, n - 1)``.
+ - For ``k > 0`` or ``k`` omitted (``None``), ``-n <= j <= n``.
+ - For ``k < 0``, ``-n - 1 <= j <= max(0, n - 1)``.
+
+ - Boolean array indices are not allowed as part of a larger tuple
+ index.
+
+ - Integer array indices are not allowed (with the exception of 0-D
+ arrays, which are treated the same as scalars).
+
+ Additionally, it should be noted that indices that would return a
+ scalar in NumPy will return a 0-D array. Array scalars are not allowed
+ in the specification, only 0-D arrays. This is done in the
+ ``Array._new`` constructor, not this function.
+
+ """
+ _key = key if isinstance(key, tuple) else (key,)
+ for i in _key:
+ if isinstance(i, bool) or not (
+ isinstance(i, SupportsIndex) # i.e. ints
+ or isinstance(i, slice)
+ or i == Ellipsis
+ or i is None
+ or isinstance(i, Array)
+ or isinstance(i, np.ndarray)
+ ):
+ raise IndexError(
+ f"Single-axes index {i} has {type(i)=}, but only "
+ "integers, slices (:), ellipsis (...), newaxis (None), "
+ "zero-dimensional integer arrays and boolean arrays "
+ "are specified in the Array API."
+ )
+
+ nonexpanding_key = []
+ single_axes = []
+ n_ellipsis = 0
+ key_has_mask = False
+ for i in _key:
+ if i is not None:
+ nonexpanding_key.append(i)
+ if isinstance(i, Array) or isinstance(i, np.ndarray):
+ if i.dtype in _boolean_dtypes:
+ key_has_mask = True
+ single_axes.append(i)
+ else:
+ # i must not be an array here, to avoid elementwise equals
+ if i == Ellipsis:
+ n_ellipsis += 1
+ else:
+ single_axes.append(i)
+
+ n_single_axes = len(single_axes)
+ if n_ellipsis > 1:
+ return # handled by ndarray
+ elif n_ellipsis == 0:
+ # Note boolean masks must be the sole index, which we check for
+ # later on.
+ if not key_has_mask and n_single_axes < self.ndim:
+ raise IndexError(
+ f"{self.ndim=}, but the multi-axes index only specifies "
+ f"{n_single_axes} dimensions. If this was intentional, "
+ "add a trailing ellipsis (...) which expands into as many "
+ "slices (:) as necessary - this is what np.ndarray arrays "
+ "implicitly do, but such flat indexing behaviour is not "
+ "specified in the Array API."
+ )
+
+ if n_ellipsis == 0:
+ indexed_shape = self.shape
+ else:
+ ellipsis_start = None
+ for pos, i in enumerate(nonexpanding_key):
+ if not (isinstance(i, Array) or isinstance(i, np.ndarray)):
+ if i == Ellipsis:
+ ellipsis_start = pos
+ break
+ assert ellipsis_start is not None # sanity check
+ ellipsis_end = self.ndim - (n_single_axes - ellipsis_start)
+ indexed_shape = (
+ self.shape[:ellipsis_start] + self.shape[ellipsis_end:]
+ )
+ for i, side in zip(single_axes, indexed_shape):
+ if isinstance(i, slice):
+ if side == 0:
+ f_range = "0 (or None)"
+ else:
+ f_range = f"between -{side} and {side - 1} (or None)"
+ if i.start is not None:
+ try:
+ start = operator.index(i.start)
+ except TypeError:
+ pass # handled by ndarray
+ else:
+ if not (-side <= start <= side):
+ raise IndexError(
+ f"Slice {i} contains {start=}, but should be "
+ f"{f_range} for an axis of size {side} "
+ "(out-of-bounds starts are not specified in "
+ "the Array API)"
+ )
+ if i.stop is not None:
+ try:
+ stop = operator.index(i.stop)
+ except TypeError:
+ pass # handled by ndarray
+ else:
+ if not (-side <= stop <= side):
+ raise IndexError(
+ f"Slice {i} contains {stop=}, but should be "
+ f"{f_range} for an axis of size {side} "
+ "(out-of-bounds stops are not specified in "
+ "the Array API)"
+ )
+ elif isinstance(i, Array):
+ if i.dtype in _boolean_dtypes and len(_key) != 1:
+ assert isinstance(key, tuple) # sanity check
+ raise IndexError(
+ f"Single-axes index {i} is a boolean array and "
+ f"{len(key)=}, but masking is only specified in the "
+ "Array API when the array is the sole index."
+ )
+ elif i.dtype in _integer_dtypes and i.ndim != 0:
+ raise IndexError(
+ f"Single-axes index {i} is a non-zero-dimensional "
+ "integer array, but advanced integer indexing is not "
+ "specified in the Array API."
+ )
+ elif isinstance(i, tuple):
+ raise IndexError(
+ f"Single-axes index {i} is a tuple, but nested tuple "
+ "indices are not specified in the Array API."
+ )
+
+ # Everything below this line is required by the spec.
+
+ def __abs__(self: Array, /) -> Array:
+ """
+ Performs the operation __abs__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __abs__")
+ res = self._array.__abs__()
+ return self.__class__._new(res)
+
+ def __add__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __add__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__add__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__add__(other._array)
+ return self.__class__._new(res)
+
+ def __and__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __and__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__and__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__and__(other._array)
+ return self.__class__._new(res)
+
+ def __array_namespace__(
+ self: Array, /, *, api_version: Optional[str] = None
+ ) -> types.ModuleType:
+ if api_version is not None and not api_version.startswith("2021."):
+ raise ValueError(f"Unrecognized array API version: {api_version!r}")
+ return array_api
+
+ def __bool__(self: Array, /) -> bool:
+ """
+ Performs the operation __bool__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("bool is only allowed on arrays with 0 dimensions")
+ if self.dtype not in _boolean_dtypes:
+ raise ValueError("bool is only allowed on boolean arrays")
+ res = self._array.__bool__()
+ return res
+
+ def __dlpack__(self: Array, /, *, stream: None = None) -> PyCapsule:
+ """
+ Performs the operation __dlpack__.
+ """
+ return self._array.__dlpack__(stream=stream)
+
+ def __dlpack_device__(self: Array, /) -> Tuple[IntEnum, int]:
+ """
+ Performs the operation __dlpack_device__.
+ """
+ # Note: device support is required for this
+ return self._array.__dlpack_device__()
+
+ def __eq__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __eq__.
+ """
+ # Even though "all" dtypes are allowed, we still require them to be
+ # promotable with each other.
+ other = self._check_allowed_dtypes(other, "all", "__eq__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__eq__(other._array)
+ return self.__class__._new(res)
+
+ def __float__(self: Array, /) -> float:
+ """
+ Performs the operation __float__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("float is only allowed on arrays with 0 dimensions")
+ if self.dtype not in _floating_dtypes:
+ raise ValueError("float is only allowed on floating-point arrays")
+ res = self._array.__float__()
+ return res
+
+ def __floordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __floordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__floordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__floordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ge__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ge__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ge__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ge__(other._array)
+ return self.__class__._new(res)
+
+ def __getitem__(
+ self: Array,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ /,
+ ) -> Array:
+ """
+ Performs the operation __getitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ self._validate_index(key)
+ if isinstance(key, Array):
+ # Indexing self._array with array_api arrays can be erroneous
+ key = key._array
+ res = self._array.__getitem__(key)
+ return self._new(res)
+
+ def __gt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __gt__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__gt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__gt__(other._array)
+ return self.__class__._new(res)
+
+ def __int__(self: Array, /) -> int:
+ """
+ Performs the operation __int__.
+ """
+ # Note: This is an error here.
+ if self._array.ndim != 0:
+ raise TypeError("int is only allowed on arrays with 0 dimensions")
+ if self.dtype not in _integer_dtypes:
+ raise ValueError("int is only allowed on integer arrays")
+ res = self._array.__int__()
+ return res
+
+ def __index__(self: Array, /) -> int:
+ """
+ Performs the operation __index__.
+ """
+ res = self._array.__index__()
+ return res
+
+ def __invert__(self: Array, /) -> Array:
+ """
+ Performs the operation __invert__.
+ """
+ if self.dtype not in _integer_or_boolean_dtypes:
+ raise TypeError("Only integer or boolean dtypes are allowed in __invert__")
+ res = self._array.__invert__()
+ return self.__class__._new(res)
+
+ def __le__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __le__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__le__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__le__(other._array)
+ return self.__class__._new(res)
+
+ def __lshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __lshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__lshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lshift__(other._array)
+ return self.__class__._new(res)
+
+ def __lt__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __lt__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__lt__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__lt__(other._array)
+ return self.__class__._new(res)
+
+ def __matmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __matmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__matmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__matmul__(other._array)
+ return self.__class__._new(res)
+
+ def __mod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mod__(other._array)
+ return self.__class__._new(res)
+
+ def __mul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __mul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__mul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__mul__(other._array)
+ return self.__class__._new(res)
+
+ def __ne__(self: Array, other: Union[int, float, bool, Array], /) -> Array:
+ """
+ Performs the operation __ne__.
+ """
+ other = self._check_allowed_dtypes(other, "all", "__ne__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ne__(other._array)
+ return self.__class__._new(res)
+
+ def __neg__(self: Array, /) -> Array:
+ """
+ Performs the operation __neg__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __neg__")
+ res = self._array.__neg__()
+ return self.__class__._new(res)
+
+ def __or__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __or__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__or__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__or__(other._array)
+ return self.__class__._new(res)
+
+ def __pos__(self: Array, /) -> Array:
+ """
+ Performs the operation __pos__.
+ """
+ if self.dtype not in _numeric_dtypes:
+ raise TypeError("Only numeric dtypes are allowed in __pos__")
+ res = self._array.__pos__()
+ return self.__class__._new(res)
+
+ def __pow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __pow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "numeric", "__pow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow type promotion rules for 0-d
+ # arrays, so we use pow() here instead.
+ return pow(self, other)
+
+ def __rshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rshift__(other._array)
+ return self.__class__._new(res)
+
+ def __setitem__(
+ self,
+ key: Union[
+ int, slice, ellipsis, Tuple[Union[int, slice, ellipsis], ...], Array
+ ],
+ value: Union[int, float, bool, Array],
+ /,
+ ) -> None:
+ """
+ Performs the operation __setitem__.
+ """
+ # Note: Only indices required by the spec are allowed. See the
+ # docstring of _validate_index
+ self._validate_index(key)
+ if isinstance(key, Array):
+ # Indexing self._array with array_api arrays can be erroneous
+ key = key._array
+ self._array.__setitem__(key, asarray(value)._array)
+
+ def __sub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __sub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__sub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__sub__(other._array)
+ return self.__class__._new(res)
+
+ # PEP 484 requires int to be a subtype of float, but __truediv__ should
+ # not accept int.
+ def __truediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __truediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__truediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__truediv__(other._array)
+ return self.__class__._new(res)
+
+ def __xor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __xor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__xor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__xor__(other._array)
+ return self.__class__._new(res)
+
+ def __iadd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __iadd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__iadd__")
+ if other is NotImplemented:
+ return other
+ self._array.__iadd__(other._array)
+ return self
+
+ def __radd__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __radd__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__radd__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__radd__(other._array)
+ return self.__class__._new(res)
+
+ def __iand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __iand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__iand__")
+ if other is NotImplemented:
+ return other
+ self._array.__iand__(other._array)
+ return self
+
+ def __rand__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rand__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rand__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rand__(other._array)
+ return self.__class__._new(res)
+
+ def __ifloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ifloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ifloordiv__")
+ if other is NotImplemented:
+ return other
+ self._array.__ifloordiv__(other._array)
+ return self
+
+ def __rfloordiv__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rfloordiv__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rfloordiv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rfloordiv__(other._array)
+ return self.__class__._new(res)
+
+ def __ilshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __ilshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__ilshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__ilshift__(other._array)
+ return self
+
+ def __rlshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rlshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rlshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rlshift__(other._array)
+ return self.__class__._new(res)
+
+ def __imatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __imatmul__.
+ """
+ # Note: NumPy does not implement __imatmul__.
+
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__imatmul__")
+ if other is NotImplemented:
+ return other
+
+ # __imatmul__ can only be allowed when it would not change the shape
+ # of self.
+ other_shape = other.shape
+ if self.shape == () or other_shape == ():
+ raise ValueError("@= requires at least one dimension")
+ if len(other_shape) == 1 or other_shape[-1] != other_shape[-2]:
+ raise ValueError("@= cannot change the shape of the input array")
+ self._array[:] = self._array.__matmul__(other._array)
+ return self
+
+ def __rmatmul__(self: Array, other: Array, /) -> Array:
+ """
+ Performs the operation __rmatmul__.
+ """
+ # matmul is not defined for scalars, but without this, we may get
+ # the wrong error message from asarray.
+ other = self._check_allowed_dtypes(other, "numeric", "__rmatmul__")
+ if other is NotImplemented:
+ return other
+ res = self._array.__rmatmul__(other._array)
+ return self.__class__._new(res)
+
+ def __imod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imod__")
+ if other is NotImplemented:
+ return other
+ self._array.__imod__(other._array)
+ return self
+
+ def __rmod__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmod__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmod__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmod__(other._array)
+ return self.__class__._new(res)
+
+ def __imul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __imul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__imul__")
+ if other is NotImplemented:
+ return other
+ self._array.__imul__(other._array)
+ return self
+
+ def __rmul__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rmul__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rmul__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rmul__(other._array)
+ return self.__class__._new(res)
+
+ def __ior__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ior__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ior__")
+ if other is NotImplemented:
+ return other
+ self._array.__ior__(other._array)
+ return self
+
+ def __ror__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ror__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ror__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__ror__(other._array)
+ return self.__class__._new(res)
+
+ def __ipow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __ipow__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__ipow__")
+ if other is NotImplemented:
+ return other
+ self._array.__ipow__(other._array)
+ return self
+
+ def __rpow__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rpow__.
+ """
+ from ._elementwise_functions import pow
+
+ other = self._check_allowed_dtypes(other, "numeric", "__rpow__")
+ if other is NotImplemented:
+ return other
+ # Note: NumPy's __pow__ does not follow the spec type promotion rules
+ # for 0-d arrays, so we use pow() here instead.
+ return pow(other, self)
+
+ def __irshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __irshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__irshift__")
+ if other is NotImplemented:
+ return other
+ self._array.__irshift__(other._array)
+ return self
+
+ def __rrshift__(self: Array, other: Union[int, Array], /) -> Array:
+ """
+ Performs the operation __rrshift__.
+ """
+ other = self._check_allowed_dtypes(other, "integer", "__rrshift__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rrshift__(other._array)
+ return self.__class__._new(res)
+
+ def __isub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __isub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__isub__")
+ if other is NotImplemented:
+ return other
+ self._array.__isub__(other._array)
+ return self
+
+ def __rsub__(self: Array, other: Union[int, float, Array], /) -> Array:
+ """
+ Performs the operation __rsub__.
+ """
+ other = self._check_allowed_dtypes(other, "numeric", "__rsub__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rsub__(other._array)
+ return self.__class__._new(res)
+
+ def __itruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __itruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__itruediv__")
+ if other is NotImplemented:
+ return other
+ self._array.__itruediv__(other._array)
+ return self
+
+ def __rtruediv__(self: Array, other: Union[float, Array], /) -> Array:
+ """
+ Performs the operation __rtruediv__.
+ """
+ other = self._check_allowed_dtypes(other, "floating-point", "__rtruediv__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rtruediv__(other._array)
+ return self.__class__._new(res)
+
+ def __ixor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __ixor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__ixor__")
+ if other is NotImplemented:
+ return other
+ self._array.__ixor__(other._array)
+ return self
+
+ def __rxor__(self: Array, other: Union[int, bool, Array], /) -> Array:
+ """
+ Performs the operation __rxor__.
+ """
+ other = self._check_allowed_dtypes(other, "integer or boolean", "__rxor__")
+ if other is NotImplemented:
+ return other
+ self, other = self._normalize_two_args(self, other)
+ res = self._array.__rxor__(other._array)
+ return self.__class__._new(res)
+
+ def to_device(self: Array, device: Device, /, stream: None = None) -> Array:
+ if stream is not None:
+ raise ValueError("The stream argument to to_device() is not supported")
+ if device == 'cpu':
+ return self
+ raise ValueError(f"Unsupported device {device!r}")
+
+ @property
+ def dtype(self) -> Dtype:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.dtype <numpy.ndarray.dtype>`.
+
+ See its docstring for more information.
+ """
+ return self._array.dtype
+
+ @property
+ def device(self) -> Device:
+ return "cpu"
+
+ # Note: mT is new in array API spec (see matrix_transpose)
+ @property
+ def mT(self) -> Array:
+ from .linalg import matrix_transpose
+ return matrix_transpose(self)
+
+ @property
+ def ndim(self) -> int:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.ndim <numpy.ndarray.ndim>`.
+
+ See its docstring for more information.
+ """
+ return self._array.ndim
+
+ @property
+ def shape(self) -> Tuple[int, ...]:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.shape <numpy.ndarray.shape>`.
+
+ See its docstring for more information.
+ """
+ return self._array.shape
+
+ @property
+ def size(self) -> int:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.size <numpy.ndarray.size>`.
+
+ See its docstring for more information.
+ """
+ return self._array.size
+
+ @property
+ def T(self) -> Array:
+ """
+ Array API compatible wrapper for :py:meth:`np.ndarray.T <numpy.ndarray.T>`.
+
+ See its docstring for more information.
+ """
+ # Note: T only works on 2-dimensional arrays. See the corresponding
+ # note in the specification:
+ # https://data-apis.org/array-api/latest/API_specification/array_object.html#t
+ if self.ndim != 2:
+ raise ValueError("x.T requires x to have 2 dimensions. Use x.mT to transpose stacks of matrices and permute_dims() to permute dimensions.")
+ return self.__class__._new(self._array.T)
diff --git a/Lib/numpy/array_api/_constants.py b/Lib/numpy/array_api/_constants.py
new file mode 100644
index 00000000000000..9541941e7c6f18
--- /dev/null
+++ b/Lib/numpy/array_api/_constants.py
@@ -0,0 +1,6 @@
+import numpy as np
+
+e = np.e
+inf = np.inf
+nan = np.nan
+pi = np.pi
diff --git a/Lib/numpy/array_api/_creation_functions.py b/Lib/numpy/array_api/_creation_functions.py
new file mode 100644
index 00000000000000..3b014d37b2d64a
--- /dev/null
+++ b/Lib/numpy/array_api/_creation_functions.py
@@ -0,0 +1,351 @@
+from __future__ import annotations
+
+
+from typing import TYPE_CHECKING, List, Optional, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import (
+ Array,
+ Device,
+ Dtype,
+ NestedSequence,
+ SupportsBufferProtocol,
+ )
+ from collections.abc import Sequence
+from ._dtypes import _all_dtypes
+
+import numpy as np
+
+
+def _check_valid_dtype(dtype):
+ # Note: Only spelling dtypes as the dtype objects is supported.
+
+ # We use this instead of "dtype in _all_dtypes" because the dtype objects
+ # define equality with the sorts of things we want to disallow.
+ for d in (None,) + _all_dtypes:
+ if dtype is d:
+ return
+ raise ValueError("dtype must be one of the supported dtypes")
+
+
+def asarray(
+ obj: Union[
+ Array,
+ bool,
+ int,
+ float,
+ NestedSequence[bool | int | float],
+ SupportsBufferProtocol,
+ ],
+ /,
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+ copy: Optional[Union[bool, np._CopyMode]] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.asarray <numpy.asarray>`.
+
+ See its docstring for more information.
+ """
+ # _array_object imports in this file are inside the functions to avoid
+ # circular imports
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ if copy in (False, np._CopyMode.IF_NEEDED):
+ # Note: copy=False is not yet implemented in np.asarray
+ raise NotImplementedError("copy=False is not yet implemented")
+ if isinstance(obj, Array):
+ if dtype is not None and obj.dtype != dtype:
+ copy = True
+ if copy in (True, np._CopyMode.ALWAYS):
+ return Array._new(np.array(obj._array, copy=True, dtype=dtype))
+ return obj
+ if dtype is None and isinstance(obj, int) and (obj > 2 ** 64 or obj < -(2 ** 63)):
+ # Give a better error message in this case. NumPy would convert this
+ # to an object array. TODO: This won't handle large integers in lists.
+ raise OverflowError("Integer out of bounds for array dtypes")
+ res = np.asarray(obj, dtype=dtype)
+ return Array._new(res)
+
+
+def arange(
+ start: Union[int, float],
+ /,
+ stop: Optional[Union[int, float]] = None,
+ step: Union[int, float] = 1,
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.arange <numpy.arange>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.arange(start, stop=stop, step=step, dtype=dtype))
+
+
+def empty(
+ shape: Union[int, Tuple[int, ...]],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.empty <numpy.empty>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.empty(shape, dtype=dtype))
+
+
+def empty_like(
+ x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.empty_like <numpy.empty_like>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.empty_like(x._array, dtype=dtype))
+
+
+def eye(
+ n_rows: int,
+ n_cols: Optional[int] = None,
+ /,
+ *,
+ k: int = 0,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.eye <numpy.eye>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.eye(n_rows, M=n_cols, k=k, dtype=dtype))
+
+
+def from_dlpack(x: object, /) -> Array:
+ from ._array_object import Array
+
+ return Array._new(np.from_dlpack(x))
+
+
+def full(
+ shape: Union[int, Tuple[int, ...]],
+ fill_value: Union[int, float],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.full <numpy.full>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ if isinstance(fill_value, Array) and fill_value.ndim == 0:
+ fill_value = fill_value._array
+ res = np.full(shape, fill_value, dtype=dtype)
+ if res.dtype not in _all_dtypes:
+ # This will happen if the fill value is not something that NumPy
+ # coerces to one of the acceptable dtypes.
+ raise TypeError("Invalid input to full")
+ return Array._new(res)
+
+
+def full_like(
+ x: Array,
+ /,
+ fill_value: Union[int, float],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.full_like <numpy.full_like>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ res = np.full_like(x._array, fill_value, dtype=dtype)
+ if res.dtype not in _all_dtypes:
+ # This will happen if the fill value is not something that NumPy
+ # coerces to one of the acceptable dtypes.
+ raise TypeError("Invalid input to full_like")
+ return Array._new(res)
+
+
+def linspace(
+ start: Union[int, float],
+ stop: Union[int, float],
+ /,
+ num: int,
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+ endpoint: bool = True,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.linspace <numpy.linspace>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.linspace(start, stop, num, dtype=dtype, endpoint=endpoint))
+
+
+def meshgrid(*arrays: Array, indexing: str = "xy") -> List[Array]:
+ """
+ Array API compatible wrapper for :py:func:`np.meshgrid <numpy.meshgrid>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ # Note: unlike np.meshgrid, only inputs with all the same dtype are
+ # allowed
+
+ if len({a.dtype for a in arrays}) > 1:
+ raise ValueError("meshgrid inputs must all have the same dtype")
+
+ return [
+ Array._new(array)
+ for array in np.meshgrid(*[a._array for a in arrays], indexing=indexing)
+ ]
+
+
+def ones(
+ shape: Union[int, Tuple[int, ...]],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.ones <numpy.ones>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.ones(shape, dtype=dtype))
+
+
+def ones_like(
+ x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.ones_like <numpy.ones_like>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.ones_like(x._array, dtype=dtype))
+
+
+def tril(x: Array, /, *, k: int = 0) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.tril <numpy.tril>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ if x.ndim < 2:
+ # Note: Unlike np.tril, x must be at least 2-D
+ raise ValueError("x must be at least 2-dimensional for tril")
+ return Array._new(np.tril(x._array, k=k))
+
+
+def triu(x: Array, /, *, k: int = 0) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.triu <numpy.triu>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ if x.ndim < 2:
+ # Note: Unlike np.triu, x must be at least 2-D
+ raise ValueError("x must be at least 2-dimensional for triu")
+ return Array._new(np.triu(x._array, k=k))
+
+
+def zeros(
+ shape: Union[int, Tuple[int, ...]],
+ *,
+ dtype: Optional[Dtype] = None,
+ device: Optional[Device] = None,
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.zeros <numpy.zeros>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.zeros(shape, dtype=dtype))
+
+
+def zeros_like(
+ x: Array, /, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None
+) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.zeros_like <numpy.zeros_like>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ _check_valid_dtype(dtype)
+ if device not in ["cpu", None]:
+ raise ValueError(f"Unsupported device {device!r}")
+ return Array._new(np.zeros_like(x._array, dtype=dtype))
diff --git a/Lib/numpy/array_api/_data_type_functions.py b/Lib/numpy/array_api/_data_type_functions.py
new file mode 100644
index 00000000000000..7026bd489563e5
--- /dev/null
+++ b/Lib/numpy/array_api/_data_type_functions.py
@@ -0,0 +1,146 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._dtypes import _all_dtypes, _result_type
+
+from dataclasses import dataclass
+from typing import TYPE_CHECKING, List, Tuple, Union
+
+if TYPE_CHECKING:
+ from ._typing import Dtype
+ from collections.abc import Sequence
+
+import numpy as np
+
+
+# Note: astype is a function, not an array method as in NumPy.
+def astype(x: Array, dtype: Dtype, /, *, copy: bool = True) -> Array:
+ if not copy and dtype == x.dtype:
+ return x
+ return Array._new(x._array.astype(dtype=dtype, copy=copy))
+
+
+def broadcast_arrays(*arrays: Array) -> List[Array]:
+ """
+ Array API compatible wrapper for :py:func:`np.broadcast_arrays <numpy.broadcast_arrays>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ return [
+ Array._new(array) for array in np.broadcast_arrays(*[a._array for a in arrays])
+ ]
+
+
+def broadcast_to(x: Array, /, shape: Tuple[int, ...]) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.broadcast_to <numpy.broadcast_to>`.
+
+ See its docstring for more information.
+ """
+ from ._array_object import Array
+
+ return Array._new(np.broadcast_to(x._array, shape))
+
+
+def can_cast(from_: Union[Dtype, Array], to: Dtype, /) -> bool:
+ """
+ Array API compatible wrapper for :py:func:`np.can_cast <numpy.can_cast>`.
+
+ See its docstring for more information.
+ """
+ if isinstance(from_, Array):
+ from_ = from_.dtype
+ elif from_ not in _all_dtypes:
+ raise TypeError(f"{from_=}, but should be an array_api array or dtype")
+ if to not in _all_dtypes:
+ raise TypeError(f"{to=}, but should be a dtype")
+ # Note: We avoid np.can_cast() as it has discrepancies with the array API,
+ # since NumPy allows cross-kind casting (e.g., NumPy allows bool -> int8).
+ # See https://github.com/numpy/numpy/issues/20870
+ try:
+ # We promote `from_` and `to` together. We then check if the promoted
+ # dtype is `to`, which indicates if `from_` can (up)cast to `to`.
+ dtype = _result_type(from_, to)
+ return to == dtype
+ except TypeError:
+ # _result_type() raises if the dtypes don't promote together
+ return False
+
+
+# These are internal objects for the return types of finfo and iinfo, since
+# the NumPy versions contain extra data that isn't part of the spec.
+@dataclass
+class finfo_object:
+ bits: int
+ # Note: The types of the float data here are float, whereas in NumPy they
+ # are scalars of the corresponding float dtype.
+ eps: float
+ max: float
+ min: float
+ smallest_normal: float
+
+
+@dataclass
+class iinfo_object:
+ bits: int
+ max: int
+ min: int
+
+
+def finfo(type: Union[Dtype, Array], /) -> finfo_object:
+ """
+ Array API compatible wrapper for :py:func:`np.finfo <numpy.finfo>`.
+
+ See its docstring for more information.
+ """
+ fi = np.finfo(type)
+ # Note: The types of the float data here are float, whereas in NumPy they
+ # are scalars of the corresponding float dtype.
+ return finfo_object(
+ fi.bits,
+ float(fi.eps),
+ float(fi.max),
+ float(fi.min),
+ float(fi.smallest_normal),
+ )
+
+
+def iinfo(type: Union[Dtype, Array], /) -> iinfo_object:
+ """
+ Array API compatible wrapper for :py:func:`np.iinfo <numpy.iinfo>`.
+
+ See its docstring for more information.
+ """
+ ii = np.iinfo(type)
+ return iinfo_object(ii.bits, ii.max, ii.min)
+
+
+def result_type(*arrays_and_dtypes: Union[Array, Dtype]) -> Dtype:
+ """
+ Array API compatible wrapper for :py:func:`np.result_type <numpy.result_type>`.
+
+ See its docstring for more information.
+ """
+ # Note: we use a custom implementation that gives only the type promotions
+ # required by the spec rather than using np.result_type. NumPy implements
+ # too many extra type promotions like int64 + uint64 -> float64, and does
+ # value-based casting on scalar arrays.
+ A = []
+ for a in arrays_and_dtypes:
+ if isinstance(a, Array):
+ a = a.dtype
+ elif isinstance(a, np.ndarray) or a not in _all_dtypes:
+ raise TypeError("result_type() inputs must be array_api arrays or dtypes")
+ A.append(a)
+
+ if len(A) == 0:
+ raise ValueError("at least one array or dtype is required")
+ elif len(A) == 1:
+ return A[0]
+ else:
+ t = A[0]
+ for t2 in A[1:]:
+ t = _result_type(t, t2)
+ return t
diff --git a/Lib/numpy/array_api/_dtypes.py b/Lib/numpy/array_api/_dtypes.py
new file mode 100644
index 00000000000000..476d619fee6379
--- /dev/null
+++ b/Lib/numpy/array_api/_dtypes.py
@@ -0,0 +1,143 @@
+import numpy as np
+
+# Note: we use dtype objects instead of dtype classes. The spec does not
+# require any behavior on dtypes other than equality.
+int8 = np.dtype("int8")
+int16 = np.dtype("int16")
+int32 = np.dtype("int32")
+int64 = np.dtype("int64")
+uint8 = np.dtype("uint8")
+uint16 = np.dtype("uint16")
+uint32 = np.dtype("uint32")
+uint64 = np.dtype("uint64")
+float32 = np.dtype("float32")
+float64 = np.dtype("float64")
+# Note: This name is changed
+bool = np.dtype("bool")
+
+_all_dtypes = (
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+ float32,
+ float64,
+ bool,
+)
+_boolean_dtypes = (bool,)
+_floating_dtypes = (float32, float64)
+_integer_dtypes = (int8, int16, int32, int64, uint8, uint16, uint32, uint64)
+_integer_or_boolean_dtypes = (
+ bool,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+_numeric_dtypes = (
+ float32,
+ float64,
+ int8,
+ int16,
+ int32,
+ int64,
+ uint8,
+ uint16,
+ uint32,
+ uint64,
+)
+
+_dtype_categories = {
+ "all": _all_dtypes,
+ "numeric": _numeric_dtypes,
+ "integer": _integer_dtypes,
+ "integer or boolean": _integer_or_boolean_dtypes,
+ "boolean": _boolean_dtypes,
+ "floating-point": _floating_dtypes,
+}
+
+
+# Note: the spec defines a restricted type promotion table compared to NumPy.
+# In particular, cross-kind promotions like integer + float or boolean +
+# integer are not allowed, even for functions that accept both kinds.
+# Additionally, NumPy promotes signed integer + uint64 to float64, but this
+# promotion is not allowed here. To be clear, Python scalar int objects are
+# allowed to promote to floating-point dtypes, but only in array operators
+# (see Array._promote_scalar) method in _array_object.py.
+_promotion_table = {
+ (int8, int8): int8,
+ (int8, int16): int16,
+ (int8, int32): int32,
+ (int8, int64): int64,
+ (int16, int8): int16,
+ (int16, int16): int16,
+ (int16, int32): int32,
+ (int16, int64): int64,
+ (int32, int8): int32,
+ (int32, int16): int32,
+ (int32, int32): int32,
+ (int32, int64): int64,
+ (int64, int8): int64,
+ (int64, int16): int64,
+ (int64, int32): int64,
+ (int64, int64): int64,
+ (uint8, uint8): uint8,
+ (uint8, uint16): uint16,
+ (uint8, uint32): uint32,
+ (uint8, uint64): uint64,
+ (uint16, uint8): uint16,
+ (uint16, uint16): uint16,
+ (uint16, uint32): uint32,
+ (uint16, uint64): uint64,
+ (uint32, uint8): uint32,
+ (uint32, uint16): uint32,
+ (uint32, uint32): uint32,
+ (uint32, uint64): uint64,
+ (uint64, uint8): uint64,
+ (uint64, uint16): uint64,
+ (uint64, uint32): uint64,
+ (uint64, uint64): uint64,
+ (int8, uint8): int16,
+ (int8, uint16): int32,
+ (int8, uint32): int64,
+ (int16, uint8): int16,
+ (int16, uint16): int32,
+ (int16, uint32): int64,
+ (int32, uint8): int32,
+ (int32, uint16): int32,
+ (int32, uint32): int64,
+ (int64, uint8): int64,
+ (int64, uint16): int64,
+ (int64, uint32): int64,
+ (uint8, int8): int16,
+ (uint16, int8): int32,
+ (uint32, int8): int64,
+ (uint8, int16): int16,
+ (uint16, int16): int32,
+ (uint32, int16): int64,
+ (uint8, int32): int32,
+ (uint16, int32): int32,
+ (uint32, int32): int64,
+ (uint8, int64): int64,
+ (uint16, int64): int64,
+ (uint32, int64): int64,
+ (float32, float32): float32,
+ (float32, float64): float64,
+ (float64, float32): float64,
+ (float64, float64): float64,
+ (bool, bool): bool,
+}
+
+
+def _result_type(type1, type2):
+ if (type1, type2) in _promotion_table:
+ return _promotion_table[type1, type2]
+ raise TypeError(f"{type1} and {type2} cannot be type promoted together")
diff --git a/Lib/numpy/array_api/_elementwise_functions.py b/Lib/numpy/array_api/_elementwise_functions.py
new file mode 100644
index 00000000000000..c758a09447a0b3
--- /dev/null
+++ b/Lib/numpy/array_api/_elementwise_functions.py
@@ -0,0 +1,729 @@
+from __future__ import annotations
+
+from ._dtypes import (
+ _boolean_dtypes,
+ _floating_dtypes,
+ _integer_dtypes,
+ _integer_or_boolean_dtypes,
+ _numeric_dtypes,
+ _result_type,
+)
+from ._array_object import Array
+
+import numpy as np
+
+
+def abs(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.abs <numpy.absolute>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in abs")
+    return Array._new(np.abs(x._array))
+
+
+# Note: the function name is different here
+def acos(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arccos <numpy.arccos>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in acos")
+    return Array._new(np.arccos(x._array))
+
+
+# Note: the function name is different here
+def acosh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arccosh <numpy.arccosh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in acosh")
+    return Array._new(np.arccosh(x._array))
+
+
+def add(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.add <numpy.add>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in add")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.add(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def asin(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arcsin <numpy.arcsin>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in asin")
+    return Array._new(np.arcsin(x._array))
+
+
+# Note: the function name is different here
+def asinh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arcsinh <numpy.arcsinh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in asinh")
+    return Array._new(np.arcsinh(x._array))
+
+
+# Note: the function name is different here
+def atan(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arctan <numpy.arctan>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in atan")
+    return Array._new(np.arctan(x._array))
+
+
+# Note: the function name is different here
+def atan2(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arctan2 <numpy.arctan2>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in atan2")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.arctan2(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def atanh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.arctanh <numpy.arctanh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in atanh")
+    return Array._new(np.arctanh(x._array))
+
+
+def bitwise_and(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.bitwise_and <numpy.bitwise_and>`.
+
+    See its docstring for more information.
+    """
+    if (
+        x1.dtype not in _integer_or_boolean_dtypes
+        or x2.dtype not in _integer_or_boolean_dtypes
+    ):
+        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_and")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.bitwise_and(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_left_shift(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.left_shift <numpy.left_shift>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
+        raise TypeError("Only integer dtypes are allowed in bitwise_left_shift")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    # Note: bitwise_left_shift is only defined for x2 nonnegative.
+    if np.any(x2._array < 0):
+        raise ValueError("bitwise_left_shift(x1, x2) is only defined for x2 >= 0")
+    return Array._new(np.left_shift(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_invert(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.invert <numpy.invert>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _integer_or_boolean_dtypes:
+        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_invert")
+    return Array._new(np.invert(x._array))
+
+
+def bitwise_or(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.bitwise_or <numpy.bitwise_or>`.
+
+    See its docstring for more information.
+    """
+    if (
+        x1.dtype not in _integer_or_boolean_dtypes
+        or x2.dtype not in _integer_or_boolean_dtypes
+    ):
+        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_or")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.bitwise_or(x1._array, x2._array))
+
+
+# Note: the function name is different here
+def bitwise_right_shift(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.right_shift <numpy.right_shift>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _integer_dtypes or x2.dtype not in _integer_dtypes:
+        raise TypeError("Only integer dtypes are allowed in bitwise_right_shift")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    # Note: bitwise_right_shift is only defined for x2 nonnegative.
+    if np.any(x2._array < 0):
+        raise ValueError("bitwise_right_shift(x1, x2) is only defined for x2 >= 0")
+    return Array._new(np.right_shift(x1._array, x2._array))
+
+
+def bitwise_xor(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.bitwise_xor <numpy.bitwise_xor>`.
+
+    See its docstring for more information.
+    """
+    if (
+        x1.dtype not in _integer_or_boolean_dtypes
+        or x2.dtype not in _integer_or_boolean_dtypes
+    ):
+        raise TypeError("Only integer or boolean dtypes are allowed in bitwise_xor")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.bitwise_xor(x1._array, x2._array))
+
+
+def ceil(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.ceil <numpy.ceil>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in ceil")
+    if x.dtype in _integer_dtypes:
+        # Note: The return dtype of ceil is the same as the input
+        return x
+    return Array._new(np.ceil(x._array))
+
+
+def cos(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.cos <numpy.cos>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in cos")
+    return Array._new(np.cos(x._array))
+
+
+def cosh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.cosh <numpy.cosh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in cosh")
+    return Array._new(np.cosh(x._array))
+
+
+def divide(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.divide <numpy.divide>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in divide")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.divide(x1._array, x2._array))
+
+
+def equal(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.equal <numpy.equal>`.
+
+    See its docstring for more information.
+    """
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.equal(x1._array, x2._array))
+
+
+def exp(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.exp <numpy.exp>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in exp")
+    return Array._new(np.exp(x._array))
+
+
+def expm1(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.expm1 <numpy.expm1>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in expm1")
+    return Array._new(np.expm1(x._array))
+
+
+def floor(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.floor <numpy.floor>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in floor")
+    if x.dtype in _integer_dtypes:
+        # Note: The return dtype of floor is the same as the input
+        return x
+    return Array._new(np.floor(x._array))
+
+
+def floor_divide(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.floor_divide <numpy.floor_divide>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in floor_divide")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.floor_divide(x1._array, x2._array))
+
+
+def greater(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.greater <numpy.greater>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in greater")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.greater(x1._array, x2._array))
+
+
+def greater_equal(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.greater_equal <numpy.greater_equal>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in greater_equal")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.greater_equal(x1._array, x2._array))
+
+
+def isfinite(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.isfinite <numpy.isfinite>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in isfinite")
+    return Array._new(np.isfinite(x._array))
+
+
+def isinf(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.isinf <numpy.isinf>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in isinf")
+    return Array._new(np.isinf(x._array))
+
+
+def isnan(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.isnan <numpy.isnan>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in isnan")
+    return Array._new(np.isnan(x._array))
+
+
+def less(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.less <numpy.less>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in less")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.less(x1._array, x2._array))
+
+
+def less_equal(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.less_equal <numpy.less_equal>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in less_equal")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.less_equal(x1._array, x2._array))
+
+
+def log(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.log <numpy.log>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in log")
+    return Array._new(np.log(x._array))
+
+
+def log1p(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.log1p <numpy.log1p>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in log1p")
+    return Array._new(np.log1p(x._array))
+
+
+def log2(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.log2 <numpy.log2>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in log2")
+    return Array._new(np.log2(x._array))
+
+
+def log10(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.log10 <numpy.log10>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in log10")
+    return Array._new(np.log10(x._array))
+
+
+def logaddexp(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logaddexp <numpy.logaddexp>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _floating_dtypes or x2.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in logaddexp")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logaddexp(x1._array, x2._array))
+
+
+def logical_and(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logical_and <numpy.logical_and>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+        raise TypeError("Only boolean dtypes are allowed in logical_and")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logical_and(x1._array, x2._array))
+
+
+def logical_not(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logical_not <numpy.logical_not>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _boolean_dtypes:
+        raise TypeError("Only boolean dtypes are allowed in logical_not")
+    return Array._new(np.logical_not(x._array))
+
+
+def logical_or(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logical_or <numpy.logical_or>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+        raise TypeError("Only boolean dtypes are allowed in logical_or")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logical_or(x1._array, x2._array))
+
+
+def logical_xor(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.logical_xor <numpy.logical_xor>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _boolean_dtypes or x2.dtype not in _boolean_dtypes:
+        raise TypeError("Only boolean dtypes are allowed in logical_xor")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.logical_xor(x1._array, x2._array))
+
+
+def multiply(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.multiply <numpy.multiply>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in multiply")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.multiply(x1._array, x2._array))
+
+
+def negative(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.negative <numpy.negative>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in negative")
+    return Array._new(np.negative(x._array))
+
+
+def not_equal(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.not_equal <numpy.not_equal>`.
+
+    See its docstring for more information.
+    """
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.not_equal(x1._array, x2._array))
+
+
+def positive(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.positive <numpy.positive>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in positive")
+    return Array._new(np.positive(x._array))
+
+
+# Note: the function name is different here
+def pow(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.power <numpy.power>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in pow")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.power(x1._array, x2._array))
+
+
+def remainder(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.remainder <numpy.remainder>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in remainder")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.remainder(x1._array, x2._array))
+
+
+def round(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.round <numpy.round>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in round")
+    return Array._new(np.round(x._array))
+
+
+def sign(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sign <numpy.sign>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in sign")
+    return Array._new(np.sign(x._array))
+
+
+def sin(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sin <numpy.sin>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in sin")
+    return Array._new(np.sin(x._array))
+
+
+def sinh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sinh <numpy.sinh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in sinh")
+    return Array._new(np.sinh(x._array))
+
+
+def square(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.square <numpy.square>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in square")
+    return Array._new(np.square(x._array))
+
+
+def sqrt(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.sqrt <numpy.sqrt>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in sqrt")
+    return Array._new(np.sqrt(x._array))
+
+
+def subtract(x1: Array, x2: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.subtract <numpy.subtract>`.
+
+    See its docstring for more information.
+    """
+    if x1.dtype not in _numeric_dtypes or x2.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in subtract")
+    # Call result type here just to raise on disallowed type combinations
+    _result_type(x1.dtype, x2.dtype)
+    x1, x2 = Array._normalize_two_args(x1, x2)
+    return Array._new(np.subtract(x1._array, x2._array))
+
+
+def tan(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.tan <numpy.tan>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in tan")
+    return Array._new(np.tan(x._array))
+
+
+def tanh(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.tanh <numpy.tanh>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _floating_dtypes:
+        raise TypeError("Only floating-point dtypes are allowed in tanh")
+    return Array._new(np.tanh(x._array))
+
+
+def trunc(x: Array, /) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.trunc <numpy.trunc>`.
+
+    See its docstring for more information.
+    """
+    if x.dtype not in _numeric_dtypes:
+        raise TypeError("Only numeric dtypes are allowed in trunc")
+    if x.dtype in _integer_dtypes:
+        # Note: The return dtype of trunc is the same as the input
+        return x
+    return Array._new(np.trunc(x._array))
diff --git a/Lib/numpy/array_api/_manipulation_functions.py b/Lib/numpy/array_api/_manipulation_functions.py
new file mode 100644
index 00000000000000..7991f46a272859
--- /dev/null
+++ b/Lib/numpy/array_api/_manipulation_functions.py
@@ -0,0 +1,98 @@
+from __future__ import annotations
+
+from ._array_object import Array
+from ._data_type_functions import result_type
+
+from typing import List, Optional, Tuple, Union
+
+import numpy as np
+
+# Note: the function name is different here
+def concat(
+    arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: Optional[int] = 0
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.concatenate <numpy.concatenate>`.
+
+    See its docstring for more information.
+    """
+    # Note: Casting rules here are different from the np.concatenate default
+    # (no value-based casting for scalars with axis=None, no cross-kind casting)
+    dtype = result_type(*arrays)
+    arrays = tuple(a._array for a in arrays)
+    return Array._new(np.concatenate(arrays, axis=axis, dtype=dtype))
+
+
+def expand_dims(x: Array, /, *, axis: int) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.expand_dims <numpy.expand_dims>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.expand_dims(x._array, axis))
+
+
+def flip(x: Array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.flip <numpy.flip>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.flip(x._array, axis=axis))
+
+
+# Note: The function name is different here (see also matrix_transpose).
+# Unlike transpose(), the axes argument is required.
+def permute_dims(x: Array, /, axes: Tuple[int, ...]) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.transpose <numpy.transpose>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.transpose(x._array, axes))
+
+
+# Note: the optional argument is called 'shape', not 'newshape'
+def reshape(x: Array, /, shape: Tuple[int, ...]) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.reshape <numpy.reshape>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.reshape(x._array, shape))
+
+
+def roll(
+    x: Array,
+    /,
+    shift: Union[int, Tuple[int, ...]],
+    *,
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.roll <numpy.roll>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.roll(x._array, shift, axis=axis))
+
+
+def squeeze(x: Array, /, axis: Union[int, Tuple[int, ...]]) -> Array:
+    """
+    Array API compatible wrapper for :py:func:`np.squeeze <numpy.squeeze>`.
+
+    See its docstring for more information.
+    """
+    return Array._new(np.squeeze(x._array, axis=axis))
+
+
+def stack(arrays: Union[Tuple[Array, ...], List[Array]], /, *, axis: int = 0) -> Array:
+ """
+ Array API compatible wrapper for :py:func:`np.stack