NumPy 源码解析(九十)
.\numpy\numpy\_core\_add_newdocs_scalars.py
"""
This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
our sphinx ``conf.py`` during doc builds, where we want to avoid showing
platform-dependent information.
"""
import sys
import os
from numpy._core import dtype
from numpy._core import numerictypes as _numerictypes
from numpy._core.function_base import add_newdoc
def numeric_type_aliases(aliases):
def type_aliases_gen():
for alias, doc in aliases:
try:
alias_type = getattr(_numerictypes, alias)
except AttributeError:
pass
else:
yield (alias_type, alias, doc)
return list(type_aliases_gen())
# Candidate platform-dependent aliases.  numeric_type_aliases() keeps only the
# entries that actually exist in numpy._core.numerictypes on this platform
# (e.g. float96/float128 and complex192/complex256 availability varies).
possible_aliases = numeric_type_aliases([
    ('int8', '8-bit signed integer (``-128`` to ``127``)'),
    ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
    ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
    ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
    ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
    ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
    ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
    ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
    ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
    ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
    ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
    ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
    ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
    ('float96', '96-bit extended-precision floating-point number type'),
    ('float128', '128-bit extended-precision floating-point number type'),
    ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
    ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
    ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
    ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
])
def _get_platform_and_machine():
try:
system, _, _, _, machine = os.uname()
except AttributeError:
system = sys.platform
if system == 'win32':
machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
or os.environ.get('PROCESSOR_ARCHITECTURE', '')
else:
machine = 'unknown'
return system, machine
# Computed once at import time; interpolated into the per-platform alias
# lines of the scalar-type docstrings generated below.
_system, _machine = _get_platform_and_machine()
_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
    """Attach a docstring to scalar type *obj* in ``numpy._core.numerictypes``.

    Parameters
    ----------
    obj : str
        Attribute name of the scalar type on ``numerictypes``.
    fixed_aliases : list of str
        Aliases that exist on every platform.
    doc : str
        Docstring body; the character code, canonical name, and any
        platform-dependent alias lines are appended automatically.
    """
    o = getattr(_numerictypes, obj)
    character_code = dtype(o).char
    # Mention the canonical name only when ``obj`` is itself an alias.
    canonical_name_doc = "" if obj == o.__name__ else \
        f":Canonical name: `numpy.{obj}`\n    "
    if fixed_aliases:
        alias_doc = ''.join(f":Alias: `numpy.{alias}`\n    "
                            for alias in fixed_aliases)
    else:
        alias_doc = ''
    # Platform-dependent aliases (e.g. intp) resolved at import time.
    alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n    "
                         for (alias_type, alias, doc) in possible_aliases
                         if alias_type is o)
    # BUG FIX: the annotated copy had a stray "# ..." note embedded inside
    # this f-string, which leaked into every generated docstring; removed.
    docstring = f"""
    {doc.strip()}

    :Character code: ``'{character_code}'``
    {canonical_name_doc}{alias_doc}
    """
    add_newdoc('numpy._core.numerictypes', obj, docstring)
# Shared docstring body, registered below for both ``bool`` and ``bool_``.
_bool_docstring = (
"""
Boolean type (True or False), stored as a byte.
.. warning::
The :class:`bool` type is not a subclass of the :class:`int_` type
(the :class:`bool` is not even a number type). This is different
than Python's default implementation of :class:`bool` as a
sub-class of :class:`int`.
"""
)
# ``bool`` and ``bool_`` share one docstring body.
add_newdoc_for_scalar_type('bool', [], _bool_docstring)
add_newdoc_for_scalar_type('bool_', [], _bool_docstring)
# C-compatible signed integer types.
add_newdoc_for_scalar_type('byte', [],
"""
Signed integer type, compatible with C ``char``.
""")
add_newdoc_for_scalar_type('short', [],
"""
Signed integer type, compatible with C ``short``.
""")
add_newdoc_for_scalar_type('intc', [],
"""
Signed integer type, compatible with C ``int``.
""")
# Default integer type (pointer-sized in practice; see docstring text).
add_newdoc_for_scalar_type('int_', [],
"""
Default signed integer type, 64bit on 64bit systems and 32bit on 32bit
systems.
""")
add_newdoc_for_scalar_type('longlong', [],
"""
Signed integer type, compatible with C ``long long``.
""")
# C-compatible unsigned integer types.
add_newdoc_for_scalar_type('ubyte', [],
"""
Unsigned integer type, compatible with C ``unsigned char``.
""")
add_newdoc_for_scalar_type('ushort', [],
"""
Unsigned integer type, compatible with C ``unsigned short``.
""")
add_newdoc_for_scalar_type('uintc', [],
"""
Unsigned integer type, compatible with C ``unsigned int``.
""")
# BUG FIX: docstring previously read "Unsigned signed integer type".
add_newdoc_for_scalar_type('uint', [],
"""
Unsigned integer type, 64bit on 64bit systems and 32bit on 32bit
systems.
""")
# BUG FIX: docstring previously said "Signed" for this unsigned type.
add_newdoc_for_scalar_type('ulonglong', [],
"""
Unsigned integer type, compatible with C ``unsigned long long``.
""")
add_newdoc_for_scalar_type('half', [],
"""
Half-precision floating-point number type.
""")
# BUG FIX: the 'single' registration was garbled by extraction -- its real
# docstring text ("... compatible with C ``float``.") had fallen outside the
# string literal, leaving stray top-level lines.  Reconstructed here.
add_newdoc_for_scalar_type('single', [],
"""
Single-precision floating-point number type, compatible with C ``float``.
""")
# Register the docstring for the double-precision float scalar type.
add_newdoc_for_scalar_type('double', [],
"""
双精度浮点数类型,与 Python 中的 :class:`float` 和 C 的 ``double`` 兼容。
""")
# Register the docstring for the extended-precision float scalar type.
add_newdoc_for_scalar_type('longdouble', [],
"""
扩展精度浮点数类型,与 C 的 ``long double`` 兼容,但不一定与 IEEE 754 四倍精度兼容。
""")
# Register the docstring for the single-precision complex scalar type.
add_newdoc_for_scalar_type('csingle', [],
"""
复数类型,由两个单精度浮点数组成。
""")
# Register the docstring for the double-precision complex scalar type.
add_newdoc_for_scalar_type('cdouble', [],
"""
复数类型,由两个双精度浮点数组成,与 Python 的 :class:`complex` 兼容。
""")
# Register the docstring for the extended-precision complex scalar type.
add_newdoc_for_scalar_type('clongdouble', [],
"""
复数类型,由两个扩展精度浮点数组成。
""")
# Register the docstring for the object scalar type.
add_newdoc_for_scalar_type('object_', [],
"""
任意 Python 对象。
""")
# Register the docstring for the unicode string scalar type.
add_newdoc_for_scalar_type('str_', [],
r"""
Unicode 字符串。
此类型会去除尾部的空字符。
>>> s = np.str_("abc\x00")
>>> s
'abc'
不同于内置的 :class:`str`,此类型支持
:ref:`python:bufferobjects`,以 UCS4 的形式展示其内容:
>>> m = memoryview(np.str_("abc"))
>>> m.format
'3w'
>>> m.tobytes()
b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
""")
# Register the docstring for the byte-string scalar type.
add_newdoc_for_scalar_type('bytes_', [],
r"""
字节串。
在数组中使用时,此类型会去除尾部的空字节。
""")
# Register the docstring for the structured/unstructured void scalar type.
# BUG FIX: in the extracted copy the Examples section had fallen out of the
# string literal, leaving bare module-level ``np.void(...)`` calls; ``np`` is
# not imported in this module, so importing it raised NameError.  The
# examples are folded back into the docstring; the inputs/expected outputs
# follow upstream NumPy's documentation for np.void.
add_newdoc_for_scalar_type('void', [],
r"""
np.void(length_or_data, /, dtype=None)
创建一个新的结构化或非结构化的空标量。
参数
----------
length_or_data : int, array-like, bytes-like, object
长度或字节数据,用于创建非结构化的空标量。当指定 dtype 时,可以是要存储在新标量中的数据。
这可以是一个类似数组的对象,此时可能返回一个数组。
dtype : dtype, optional
如果提供,则为新标量的数据类型。该数据类型必须是 "void" 类型(即结构化或非结构化的空标量)。
.. versionadded:: 1.24
注意
-----
由于历史原因和空标量可以表示任意字节数据和结构化数据类型的特性,
空构造函数有三种调用约定:
1. ``np.void(5)`` 创建一个填充有五个 ``\0`` 字节的 ``dtype="V5"`` 标量。其中的 5 可以是 Python 或 NumPy 的整数。
2. ``np.void(b"bytes-like")`` 从字节串创建一个空标量。数据类型的项大小将匹配字节串长度,这里是 ``"V10"``。
3. 当传递 ``dtype=`` 时,调用与创建数组类似。但是返回的是空标量而不是数组。
请参阅示例,展示了所有三种不同的约定。
示例
--------
>>> np.void(5)
np.void(b'\x00\x00\x00\x00\x00')
>>> np.void(b'abcd')
np.void(b'\x61\x62\x63\x64')
>>> np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
>>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
""")
# Register the docstring for the datetime64 scalar type.
add_newdoc_for_scalar_type('datetime64', [],
"""
如果从 64 位整数创建,则表示相对于 ``1970-01-01T00:00:00`` 的偏移量。
如果从字符串创建,则字符串可以是 ISO 8601 日期或日期时间格式。
当解析包含时区的字符串以创建 datetime 对象时(以 'Z' 或时区偏移量结尾),将丢弃时区并给出用户警告。
Datetime64 对象应被视为 UTC,因此偏移量为 +0000。
>>> np.datetime64(10, 'Y')
np.datetime64('1980')
>>> np.datetime64('1980', 'Y')
np.datetime64('1980')
>>> np.datetime64(10, 'D')
np.datetime64('1970-01-11')
更多信息请参见 :ref:`arrays.datetime`。
""")
# Register the docstring for the timedelta64 scalar type.
add_newdoc_for_scalar_type('timedelta64', [],
"""
作为 64 位整数存储的 timedelta。
更多信息请参见 :ref:`arrays.datetime`。
""")
# Attach documentation for the abstract integer type's is_integer method.
add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
"""
integer.is_integer() -> bool
如果数是有限的整数值,则返回 ``True``。
.. versionadded:: 1.22
示例
--------
>>> np.int64(-2).is_integer()
True
>>> np.uint32(5).is_integer()
True
"""))
# Attach ``as_integer_ratio`` and ``is_integer`` documentation to each
# concrete floating-point scalar type.
# BUG FIX: the loop body had lost its indentation during extraction, so the
# two add_newdoc calls sat at module level (a SyntaxError after the bare
# ``for`` header); the suite is restored here.
for float_name in ('half', 'single', 'double', 'longdouble'):
    add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
"""
{ftype}.as_integer_ratio() -> (int, int)
返回一对整数,其比率恰好等于原始浮点数,并且具有正的分母。
对无穷大返回 `OverflowError`,对 NaN 返回 `ValueError`。
>>> np.{ftype}(10.0).as_integer_ratio()
(10, 1)
>>> np.{ftype}(0.0).as_integer_ratio()
(0, 1)
>>> np.{ftype}(-.25).as_integer_ratio()
(-1, 4)
""".format(ftype=float_name)))

    # is_integer documentation for the same type (added in NumPy 1.22).
    add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
f"""
{float_name}.is_integer() -> bool
如果浮点数是有限的整数值,则返回 ``True``;否则返回 ``False``。
.. versionadded:: 1.22
示例
--------
>>> np.{float_name}(-2.0).is_integer()
True
>>> np.{float_name}(3.2).is_integer()
False
"""))
# Attach ``bit_count`` documentation to every sized integer scalar type.
# Signed types get an extra negative-value example: the dtype kind character
# is lowercase exactly for the signed integer kinds.
# BUG FIX: the loop body had lost its indentation during extraction
# (SyntaxError); the suite is restored here.
for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
                 'int64', 'uint64'):
    add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
f"""
{int_name}.bit_count() -> int
Computes the number of 1-bits in the absolute value of the input.
Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
Examples
--------
>>> np.{int_name}(127).bit_count()
7""" +
        # Signed case only: show that the sign is ignored.
        (f"""
>>> np.{int_name}(-127).bit_count()
7
""" if dtype(int_name).char.islower() else "")))
.\numpy\numpy\_core\_asarray.py
"""
Functions in the ``as*array`` family that promote array-likes into arrays.
`require` fits this category despite its name not matching this pattern.
"""
from .overrides import (
array_function_dispatch,
set_array_function_like_doc,
set_module,
)
from .multiarray import array, asanyarray
# ``require`` is this module's only public name.
__all__ = ["require"]

# Maps every accepted requirement spelling (short and long form) to its
# canonical one-letter flag code.
POSSIBLE_FLAGS = {
    'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
    'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
    'A': 'A', 'ALIGNED': 'A',
    'W': 'W', 'WRITEABLE': 'W',
    'O': 'O', 'OWNDATA': 'O',
    'E': 'E', 'ENSUREARRAY': 'E'
}
@set_array_function_like_doc
@set_module('numpy')
def require(a, dtype=None, requirements=None, *, like=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    This function is useful to be sure that an array with the correct flags
    is returned for passing to compiled code (perhaps through ctypes).

    Parameters
    ----------
    a : array_like
       The object to be converted to a type-and-requirement-satisfying array.
    dtype : data-type
       The required data-type. If None preserve the current dtype. If your
       application requires the data to be in native byteorder, include
       a byteorder specification as a part of the dtype specification.
    requirements : str or sequence of str
       The requirements list can be any of the following

       * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
       * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
       * 'ALIGNED' ('A')      - ensure a data-type aligned array
       * 'WRITEABLE' ('W')    - ensure a writable array
       * 'OWNDATA' ('O')      - ensure an array that owns its own data
       * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    out : ndarray
        Array with specified requirements and type if given.

    See Also
    --------
    asarray : Convert input to an ndarray.
    asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
    ascontiguousarray : Convert input to a contiguous array.
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    ndarray.flags : Information about the memory layout of the array.

    Notes
    -----
    The returned array will be guaranteed to have the listed requirements
    by making a copy if needed.

    Examples
    --------
    >>> x = np.arange(6).reshape(2,3)
    >>> x.flags
      C_CONTIGUOUS : True
      F_CONTIGUOUS : False
      OWNDATA : False
      WRITEABLE : True
      ALIGNED : True
      WRITEBACKIFCOPY : False

    >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
    >>> y.flags
      C_CONTIGUOUS : False
      F_CONTIGUOUS : True
      OWNDATA : True
      WRITEABLE : True
      ALIGNED : True
      WRITEBACKIFCOPY : False

    """
    # Defer to the like= protocol implementation when requested.
    if like is not None:
        return _require_with_like(
            like,
            a,
            dtype=dtype,
            requirements=requirements,
        )

    if not requirements:
        return asanyarray(a, dtype=dtype)

    # Normalize all spellings to one-letter codes; unknown names raise KeyError.
    requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}

    # 'E' (ENSUREARRAY) means: do not pass subclasses through.
    if 'E' in requirements:
        requirements.remove('E')
        subok = False
    else:
        subok = True

    order = 'A'
    if requirements >= {'C', 'F'}:
        raise ValueError('Cannot specify both "C" and "F" order')
    elif 'F' in requirements:
        order = 'F'
        requirements.remove('F')
    elif 'C' in requirements:
        order = 'C'
        requirements.remove('C')

    arr = array(a, dtype=dtype, order=order, copy=None, subok=subok)

    # Any remaining unsatisfied flag ('A'/'W'/'O') forces a copy.
    for prop in requirements:
        if not arr.flags[prop]:
            return arr.copy(order)
    return arr


_require_with_like = array_function_dispatch()(require)
.\numpy\numpy\_core\_asarray.pyi
from collections.abc import Iterable
from typing import Any, TypeVar, overload, Literal
from numpy._typing import NDArray, DTypeLike, _SupportsArrayFunc
# TypeVar bound to ndarray: lets the first overload of ``require`` return
# the exact array subtype it was given.
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])

# Requirement spellings that preserve the input's array subtype.
_Requirements = Literal[
    "C", "C_CONTIGUOUS", "CONTIGUOUS",
    "F", "F_CONTIGUOUS", "FORTRAN",
    "A", "ALIGNED",
    "W", "WRITEABLE",
    "O", "OWNDATA"
]
# 'E'/'ENSUREARRAY' forces a base ndarray, so it gets its own overload.
_E = Literal["E", "ENSUREARRAY"]
_RequirementsWithE = _Requirements | _E
# 1) No dtype change and no 'E' requirement: the array subtype is preserved.
@overload
def require(
    a: _ArrayType,
    dtype: None = ...,
    requirements: None | _Requirements | Iterable[_Requirements] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> _ArrayType: ...
# 2) 'E' may be among the requirements: the result is a base ndarray.
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: _E | Iterable[_RequirementsWithE] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
# 3) Fallback for arbitrary array-likes without 'E'.
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: None | _Requirements | Iterable[_Requirements] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
.\numpy\numpy\_core\_dtype.py
"""
A place for code to be called from the implementation of np.dtype
String handling is much easier to do correctly in python.
"""
import numpy as np
_kind_to_stem = {
'u': 'uint',
'i': 'int',
'c': 'complex',
'f': 'float',
'b': 'bool',
'V': 'void',
'O': 'object',
'M': 'datetime',
'm': 'timedelta',
'S': 'bytes',
'U': 'str',
}
def _kind_name(dtype):
try:
return _kind_to_stem[dtype.kind]
except KeyError as e:
raise RuntimeError(
"internal dtype error, unknown kind {!r}"
.format(dtype.kind)
) from None
def __str__(dtype):
    """str() implementation for np.dtype objects."""
    if dtype.fields is not None:
        return _struct_str(dtype, include_align=True)
    elif dtype.subdtype:
        return _subarray_str(dtype)
    elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
        # Flexible or non-native-byte-order dtypes print their spec string.
        return dtype.str
    else:
        return dtype.name
def __repr__(dtype):
    """repr() implementation for np.dtype objects."""
    body = _construction_repr(dtype, include_align=False)
    if dtype.isalignedstruct:
        # align=True is not encoded in the construction argument, so it is
        # appended as an explicit keyword.
        body += ", align=True"
    return "dtype({})".format(body)
def _unpack_field(dtype, offset, title=None):
"""
Helper function to normalize the items in dtype.fields.
Call as:
dtype, offset, title = _unpack_field(*dtype.fields[name])
"""
return dtype, offset, title
def _isunsized(dtype):
return dtype.itemsize == 0
def _construction_repr(dtype, include_align=False, short=False):
    """
    Creates a string repr of the dtype, excluding the 'dtype()' part
    surrounding the object. This object may be a string, a list, or
    a dict depending on the nature of the dtype. This
    is the object passed as the first parameter to the dtype
    constructor, and if no additional constructor parameters are
    given, will reproduce the exact memory layout.

    Parameters
    ----------
    short : bool
        If true, this creates a shorter repr using 'kind' and 'itemsize',
        instead of the longer type name.
    include_align : bool
        If true, this includes the 'align=True' parameter
        inside the struct dtype construction dict when needed. Use this flag
        if you want a proper repr string without the 'dtype()' part around it.

        If false, this does not preserve the
        'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
        struct arrays like the regular repr does, because the 'align'
        flag is not part of first dtype constructor parameter. This
        mode is intended for a full 'repr', where the 'align=True' is
        provided as the second parameter.
    """
    # Dispatch on the dtype's structure: struct, subarray, or scalar.
    if dtype.fields is not None:
        return _struct_str(dtype, include_align=include_align)
    elif dtype.subdtype:
        return _subarray_str(dtype)
    else:
        return _scalar_str(dtype, short=short)
def _scalar_str(dtype, short):
    """Construction string for a non-structured, non-subarray dtype.

    The branch order below matters: bytes_/str_/str are handled before the
    generic numeric case, and the non-legacy (new-style) dtype check comes
    before the void/datetime branches.
    """
    byteorder = _byte_order_str(dtype)

    if dtype.type == np.bool:
        if short:
            return "'?'"
        else:
            return "'bool'"

    elif dtype.type == np.object_:
        # 'O' carries no size suffix.
        return "'O'"

    elif dtype.type == np.bytes_:
        if _isunsized(dtype):
            return "'S'"
        else:
            return "'S%d'" % dtype.itemsize

    elif dtype.type == np.str_:
        if _isunsized(dtype):
            return "'%sU'" % byteorder
        else:
            # itemsize is in bytes; 'U' counts 4-byte (UCS4) code points.
            return "'%sU%d'" % (byteorder, dtype.itemsize / 4)

    elif dtype.type == str:
        return "'T'"

    elif not type(dtype)._legacy:
        return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'"

    elif issubclass(dtype.type, np.void):
        if _isunsized(dtype):
            return "'V'"
        else:
            return "'V%d'" % dtype.itemsize

    elif dtype.type == np.datetime64:
        return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))

    elif dtype.type == np.timedelta64:
        return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))

    elif np.issubdtype(dtype, np.number):
        # Short form uses kind+itemsize; long form uses the stem name in bits.
        if short or dtype.byteorder not in ('=', '|'):
            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
        else:
            return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)

    elif dtype.isbuiltin == 2:
        return dtype.type.__name__

    else:
        raise RuntimeError(
            "Internal error: NumPy dtype unrecognized type number")
def _byte_order_str(dtype):
swapped = np.dtype(int).newbyteorder('S')
native = swapped.newbyteorder('S')
byteorder = dtype.byteorder
if byteorder == '=':
return native.byteorder
if byteorder == 'S':
return swapped.byteorder
elif byteorder == '|':
return ''
else:
return byteorder
def _datetime_metadata_str(dtype):
unit, count = np.datetime_data(dtype)
if unit == 'generic':
return ''
elif count == 1:
return '[{}]'.format(unit)
else:
return '[{}{}]'.format(count, unit)
def _struct_dict_str(dtype, includealignedflag):
    """Render a structured dtype in dict form: names/formats/offsets/itemsize."""
    # Unpack the fields into parallel lists, in `names` order.
    names = dtype.names
    fld_dtypes = []
    offsets = []
    titles = []
    for name in names:
        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
        fld_dtypes.append(fld_dtype)
        offsets.append(offset)
        titles.append(title)

    # Legacy (<= 1.21) print mode omits the spaces after ':' and ','.
    if np._core.arrayprint._get_legacy_print_mode() <= 121:
        colon = ":"
        fieldsep = ","
    else:
        colon = ": "
        fieldsep = ", "

    ret = "{'names'%s[" % colon
    ret += fieldsep.join(repr(name) for name in names)
    ret += "], 'formats'%s[" % colon
    ret += fieldsep.join(
        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
    ret += "], 'offsets'%s[" % colon
    ret += fieldsep.join("%d" % offset for offset in offsets)
    # The 'titles' list is emitted only when at least one title is set.
    if any(title is not None for title in titles):
        ret += "], 'titles'%s[" % colon
        ret += fieldsep.join(repr(title) for title in titles)
    ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)
    if (includealignedflag and dtype.isalignedstruct):
        ret += ", 'aligned'%sTrue}" % colon
    else:
        ret += "}"
    return ret
def _aligned_offset(offset, alignment):
return - (-offset // alignment) * alignment
def _is_packed(dtype):
    """
    Checks whether the structured dtype has a simple layout: all fields in
    order, following each other with no alignment padding in between.

    When this returns True, the dtype can be reconstructed from just a list
    of field names and dtypes, with no extra construction parameters.

    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
    """
    align = dtype.isalignedstruct
    max_alignment = 1
    total_offset = 0
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])

        if align:
            total_offset = _aligned_offset(total_offset, fld_dtype.alignment)
            max_alignment = max(max_alignment, fld_dtype.alignment)

        # Any gap or overlap means the layout is not "packed".
        if fld_offset != total_offset:
            return False
        total_offset += fld_dtype.itemsize

    if align:
        total_offset = _aligned_offset(total_offset, max_alignment)

    # Trailing padding also disqualifies the simple list form.
    if total_offset != dtype.itemsize:
        return False
    return True
def _struct_list_str(dtype):
    """Render a packed struct dtype as a list-of-tuples construction string."""
    entries = []
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])

        # First element: the name, or a (title, name) pair when titled.
        if title is not None:
            first = "({!r}, {!r})".format(title, name)
        else:
            first = "{!r}".format(name)

        # Second element: the field dtype, expanded for subarrays.
        if fld_dtype.subdtype is not None:
            base, shape = fld_dtype.subdtype
            rest = "{}, {}".format(
                _construction_repr(base, short=True), shape)
        else:
            rest = _construction_repr(fld_dtype, short=True)

        entries.append("({}, {})".format(first, rest))

    return "[" + ", ".join(entries) + "]"
def _struct_str(dtype, include_align):
    """Render a structured dtype: list form when packed, dict form otherwise."""
    use_list = (not (include_align and dtype.isalignedstruct)
                and _is_packed(dtype))
    if use_list:
        sub = _struct_list_str(dtype)
    else:
        sub = _struct_dict_str(dtype, include_align)

    # void subclasses (record dtypes) get a (type, layout) wrapper.
    if dtype.type != np.void:
        return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
    return sub
def _subarray_str(dtype):
    """Render a sub-array dtype as '(base, shape)'."""
    base, shape = dtype.subdtype
    base_repr = _construction_repr(base, short=True)
    return "({}, {})".format(base_repr, shape)
def _name_includes_bit_suffix(dtype):
    """Whether ``dtype.name`` should end with the size in bits."""
    if dtype.type == np.object_:
        # 'object' never carries a size suffix.
        return False
    if dtype.type == np.bool:
        # 'bool' never carries a size suffix.
        return False
    if dtype.type is None:
        # dtypes with no scalar type keep the suffix.
        return True
    if np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
        # Unsized flexible dtypes (plain 'S', 'U', 'V') have no size.
        return False
    return True
def _name_get(dtype):
    """Implements the ``dtype.name`` attribute."""
    # isbuiltin == 2 marks user-registered dtypes: use the type's own name.
    if dtype.isbuiltin == 2:
        return dtype.type.__name__

    if not type(dtype)._legacy:
        name = type(dtype).__name__
    elif issubclass(dtype.type, np.void):
        # void subclasses (record dtypes) keep their own class name.
        name = dtype.type.__name__
    else:
        name = _kind_name(dtype)

    # Append the size in bits where appropriate (e.g. 'int32').
    if _name_includes_bit_suffix(dtype):
        name += "{}".format(dtype.itemsize * 8)

    # datetime64/timedelta64 also append their unit, e.g. '[ns]'.
    if dtype.type in (np.datetime64, np.timedelta64):
        name += _datetime_metadata_str(dtype)

    return name
.\numpy\numpy\_core\_dtype_ctypes.py
"""
Conversion from ctypes to dtype.
In an ideal world, we could achieve this through the PEP3118 buffer protocol,
something like::
def dtype_from_ctypes_type(t):
# needed to ensure that the shape of `t` is within memoryview.format
class DummyStruct(ctypes.Structure):
_fields_ = [('a', t)]
# empty to avoid memory allocation
ctype_0 = (DummyStruct * 0)()
mv = memoryview(ctype_0)
# convert the struct, and slice back out the field
return _dtype_from_pep3118(mv.format)['a']
Unfortunately, this fails because:
* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
* PEP3118 cannot represent unions, but both numpy and ctypes can
* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
"""
import numpy as np
def _from_ctypes_array(t):
    """Convert a ctypes array type to the equivalent numpy subarray dtype."""
    element = dtype_from_ctypes_type(t._type_)
    return np.dtype((element, (t._length_,)))
def _from_ctypes_structure(t):
    """
    Convert a ctypes structure to a numpy dtype.

    Args:
        t: ctypes structure type

    Raises:
        TypeError: If ctypes bitfields are encountered

    Returns:
        numpy dtype corresponding to the ctypes structure type
    """
    # A 3-item field tuple means a bitfield width was given.
    for item in t._fields_:
        if len(item) > 2:
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")

    if hasattr(t, "_pack_"):
        import ctypes
        formats = []
        offsets = []
        names = []
        current_offset = 0
        for fname, ftyp in t._fields_:
            names.append(fname)
            formats.append(dtype_from_ctypes_type(ftyp))
            # Each field is aligned to min(natural alignment, _pack_).
            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
            # Round the running offset up to that alignment.
            current_offset = (
                (current_offset + effective_pack - 1) // effective_pack
            ) * effective_pack
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(ftyp)

        return np.dtype(dict(
            formats=formats,
            offsets=offsets,
            names=names,
            itemsize=ctypes.sizeof(t)))
    else:
        # Without _pack_, let numpy compute the C-aligned layout itself.
        fields = []
        for fname, ftyp in t._fields_:
            fields.append((fname, dtype_from_ctypes_type(ftyp)))

        return np.dtype(fields, align=True)
def _from_ctypes_scalar(t):
"""
Convert a ctypes scalar type to a numpy dtype.
Args:
t: ctypes scalar type
Returns:
numpy dtype corresponding to the ctypes scalar type
"""
if getattr(t, '__ctype_be__', None) is t:
return np.dtype('>' + t._type_)
elif getattr(t, '__ctype_le__', None) is t:
return np.dtype('<' + t._type_)
else:
return np.dtype(t._type_)
def _from_ctypes_union(t):
    """Convert a ctypes Union to a numpy dtype: every field at offset 0."""
    import ctypes
    field_names = []
    field_formats = []
    for fname, ftyp in t._fields_:
        field_names.append(fname)
        field_formats.append(dtype_from_ctypes_type(ftyp))
    return np.dtype(dict(
        formats=field_formats,
        offsets=[0] * len(field_names),
        names=field_names,
        itemsize=ctypes.sizeof(t)))
def dtype_from_ctypes_type(t):
    """Construct a numpy dtype from a ctypes type.

    Handles arrays, structures, unions, and simple scalars, checked in
    that order.

    Raises
    ------
    TypeError
        For pointer types, which have no dtype equivalent.
    NotImplementedError
        For any other unrecognized ctypes type.
    """
    import _ctypes
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    if issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    if issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    if issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    # Simple scalars carry a one-character type code string in _type_.
    if isinstance(getattr(t, '_type_', None), str):
        return _from_ctypes_scalar(t)
    raise NotImplementedError(
        "Unknown ctypes type {}".format(t.__name__))
.\numpy\numpy\_core\_exceptions.py
"""
Various richly-typed exceptions, that also help us deal with string formatting
in python where it's easier.
By putting the formatting in `__str__`, we also avoid paying the cost for
users who silence the exceptions.
"""
from .._utils import set_module
def _unpack_tuple(tup):
if len(tup) == 1:
return tup[0]
else:
return tup
def _display_as_base(cls):
"""
A decorator that makes an exception class look like its base.
We use this to hide subclasses that are implementation details - the user
should catch the base type, which is what the traceback will show them.
Classes decorated with this decorator are subject to removal without a
deprecation warning.
"""
assert issubclass(cls, Exception)
cls.__name__ = cls.__base__.__name__
return cls
class UFuncTypeError(TypeError):
    """Base class for all ufunc exceptions."""

    def __init__(self, ufunc):
        # Message formatting happens lazily in subclass __str__ (see the
        # module docstring), so only the ufunc is stored here.
        self.ufunc = ufunc
@_display_as_base
class _UFuncNoLoopError(UFuncTypeError):
    """Thrown when a ufunc loop cannot be found."""

    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        self.dtypes = tuple(dtypes)

    def __str__(self):
        # dtypes holds inputs followed by outputs; split at nin.
        nin = self.ufunc.nin
        return (
            "ufunc {!r} did not contain a loop with signature matching types "
            "{!r} -> {!r}"
        ).format(
            self.ufunc.__name__,
            _unpack_tuple(self.dtypes[:nin]),
            _unpack_tuple(self.dtypes[nin:]),
        )
@_display_as_base
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    """Thrown when a binary resolution fails."""

    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc, dtypes)
        # Binary resolution only ever involves the two input dtypes.
        assert len(self.dtypes) == 2

    def __str__(self):
        return (
            "ufunc {!r} cannot use operands with types {!r} and {!r}"
        ).format(self.ufunc.__name__, *self.dtypes)
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
    """Shared state for the input/output casting error subclasses below."""

    def __init__(self, ufunc, casting, from_, to):
        super().__init__(ufunc)
        self.casting = casting
        self.from_ = from_
        self.to = to
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
    """Thrown when a ufunc input cannot be casted."""

    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.in_i = i

    def __str__(self):
        # Name the operand index only when there are multiple inputs.
        i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
        return (
            "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(self.ufunc.__name__, i_str, self.from_, self.to,
                 self.casting)
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
    """Thrown when a ufunc output cannot be casted."""

    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.out_i = i

    def __str__(self):
        # Name the operand index only when there are multiple outputs.
        i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
        return (
            "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(self.ufunc.__name__, i_str, self.from_, self.to,
                 self.casting)
@_display_as_base
class _ArrayMemoryError(MemoryError):
    """ Thrown when an array cannot be allocated"""
    def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = dtype

    @property
    def _total_size(self):
        # Element size times the product of all dimensions.
        num_bytes = self.dtype.itemsize
        for dim in self.shape:
            num_bytes *= dim
        return num_bytes

    @staticmethod
    def _size_to_string(num_bytes):
        """ Convert a number of bytes into a binary size string """
        # https://en.wikipedia.org/wiki/Binary_prefix
        LOG2_STEP = 10
        STEP = 1024
        units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']

        # Pick the unit from the bit length (max(..., 1) keeps 0 in 'bytes').
        unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
        unit_val = 1 << (unit_i * LOG2_STEP)
        n_units = num_bytes / unit_val
        del unit_val

        # Bump to the next unit when rounding would display e.g. "1024. KiB".
        if round(n_units) == STEP:
            unit_i += 1
            n_units /= STEP

        # Beyond the largest unit, express the value in that largest unit.
        if unit_i >= len(units):
            new_unit_i = len(units) - 1
            n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
            unit_i = new_unit_i

        unit_name = units[unit_i]
        # Format with no decimals for whole bytes; 3 significant digits
        # otherwise, dropping decimals once values reach four digits.
        if unit_i == 0:
            return '{:.0f} {}'.format(n_units, unit_name)
        elif round(n_units) < 1000:
            return '{:#.3g} {}'.format(n_units, unit_name)
        else:
            return '{:#.0f} {}'.format(n_units, unit_name)

    def __str__(self):
        size_str = self._size_to_string(self._total_size)
        return (
            "Unable to allocate {} for an array with shape {} and data type {}"
            .format(size_str, self.shape, self.dtype)
        )
.\numpy\numpy\_core\_internal.py
"""
A place for internal code
Some things are more easily handled Python.
"""
import ast
import re
import sys
import warnings
from ..exceptions import DTypePromotionError
from .multiarray import dtype, array, ndarray, promote_types, StringDType
from numpy import _NoValue
try:
import ctypes
except ImportError:
ctypes = None
# True when running on the PyPy interpreter.
IS_PYPY = sys.implementation.name == 'pypy'

# Native byte-order character for this platform ('<' little, '>' big).
if sys.byteorder == 'little':
    _nbo = '<'
else:
    _nbo = '>'
def _makenames_list(adict, align):
allfields = []
for fname, obj in adict.items():
n = len(obj)
if not isinstance(obj, tuple) or n not in (2, 3):
raise ValueError("entry not a 2- or 3- tuple")
if n > 2 and obj[2] == fname:
continue
num = int(obj[1])
if num < 0:
raise ValueError("invalid offset.")
format = dtype(obj[0], align=align)
if n > 2:
title = obj[2]
else:
title = None
allfields.append((fname, format, num, title))
allfields.sort(key=lambda x: x[2])
names = [x[0] for x in allfields]
formats = [x[1] for x in allfields]
offsets = [x[2] for x in allfields]
titles = [x[3] for x in allfields]
return names, formats, offsets, titles
def _usefields(adict, align):
    """Build a structured dtype from a fields dict.

    The dict key ``-1``, if present, holds the explicit field-name order;
    otherwise the order is derived from the offsets via ``_makenames_list``.
    """
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict, align)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            # The optional third item of an entry is the field title.
            if len(res) > 2:
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype({"names": names,
                  "formats": formats,
                  "offsets": offsets,
                  "titles": titles}, align)
def _array_descr(descriptor):
    """Implements ``dtype.descr``: a list-of-tuples description of the dtype.

    Raises
    ------
    ValueError
        For structs with overlapping or out-of-order fields, which have no
        list representation.
    """
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                # Attach a copy of the metadata dict when it is non-empty.
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    # Struct dtype: walk fields in `names` order, tracking the running offset.
    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            # Gap before this field: emit unnamed void padding bytes.
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        # field is (dtype, offset, name) or (dtype, offset, title, name).
        if len(field) > 3:
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    # Trailing padding up to the struct's full itemsize.
    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
# One token of a comma-separated dtype spec: optional byte order, optional
# repeat count / shape, optionally a second byte-order character, then the
# type code (possibly with datetime metadata in square brackets).
format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
# Separator between fields, and trailing whitespace at end of spec.
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')

# '=' means native byte order; map it to the explicit native character.
_convorder = {'=': _nbo}
def _commastring(astr):
    """Parse a comma-separated dtype string (e.g. ``"i4, (2,3)f8"``).

    Returns a single item for a one-element spec, otherwise a list of
    items, where each item is either a dtype string or a
    ``(dtype_string, repeat)`` tuple.  Raises ValueError on malformed
    input or inconsistent byte-order characters.
    """
    startindex = 0
    result = []
    # Becomes True once a separator is seen, i.e. the spec is a list.
    islist = False
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None when the regex did not match at all.
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
                ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                # Only trailing whitespace remains: stop.
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()
                islist = True

        # Reconcile the two possible byte-order positions.
        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            # Both given: they must agree (after mapping '=' to native).
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        # Native-equivalent order characters are dropped entirely.
        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if repeats == '':
            newitem = dtype
        else:
            if (repeats[0] == "(" and repeats[-1] == ")"
                    and repeats[1:-1].strip() != ""
                    and "," not in repeats):
                # "(5)f8" is ambiguous; only "(5,)f8" or "5f8" are clear.
                warnings.warn(
                    'Passing in a parenthesized single number for repeats '
                    'is deprecated; pass either a single number or indicate '
                    'a tuple with a comma, like "(2,)".', DeprecationWarning,
                    stacklevel=2)
            newitem = (dtype, ast.literal_eval(repeats))

        result.append(newitem)

    return result if islist else result[0]
class dummy_ctype:
    """Stand-in for a ctypes integer type when ctypes is unavailable.

    Wraps a scalar type and mimics the tiny subset of the ctypes type
    protocol that this module relies on.
    """

    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        # Mimics ctypes array-type creation (``c_int * n``); the dummy
        # simply returns itself.
        return self

    def __call__(self, *other):
        # Calling the "type" wraps the arguments in the underlying class.
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return not self.__eq__(other)
def _getintp_ctype():
val = _getintp_ctype.cache
if val is not None:
return val
if ctypes is None:
import numpy as np
val = dummy_ctype(np.intp)
else:
char = dtype('n').char
if char == 'i':
val = ctypes.c_int
elif char == 'l':
val = ctypes.c_long
elif char == 'q':
val = ctypes.c_longlong
else:
val = ctypes.c_long
_getintp_ctype.cache = val
return val
_getintp_ctype.cache = None
class _missing_ctypes:
def cast(self, num, obj):
return num.value
class c_void_p:
def __init__(self, ptr):
self.value = ptr
class _ctypes:
    """Backs ``ndarray.ctypes``: exposes an array's memory to ctypes."""

    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # ctypes unavailable: use the minimal stand-in instead.
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            # Keep the array alive through the pointer wrapper.
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
        the data as a pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.
        The returned pointer will keep a reference to the array.
        """
        ptr = self._ctypes.cast(self._data, obj)
        # The _arr reference keeps the array alive while the pointer is used.
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj * self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj * self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        Pointer to the start of the array's memory area, as a Python
        integer.  This memory area may contain data that is not aligned,
        or not in correct byte-order, and it may even be read-only.  The
        array flags and data-type should be respected when passing this
        attribute to arbitrary C code, to avoid trouble that can crash
        Python.  User beware!
        The value of this attribute is exactly the same as
        ``self._array_interface_['data'][0]``.
        Note that, unlike ``data_as``, no reference to the array is kept:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` ends up with a
        pointer into a freed array; write
        ``(a + b).ctypes.data_as(ctypes.c_void_p)`` instead.
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim whose base
        type is the C integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`).  The base type may be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending
        on the platform.  The array holds the shape of the underlying
        array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim with the
        same base type as the ``shape`` attribute, holding the strides of
        the underlying array.  Strides matter because they give the number
        of bytes to skip to reach the next element along each axis.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method so that
        ``c_func(some_array.ctypes)`` works.
        """
        return self.data_as(ctypes.c_void_p)

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" 已弃用。请使用 "data"',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" 已弃用。请使用 "shape"',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" 已弃用。请使用 "strides"',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_
def _newnames(datatype, order):
oldnames = datatype.names
nameslist = list(oldnames)
if isinstance(order, str):
order = [order]
seen = set()
if isinstance(order, (list, tuple)):
for name in order:
try:
nameslist.remove(name)
except ValueError:
if name in seen:
raise ValueError(f"duplicate field name: {name}") from None
else:
raise ValueError(f"unknown field name: {name}") from None
seen.add(name)
return tuple(list(order) + nameslist)
raise ValueError(f"unsupported order value: {order}")
def _copy_fields(ary):
"""Return copy of structured array with padding between fields removed.
Parameters
----------
ary : ndarray
Structured array from which to remove padding bytes
Returns
-------
ary_copy : ndarray
Copy of ary with padding bytes removed
"""
dt = ary.dtype
copy_dtype = {'names': dt.names,
'formats': [dt.fields[name][0] for name in dt.names]}
return array(ary, dtype=copy_dtype, copy=True)
def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype.

    Notes
    -----
    If one of the inputs is aligned, the result will be.  The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and the field names must match exactly
    # (order included).
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # Track whether promotion may simply return dt1 unchanged; only
    # possible when the two descriptors are the very same object.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        # `is` (not ==) so byte-order/metadata changes break identity.
        identical = identical and new_descr is field1[0]

        # Titles (the optional third element) must agree on both sides.
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # When every field promoted to itself and the layout (itemsize and
    # per-field offsets) is unchanged, return the original dt1 object.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res
        return dt1

    return res
def _getfield_is_safe(oldtype, newtype, offset):
""" Checks safety of getfield for object arrays.
As in _view_is_safe, we need to check that memory containing objects is not
reinterpreted as a non-object datatype and vice versa.
Parameters
----------
oldtype : data-type
Data type of the original ndarray.
newtype : data-type
Data type of the field being accessed by ndarray.getfield
offset : int
Offset of the field being accessed by ndarray.getfield
Raises
------
TypeError
If the field access is invalid
"""
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
if oldtype.names is not None:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
return
raise TypeError("Cannot get/set field of an object array")
return
def _view_is_safe(oldtype, newtype):
""" Checks safety of a view involving object arrays, for example when
doing::
np.zeros(10, dtype=oldtype).view(newtype)
Parameters
----------
oldtype : data-type
Data type of original ndarray
newtype : data-type
Data type of the view
Raises
------
TypeError
If the new type is incompatible with the old type.
"""
if oldtype == newtype:
return
if newtype.hasobject or oldtype.hasobject:
raise TypeError("Cannot change data-type for array of references.")
return
# PEP 3118 format characters -> numpy dtype strings for buffers declared
# in *native* mode ('@'): each C type keeps its platform-native size.
_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

# Same mapping for *standard* mode ('<', '>', '=', '!'), where PEP 3118
# fixes explicit item sizes (e.g. 'l' is always a 4-byte integer).
_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

# PEP 3118 format characters that have no numpy equivalent, mapped to
# human-readable descriptions of what they denote.
_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}
class _Stream:
def __init__(self, s):
self.s = s
self.byteorder = '@'
def advance(self, n):
res = self.s[:n]
self.s = self.s[n:]
return res
def consume(self, c):
if self.s[:len(c)] == c:
self.advance(len(c))
return True
return False
def consume_until(self, c):
if callable(c):
i = 0
while i < len(self.s) and not c(self.s[i]):
i = i + 1
return self.advance(i)
else:
i = self.s.index(c)
res = self.advance(i)
self.advance(len(c))
return res
@property
def next(self):
return self.s[0]
def __bool__(self):
return bool(self.s)
def _dtype_from_pep3118(spec):
    """Convert a PEP 3118 buffer format string `spec` into a numpy dtype."""
    stream = _Stream(spec)
    # The helper also returns the struct alignment, not needed here.
    dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
    return dtype
def __dtype_from_pep3118(stream, is_subdtype):
    """Parse fields from `stream`; returns ``(dtype, common_alignment)``.

    NOTE(review): this excerpt appears truncated relative to upstream —
    the loop that consumes format characters and populates `field_spec`
    is missing, so as shown `field_spec` is only ever the empty initial
    value.  Verify against the full source before relying on it.
    """
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Extra final padding for aligned (native-mode) structs.
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # A simple un-named 1-item spec unwraps to the bare format dtype.
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        # Replace None names with generated 'fN' names, then build.
        _fix_names(field_spec)
        ret = dtype(field_spec)

    return ret, common_alignment
def _fix_names(field_spec):
names = field_spec['names']
for i, name in enumerate(names):
if name is not None:
continue
j = 0
while True:
name = f'f{j}'
if name not in names:
break
j = j + 1
names[i] = name
def _add_trailing_padding(value, padding):
if value.fields is None:
field_spec = dict(
names=['f0'],
formats=[value],
offsets=[0],
itemsize=value.itemsize
)
else:
fields = value.fields
names = value.names
field_spec = dict(
names=names,
formats=[fields[name][0] for name in names],
offsets=[fields[name][1] for name in names],
itemsize=value.itemsize
)
field_spec['itemsize'] += padding
return dtype(field_spec)
def _prod(a):
p = 1
for x in a:
p *= x
return p
def _gcd(a, b):
"""Calculate the greatest common divisor of a and b"""
while b:
a, b = b, a % b
return a
def _lcm(a, b):
return a // _gcd(a, b) * b
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """Build the error message for when every ``__array_ufunc__``
    implementation returned NotImplemented."""
    arg_reprs = [repr(arg) for arg in inputs]
    arg_reprs += [f'{k}={v!r}' for k, v in kwargs.items()]
    args_string = ', '.join(arg_reprs)
    # The operands include any `out` arrays for the type listing.
    args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in args)
    return ('operand type(s) all returned NotImplemented from '
            f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): '
            f'{types_string}')
def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    func_name = f'{public_api.__module__}.{public_api.__name__}'
    return (f"no implementation found for '{func_name}' on types that "
            f"implement __array_function__: {list(types)}")
def _ufunc_doc_signature_formatter(ufunc):
if ufunc.nin == 1:
in_args = 'x'
else:
in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
if ufunc.nout == 0:
out_args = ', /, out=()'
elif ufunc.nout == 1:
out_args = ', /, out=None'
else:
out_args = '[, {positional}], / [, out={default}]'.format(
positional=', '.join(
'out{}'.format(i+1) for i in range(ufunc.nout)),
default=repr((None,)*ufunc.nout)
)
kwargs = (
", casting='same_kind'"
", order='K'"
", dtype=None"
", subok=True"
)
if ufunc.signature is None:
kwargs = f", where=True{kwargs}[, signature]"
else:
kwargs += "[, signature, axes, axis]"
return '{name}({in_args}{out_args}, *{kwargs})'.format(
name=ufunc.__name__,
in_args=in_args,
out_args=out_args,
kwargs=kwargs
)
def npy_ctypes_check(cls):
    """Return True when *cls* appears to be a ctypes type, else False.

    Decided by checking the module of a fixed ancestor in the MRO;
    PyPy's ctypes classes carry one extra base compared to CPython,
    hence the different index.
    """
    try:
        ancestor = cls.__mro__[-3] if IS_PYPY else cls.__mro__[-2]
        return '_ctypes' in ancestor.__module__
    except Exception:
        # Any introspection failure is treated as "not a ctypes type".
        return False
def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
    """Build a StringDType from dtype-construction keyword arguments.

    ``na_object`` is forwarded only when the caller explicitly supplied
    one; passing it always would change the dtype's NA handling.
    """
    if na_object is _NoValue:
        return StringDType(coerce=coerce)
    return StringDType(coerce=coerce, na_object=na_object)
.\numpy\numpy\_core\_internal.pyi
import ctypes as ct
# The stub below uses Any, Generic, TypeVar and overload, which were not
# imported anywhere in this fragment — bring them into scope.
from typing import Any, Generic, TypeVar, overload

from numpy.typing import NDArray
from numpy.ctypeslib import c_intp

# Type variables for the _ctypes stub: cast target, ctypes data type,
# and the pointer value type (None or int).
_CastT = TypeVar("_CastT", bound=ct._CanCastTo)
_CT = TypeVar("_CT", bound=ct._CData)
_PT = TypeVar("_PT", bound=int)
# Type stub for numpy._core._internal._ctypes, generic over the pointer
# type _PT: None when no pointer was supplied, int otherwise.
class _ctypes(Generic[_PT]):
    @overload
    def __new__(cls, array: NDArray[Any], ptr: None = ...) -> _ctypes[None]: ...
    @overload
    def __new__(cls, array: NDArray[Any], ptr: _PT) -> _ctypes[_PT]: ...
    @property
    def data(self) -> _PT: ...
    @property
    def shape(self) -> ct.Array[c_intp]: ...
    @property
    def strides(self) -> ct.Array[c_intp]: ...
    @property
    def _as_parameter_(self) -> ct.c_void_p: ...
    def data_as(self, obj: type[_CastT]) -> _CastT: ...
    def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
    def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
.\numpy\numpy\_core\_machar.py
"""
Machine arithmetic - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
__all__ = ['MachAr']
from .fromnumeric import any
from ._ufunc_config import errstate
from .._utils import set_module
class MachAr:
    """
    Diagnosing machine parameters.

    Attributes
    ----------
    ibeta : int
        Radix in which numbers are represented.
    it : int
        Number of base-`ibeta` digits in the floating point mantissa M.
    machep : int
        Exponent of the smallest (most negative) power of `ibeta` that,
        added to 1.0, gives something different from 1.0.
    eps : float
        Floating-point number ``beta**machep`` (floating point precision).
    negep : int
        Exponent of the smallest power of `ibeta` that, subtracted
        from 1.0, gives something different from 1.0.
    epsneg : float
        Floating-point number ``beta**negep``.
    iexp : int
        Number of bits in the exponent (including its sign and bias).
    minexp : int
        Smallest (most negative) power of `ibeta` consistent with there
        being no leading zeros in the mantissa.
    xmin : float
        Floating-point number ``beta**minexp`` (the smallest [in
        magnitude] positive floating point number with full precision).
    maxexp : int
        Smallest (positive) power of `ibeta` that causes overflow.
    xmax : float
        ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
        usable floating value).
    irnd : int
        In ``range(6)``, information on what kind of rounding is done
        and on how underflow is handled.
    ngrd : int
        Number of 'guard digits' used when truncating the product
        of two mantissas to fit the representation.
    epsilon : float
        Same as `eps`.
    tiny : float
        An alias for `smallest_normal`, kept for backwards compatibility.
    huge : float
        Same as `xmax`.
    precision : float
        ``- int(-log10(eps))``
    resolution : float
        ``- 10**(-precision)``
    smallest_normal : float
        The smallest positive floating point number with 1 as the leading
        bit in the mantissa, following IEEE-754.  Same as `xmin`.
    smallest_subnormal : float
        The smallest positive floating point number with 0 as the leading
        bit in the mantissa, following IEEE-754.

    Parameters
    ----------
    float_conv : function, optional
        Function that converts an integer or integer array to a float
        or float array.  Default is `float`.
    int_conv : function, optional
        Function that converts a float or float array to an integer or
        integer array.  Default is `int`.
    float_to_float : function, optional
        Function that converts a float array to float.  Default is
        `float`.  Note that this does not seem to do anything useful in
        the current implementation.
    """
    # NOTE(review): the excerpt left the class docstring unterminated
    # (the closing quotes were lost during extraction); restored above.

    def __init__(self, float_conv=float, int_conv=int,
                 float_to_float=float,
                 float_to_str=lambda v:'%24.16e' % v,
                 title='Python floating point number'):
        """
        Initialize a MachAr instance.

        Parameters
        ----------
        float_conv : function, optional
            Function converting an integer (array) to a float (array).
            Default is `float`.
        int_conv : function, optional
            Function converting a float (array) to an integer (array).
            Default is `int`.
        float_to_float : function, optional
            Function converting a float array to float.  Default is
            `float`.
        float_to_str : function, optional
            Function converting a float array to a string.  Default is
            ``lambda v: '%24.16e' % v``.
        title : str, optional
            Title printed in the string representation of MachAr.
        """
        # Ignore underflow here: the probing deliberately triggers it to
        # detect the properties of the running architecture.
        with errstate(under='ignore'):
            # _do_init is defined elsewhere (not shown in this excerpt).
            self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)

    def __str__(self):
        """Return a formatted report of the detected machine parameters."""
        fmt = (
            'Machine parameters for %(title)s\n'
            '---------------------------------------------------------------------\n'
            'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
            'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
            'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
            'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
            'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
            'smallest_normal=%(smallest_normal)s '
            'smallest_subnormal=%(smallest_subnormal)s\n'
            '---------------------------------------------------------------------\n'
        )
        # The %(name)s placeholders are filled from the instance's
        # attribute dictionary.
        return fmt % self.__dict__
# When run as a script (rather than imported), print the machine
# parameters detected for the native Python float type.
if __name__ == '__main__':
    print(MachAr())
.\numpy\numpy\_core\_methods.py
"""
Array methods which are called by both the C-code for the method
and the Python code for the NumPy-namespace function
"""
import os
import pickle
import warnings
from contextlib import nullcontext
from numpy._core import multiarray as mu
from numpy._core import umath as um
from numpy._core.multiarray import asanyarray
from numpy._core import numerictypes as nt
from numpy._core import _exceptions
from numpy._core._ufunc_config import _no_nep50_warning
from numpy._globals import _NoValue
# Cached bool dtype and bound ufunc-reduce callables; binding them once
# at import time avoids repeated attribute lookups in these hot wrappers.
bool_dt = mu.dtype("bool")
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_bitwise_count = um.bitwise_count
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce

# Map each complex dtype to the matching real dtype; used by _var's
# fast path, which views complex data as pairs of floats.
_complex_to_float = {
    nt.dtype(nt.csingle) : nt.dtype(nt.single),
    nt.dtype(nt.cdouble) : nt.dtype(nt.double),
}
# clongdouble only gets an entry when it is distinct from cdouble.
if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
    _complex_to_float.update({
        nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
    })
def _amax(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Max reduction: thin wrapper over maximum.reduce.
    return umr_maximum(a, axis, None, out, keepdims, initial, where)

def _amin(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Min reduction: thin wrapper over minimum.reduce.
    return umr_minimum(a, axis, None, out, keepdims, initial, where)

def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
         initial=_NoValue, where=True):
    # Sum reduction: thin wrapper over add.reduce.
    return umr_sum(a, axis, dtype, out, keepdims, initial, where)

def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Product reduction: thin wrapper over multiply.reduce.
    return umr_prod(a, axis, dtype, out, keepdims, initial, where)
def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """Truth-or reduction (logical_or.reduce) returning bool by default."""
    dtype = bool_dt if dtype is None else dtype
    # Fast path: skip the `where` keyword when it is the trivial default.
    if where is True:
        return umr_any(a, axis, dtype, out, keepdims)
    return umr_any(a, axis, dtype, out, keepdims, where=where)


def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """Truth-and reduction (logical_and.reduce) returning bool by default."""
    dtype = bool_dt if dtype is None else dtype
    # Fast path: skip the `where` keyword when it is the trivial default.
    if where is True:
        return umr_all(a, axis, dtype, out, keepdims)
    return umr_all(a, axis, dtype, out, keepdims, where=where)
def _count_reduce_items(arr, axis, keepdims=False, where=True):
    """Return the number of elements participating in a reduction.

    With the default ``where=True`` this is the product of the reduced
    axes' lengths (an intp scalar); with a boolean mask it is the count
    of True entries in the mask broadcast to `arr`'s shape.
    """
    if where is True:
        # No mask: count elements along the (normalized) reduction axes.
        if axis is None:
            axis = tuple(range(arr.ndim))
        elif not isinstance(axis, tuple):
            axis = (axis,)
        items = 1
        for ax in axis:
            items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
        items = nt.intp(items)
    else:
        # Local import to avoid a circular dependency at module load.
        from numpy.lib._stride_tricks_impl import broadcast_to
        # Count True values in the (broadcast) boolean mask.
        items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
                        keepdims)
    return items
def _clip(a, min=None, max=None, out=None, **kwargs):
if min is None and max is None:
raise ValueError("One of max or min must be given")
if min is None:
return um.minimum(a, max, out=out, **kwargs)
elif max is None:
return um.maximum(a, min, out=out, **kwargs)
else:
return um.clip(a, min, max, out=out, **kwargs)
def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """Implementation of np.mean: masked/axis-wise sum divided by count."""
    arr = asanyarray(a)

    is_float16_result = False

    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
    # Warn (but still proceed) when any slice is empty.
    if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
        warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)

    # Default accumulator: f8 for bool/int input, f4 (upcast) for float16.
    if dtype is None:
        if issubclass(arr.dtype.type, (nt.integer, nt.bool)):
            dtype = mu.dtype('f8')
        elif issubclass(arr.dtype.type, nt.float16):
            dtype = mu.dtype('f4')
            is_float16_result = True

    ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
    if isinstance(ret, mu.ndarray):
        # In-place unsafe-cast division by the element count.
        with _no_nep50_warning():
            ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
        # float16 input accumulated in f4: cast the result back down.
        if is_float16_result and out is None:
            ret = arr.dtype.type(ret)
    elif hasattr(ret, 'dtype'):
        # Scalar result that still carries a dtype.
        if is_float16_result:
            ret = arr.dtype.type(ret / rcount)
        else:
            ret = ret.dtype.type(ret / rcount)
    else:
        # Plain Python scalar (e.g. object arrays).
        ret = ret / rcount

    return ret
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
         where=True, mean=None):
    """Implementation of np.var: mean of squared deviations with `ddof`
    degrees-of-freedom correction.  A precomputed `mean` may be passed in."""
    arr = asanyarray(a)

    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
    # Warn when the correction leaves no (or negative) degrees of freedom.
    if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
                      stacklevel=2)

    # Bool/int input accumulates in f8 by default.
    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)):
        dtype = mu.dtype('f8')

    if mean is not None:
        arrmean = mean
    else:
        # Compute the mean, keeping dims so it broadcasts against arr.
        arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
        # rcount must match arrmean's shape for the division below.
        if rcount.ndim == 0:
            # Fast path for the default where=True case.
            div = rcount
        else:
            # where given as an array: reshape the counts to match.
            div = rcount.reshape(arrmean.shape)
        if isinstance(arrmean, mu.ndarray):
            with _no_nep50_warning():
                arrmean = um.true_divide(arrmean, div, out=arrmean,
                                         casting='unsafe', subok=False)
        elif hasattr(arrmean, "dtype"):
            arrmean = arrmean.dtype.type(arrmean / rcount)
        else:
            arrmean = arrmean / rcount

    # Deviations from the mean.
    x = asanyarray(arr - arrmean)

    # Square the deviations, picking the cheapest correct formula:
    if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
        # Real types: plain in-place square.
        x = um.multiply(x, x, out=x)
    elif x.dtype in _complex_to_float:
        # Native complex: view as float pairs, square componentwise, and
        # sum real^2 + imag^2 into the real part.
        xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
        um.multiply(xv, xv, out=xv)
        x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
    else:
        # General case (object arrays, non-native-order complex): x * conj(x).
        x = um.multiply(x, um.conjugate(x), out=x).real

    ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)

    # Degrees of freedom, clamped at zero.
    rcount = um.maximum(rcount - ddof, 0)

    # Divide the summed squares by the degrees of freedom.
    if isinstance(ret, mu.ndarray):
        with _no_nep50_warning():
            ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
    elif hasattr(ret, 'dtype'):
        ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount

    return ret
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
         where=True, mean=None):
    """Implementation of np.std: square root of `_var` with matching args."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims, where=where, mean=mean)

    if isinstance(variance, mu.ndarray):
        # Take the root in place to reuse the buffer (possibly `out`).
        return um.sqrt(variance, out=variance)
    if hasattr(variance, 'dtype'):
        # dtype-carrying scalar: preserve its type after the sqrt.
        return variance.dtype.type(um.sqrt(variance))
    return um.sqrt(variance)
def _ptp(a, axis=None, out=None, keepdims=False):
    """Implementation of np.ptp: elementwise max minus min along `axis`.

    The max is reduced directly into `out` (when given), then the min is
    subtracted from it, writing the final result into `out` as well.
    """
    return um.subtract(
        umr_maximum(a, axis, None, out, keepdims),
        umr_minimum(a, axis, None, None, keepdims),
        out
    )
def _dump(self, file, protocol=2):
if hasattr(file, 'write'):
ctx = nullcontext(file)
else:
ctx = open(os.fspath(file), "wb")
with ctx as f:
pickle.dump(self, f, protocol=protocol)
def _dumps(self, protocol=2):
return pickle.dumps(self, protocol=protocol)
def _bitwise_count(a, out=None, *, where=True, casting='same_kind',
                   order='K', dtype=None, subok=True):
    # Thin wrapper forwarding every argument to the bitwise_count ufunc.
    return umr_bitwise_count(a, out, where=where, casting=casting,
                             order=order, dtype=dtype, subok=subok)
.\numpy\numpy\_core\_string_helpers.py
"""
String-handling utilities to avoid locale-dependence.
Used primarily to generate type name aliases.
"""
# 256-entry translation tables that case-map the ASCII letters only,
# leaving every other code point untouched.  Built from fixed slices
# (not str.lower/str.upper) so the result cannot vary with the locale.
_all_chars = tuple(chr(code) for code in range(256))
_ascii_upper = _all_chars[65:91]    # 'A'..'Z'
_ascii_lower = _all_chars[97:123]   # 'a'..'z'
LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[91:]
UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[123:]


def english_lower(s):
    """ Apply English case rules to convert ASCII strings to all lower case.

    This is an internal utility function to replace calls to str.lower()
    such that we can avoid changing behavior with changing locales.  In
    particular, Turkish has distinct dotted and dotless variants of the
    Latin letter "I" in both lowercase and uppercase, so that
    ``"I".lower() != "i"`` in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    lowered : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_lower
    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_lower('')
    ''
    """
    return s.translate(LOWER_TABLE)
def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.

    This is an internal utility function to replace calls to str.upper()
    such that we can avoid changing behavior with changing locales.  In
    particular, Turkish has distinct dotted and dotless variants of the
    Latin letter "I" in both lowercase and uppercase, so that
    ``"i".upper() != "I"`` in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_upper
    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    return s.translate(UPPER_TABLE)
def english_capitalize(s):
    """ Apply English case rules to convert the first character of an
    ASCII string to upper case.

    This is an internal utility function to replace calls to
    str.capitalize() such that we can avoid changing behavior with
    changing locales.

    Parameters
    ----------
    s : str

    Returns
    -------
    capitalized : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('Int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    # Empty strings pass through unchanged; otherwise upper-case the head.
    return english_upper(s[0]) + s[1:] if s else s
.\numpy\numpy\_core\_type_aliases.py
"""
Due to compatibility, numpy has a very large number of different naming
conventions for the scalar types (those subclassing from `numpy.generic`).
This file produces a convoluted set of dictionaries mapping names to types,
and sometimes other mappings too.
.. data:: allTypes
A dictionary of names to types that will be exposed as attributes through
``np._core.numerictypes.*``
.. data:: sctypeDict
Similar to `allTypes`, but maps a broader set of aliases to their types.
.. data:: sctypes
A dictionary keyed by a "type group" string, providing a list of types
under that group.
"""
import numpy._core.multiarray as ma
from numpy._core.multiarray import typeinfo, dtype

# Maps a broad set of name aliases to scalar types.
sctypeDict = {}
# Names exposed as attributes through np._core.numerictypes.
allTypes = {}
# C-style names (stripped of their NPY_ prefix) to type info.
c_names_dict = {}

# The abstract scalar-type hierarchy, exposed under these exact names.
_abstract_type_names = {
    "generic", "integer", "inexact", "floating", "number",
    "flexible", "character", "complexfloating", "unsignedinteger",
    "signedinteger"
}

for _abstract_type_name in _abstract_type_names:
    allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name)

# Split typeinfo into C-name entries (NPY_*) and concrete scalar types.
for k, v in typeinfo.items():
    if k.startswith("NPY_") and v not in c_names_dict:
        c_names_dict[k[4:]] = v
    else:
        concrete_type = v.type
        allTypes[k] = concrete_type
        sctypeDict[k] = concrete_type

# Aliases registered in both allTypes and sctypeDict.
_aliases = {
    "double": "float64",
    "cdouble": "complex128",
    "single": "float32",
    "csingle": "complex64",
    "half": "float16",
    "bool_": "bool",
    # Default integer types:
    "int_": "intp",
    "uint": "uintp",
}

for k, v in _aliases.items():
    sctypeDict[k] = allTypes[v]
    allTypes[k] = allTypes[v]

# Extra aliases registered only in sctypeDict (not exposed as attributes).
_extra_aliases = {
    "float": "float64",
    "complex": "complex128",
    "object": "object_",
    "bytes": "bytes_",
    "a": "bytes_",
    "int": "int_",
    "str": "str_",
    "unicode": "str_",
}

for k, v in _extra_aliases.items():
    sctypeDict[k] = allTypes[v]

# Register sized aliases for (c)longdouble — e.g. float96/float128 or
# complex192/complex256 — only when the name is not already taken by a
# distinct type.
for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]:
    longdouble_type: type = allTypes[full_name]

    bits: int = dtype(longdouble_type).itemsize * 8
    base_name: str = "complex" if is_complex else "float"
    extended_prec_name: str = f"{base_name}{bits}"
    if extended_prec_name not in allTypes:
        sctypeDict[extended_prec_name] = longdouble_type
        allTypes[extended_prec_name] = longdouble_type

# Group the concrete scalar types by kind.
sctypes = {"int": set(), "uint": set(), "float": set(),
           "complex": set(), "others": set()}

for type_info in typeinfo.values():
    # Skip datetime64 ('M') and timedelta64 ('m').
    if type_info.kind in ["M", "m"]:
        continue

    concrete_type = type_info.type

    # First matching abstract parent decides the group.
    for type_group, abstract_type in [
        ("int", ma.signedinteger), ("uint", ma.unsignedinteger),
        ("float", ma.floating), ("complex", ma.complexfloating),
        ("others", ma.generic)
    ]:
        if issubclass(concrete_type, abstract_type):
            sctypes[type_group].add(concrete_type)
            break

# Convert each group to a list sorted by item size.
for sctype_key in sctypes.keys():
    sctype_list = list(sctypes[sctype_key])
    sctype_list.sort(key=lambda x: dtype(x).itemsize)
    sctypes[sctype_key] = sctype_list
.\numpy\numpy\_core\_type_aliases.pyi
from numpy import generic

# Keys are either scalar-type names or legacy numeric type codes.
sctypeDict: dict[int | str, type[generic]]
.\numpy\numpy\_core\_ufunc_config.py
"""
Functions for changing global ufunc configuration
This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and
`_extobj_contextvar` from umath.
"""
import collections.abc
import contextlib
import contextvars
import functools
from .._utils import set_module
from .umath import _make_extobj, _get_extobj_dict, _extobj_contextvar
__all__ = [
"seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
"errstate", '_no_nep50_warning'
]
@set_module('numpy')
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
"""
Set how floating-point errors are handled.
Note that operations on integer scalar types (such as `int16`) are
handled like floating point, and are affected by these settings.
Parameters
----------
all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Set treatment for all types of floating-point errors at once:
- ignore: Take no action when the exception occurs.
- warn: Print a :exc:`RuntimeWarning` (via the Python `warnings`
module).
- raise: Raise a :exc:`FloatingPointError`.
- call: Call a function specified using the `seterrcall` function.
- print: Print a warning directly to ``stdout``.
- log: Record error in a Log object specified by `seterrcall`.
The default is not to change the current behavior.
divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for division by zero.
over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point overflow.
under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for floating-point underflow.
invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
Treatment for invalid floating-point operation.
Returns
-------
old_settings : dict
Dictionary containing the old settings.
See also
--------
seterrcall : Set a callback function for the 'call' mode.
geterr, geterrcall, errstate
Notes
-----
The floating-point exceptions are defined in the IEEE 754 standard [1]_:
- Division by zero: infinite result obtained from finite numbers.
- Overflow: result too large to be expressed.
- Underflow: result so close to zero that some precision
was lost.
- Invalid operation: result is not an expressible number, typically
indicates that a NaN was produced.
.. [1] https://en.wikipedia.org/wiki/IEEE_754
Examples
--------
>>> orig_settings = np.seterr(all='ignore') # seterr to known value
>>> np.int16(32000) * np.int16(3)
30464
>>> np.seterr(over='raise')
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
>>> old_settings = np.seterr(all='warn', over='raise')
>>> np.int16(32000) * np.int16(3)
"""
old_settings = _get_extobj_dict()
new_settings = {}
if all is not None:
new_settings['all'] = _make_extobj(all)
if divide is not None:
new_settings['divide'] = _make_extobj(divide)
if over is not None:
new_settings['over'] = _make_extobj(over)
if under is not None:
new_settings['under'] = _make_extobj(under)
if invalid is not None:
new_settings['invalid'] = _make_extobj(invalid)
_extobj_contextvar.set(new_settings)
return old_settings
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FloatingPointError: overflow encountered in scalar multiply
>>> old_settings = np.seterr(all='print')
>>> np.geterr()
{'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
>>> np.int16(32000) * np.int16(3)
30464
>>> np.seterr(**orig_settings)
{'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
"""
# 获取当前的扩展对象字典
old = _get_extobj_dict()
# 从字典中移除键为 "call" 和 "bufsize" 的项,如果存在的话
# 这些项在错误状态对象中并不存在,因此需要移除
old.pop("call", None)
old.pop("bufsize", None)
# 使用指定的错误处理设置创建扩展对象
extobj = _make_extobj(
all=all, divide=divide, over=over, under=under, invalid=invalid)
# 将新创建的扩展对象设置为当前的上下文变量
_extobj_contextvar.set(extobj)
# 返回修改后的扩展对象字典
return old
@set_module('numpy')
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        A dictionary with keys "divide", "over", "under", and "invalid",
        whose values are from the strings "ignore", "print", "log", "warn",
        "raise", and "call". The keys represent possible floating-point
        exceptions, and the values define how these exceptions are handled.

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> np.geterr()
    {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
    >>> oldsettings = np.seterr(all='warn', invalid='raise')
    >>> np.geterr()
    {'divide': 'warn', 'over': 'warn', 'under': 'warn', 'invalid': 'raise'}
    >>> oldsettings = np.seterr(**oldsettings)
    """
    # Start from the full extobj state; besides the four error modes it also
    # carries the callback and buffer size, which do not belong in the
    # dictionary this function returns.
    settings = _get_extobj_dict()
    for extra_key in ("call", "bufsize"):
        settings.pop(extra_key, None)
    return settings
@set_module('numpy')
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    .. versionchanged:: 2.0
        The scope of setting the buffer is tied to the `numpy.errstate`
        context.  Exiting a ``with errstate():`` will also restore the
        bufsize.

    Parameters
    ----------
    size : int
        Size of buffer.

    Returns
    -------
    bufsize : int
        Previous size of ufunc buffer in bytes.

    Examples
    --------
    When exiting a `numpy.errstate` context manager the bufsize is restored:

    >>> with np.errstate():
    ...     np.setbufsize(4096)
    ...     print(np.getbufsize())
    ...
    8192
    4096
    >>> np.getbufsize()
    8192
    """
    # Remember the value currently in effect so the caller can restore it.
    previous = _get_extobj_dict()["bufsize"]
    # Install a new extobj carrying the updated buffer size; the remaining
    # settings are inherited from the current context.
    _extobj_contextvar.set(_make_extobj(bufsize=size))
    return previous
@set_module('numpy')
def getbufsize():
    """
    Return the size of the buffer used in ufuncs.

    Returns
    -------
    getbufsize : int
        Size of ufunc buffer in bytes.

    Examples
    --------
    >>> np.getbufsize()
    8192
    """
    # The buffer size lives alongside the error modes in the extobj state.
    current_state = _get_extobj_dict()
    return current_state["bufsize"]
@set_module('numpy')
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages.  The first
    is to set the error-handler to 'call', using `seterr`.  Then, set
    the function to call using this function.

    The second is to set the error-handler to 'log', using `seterr`.
    Floating-point errors then trigger a call to the 'write' method of
    the provided object.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message ('log'-mode).

        The call function takes two arguments. The first is a string describing
        the type of error (such as "divide by zero", "overflow", "underflow",
        or "invalid value"), and the second is the status flag.  The flag is a
        byte, whose four least-significant bits indicate the type of error, one
        of "divide", "over", "under", "invalid"::

          [0 0 0 0 divide over under invalid]

        In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.

        If an object is provided, its write method should take one argument,
        a string.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    See Also
    --------
    seterr, geterr, geterrcall

    Examples
    --------
    Callback upon error:

    >>> def err_handler(type, flag):
    ...     print("Floating point error (%s), with flag %s" % (type, flag))
    ...

    >>> orig_handler = np.seterrcall(err_handler)
    >>> orig_err = np.seterr(all='call')

    >>> np.array([1, 2, 3]) / 0.0
    Floating point error (divide by zero), with flag 1
    array([inf, inf, inf])

    >>> np.seterrcall(orig_handler)
    <function err_handler at 0x...>
    >>> np.seterr(**orig_err)
    {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}

    Log error message:

    >>> class Log:
    ...     def write(self, msg):
    ...         print("LOG: %s" % msg)
    ...

    >>> log = Log()
    >>> saved_handler = np.seterrcall(log)
    >>> save_err = np.seterr(all='log')

    >>> np.array([1, 2, 3]) / 0.0
    LOG: Warning: divide by zero encountered in divide
    array([inf, inf, inf])

    >>> np.seterrcall(orig_handler)
    <numpy.Log object at 0x...>
    >>> np.seterr(**orig_err)
    {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}
    """
    # Preserve the currently-registered handler so it can be returned.
    old = _get_extobj_dict()["call"]
    # Record the new handler in a fresh extobj bound to this context.
    extobj = _make_extobj(call=func)
    _extobj_contextvar.set(extobj)
    return old
# Expose this function under the 'numpy' module name.
@set_module('numpy')
def geterrcall():
    """
    Return the current callback function used on floating-point errors.

    When the error handling for a floating-point error (one of "divide",
    "over", "under", or "invalid") is set to 'call' or 'log', the function
    that is called or the log instance that is written to is returned by
    `geterrcall`. This function or log instance has been set with
    `seterrcall`.

    Returns
    -------
    errobj : callable, log instance or None
        The current error handler. If no handler was set through `seterrcall`,
        ``None`` is returned.

    See Also
    --------
    seterrcall, seterr, geterr

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> np.geterrcall()
    >>> orig_settings = np.seterr(all='call')
    >>> def err_handler(type, flag):
    ...     print("Floating point error (%s), with flag %s" % (type, flag))
    >>> old_handler = np.seterrcall(err_handler)
    >>> np.array([1, 2, 3]) / 0.0
    Floating point error (divide by zero), with flag 1
    array([inf, inf, inf])
    >>> cur_handler = np.geterrcall()
    >>> cur_handler is err_handler
    True
    >>> old_settings = np.seterr(**orig_settings)
    >>> old_handler = np.seterrcall(None)
    """
    # The "call" slot of the extobj state holds the handler (or None).
    return _get_extobj_dict()["call"]
# Sentinel class: marks "argument not supplied" in places where ``None`` is
# itself a meaningful value (see ``errstate(call=...)``).
class _unspecified:
    pass

# Module-level singleton instance of the sentinel.
_Unspecified = _unspecified()
@set_module('numpy')
class errstate:
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Using an instance of `errstate` as a context manager allows statements in
    that context to execute with a known error handling behavior. Upon entering
    the context the error handling is set with `seterr` and `seterrcall`, and
    upon exiting it is reset to what it was before.

    .. versionchanged:: 1.17.0
        `errstate` is also usable as a function decorator, saving
        a level of indentation if an entire function is wrapped.

    .. versionchanged:: 2.0
        `errstate` is now fully thread and asyncio safe, but may not be
        entered more than once.
        It is not safe to decorate async functions using ``errstate``.

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Keyword arguments. The valid keywords are the possible floating-point
        exceptions. Each keyword should have a string value that defines the
        treatment for the particular error. Possible values are
        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> olderr = np.seterr(all='ignore')
    >>> np.arange(3) / 0.
    array([nan, inf, inf])
    >>> with np.errstate(divide='ignore'):
    ...     np.arange(3) / 0.
    array([nan, inf, inf])

    >>> np.sqrt(-1)
    np.float64(nan)
    >>> with np.errstate(invalid='raise'):
    ...     np.sqrt(-1)
    Traceback (most recent call last):
      File "<stdin>", line 2, in <module>
    FloatingPointError: invalid value encountered in sqrt

    Outside the context the error handling behavior has not changed:

    >>> np.geterr()
    {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
    >>> olderr = np.seterr(**olderr)  # restore original state
    """
    __slots__ = (
        "_call", "_all", "_divide", "_over", "_under", "_invalid", "_token")

    def __init__(self, *, call=_Unspecified,
                 all=None, divide=None, over=None, under=None, invalid=None):
        # _token is non-None only while the context is entered; it is what
        # lets __exit__ restore the previous contextvar state.
        self._token = None
        self._call = call
        self._all = all
        self._divide = divide
        self._over = over
        self._under = under
        self._invalid = invalid

    def __enter__(self):
        # A non-None token means the context was already entered;
        # re-entering would clobber the saved restore point.
        if self._token is not None:
            raise TypeError("Cannot enter `np.errstate` twice.")
        # `call` uses the _Unspecified sentinel because None is a valid
        # callback value (it clears the handler).
        if self._call is _Unspecified:
            extobj = _make_extobj(
                all=self._all, divide=self._divide, over=self._over,
                under=self._under, invalid=self._invalid)
        else:
            extobj = _make_extobj(
                call=self._call,
                all=self._all, divide=self._divide, over=self._over,
                under=self._under, invalid=self._invalid)
        self._token = _extobj_contextvar.set(extobj)

    def __exit__(self, *exc_info):
        # Restore the error-handling state active before __enter__.
        _extobj_contextvar.reset(self._token)

    def __call__(self, func):
        # Decorator form: each invocation of `func` runs with this error
        # state.  A fresh local token per call keeps the decorator reusable,
        # unlike the single-entry context-manager form.
        @functools.wraps(func)
        def inner(*args, **kwargs):
            if self._call is _Unspecified:
                extobj = _make_extobj(
                    all=self._all, divide=self._divide, over=self._over,
                    under=self._under, invalid=self._invalid)
            else:
                extobj = _make_extobj(
                    call=self._call,
                    all=self._all, divide=self._divide, over=self._over,
                    under=self._under, invalid=self._invalid)
            _token = _extobj_contextvar.set(extobj)
            try:
                return func(*args, **kwargs)
            finally:
                _extobj_contextvar.reset(_token)
        return inner
# Context variable gating NEP 50 promotion warnings (see _no_nep50_warning).
NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False)

@set_module('numpy')
@contextlib.contextmanager
def _no_nep50_warning():
    """
    Context manager to disable NEP 50 warnings.  This is only relevant if
    the NEP 50 warning is enabled globally (which is not thread/context
    safe).

    This warning context manager itself is fully safe, however.
    """
    token = NO_NEP50_WARNING.set(True)
    try:
        yield
    finally:
        # Always restore the previous value, even if the body raised.
        NO_NEP50_WARNING.reset(token)
.\numpy\numpy\_core\_ufunc_config.pyi
from collections.abc import Callable
from typing import Any, Literal, TypedDict

from numpy import _SupportsWrite

_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"]
# Signature of a 'call'-mode error handler: f(err_type, flag).
# (This alias was referenced below but its definition was missing,
# leaving `Callable` imported but unused and `_ErrFunc` undefined.)
_ErrFunc = Callable[[str, int], Any]

class _ErrDict(TypedDict):
    # Result of `geterr`/`seterr`: one handling mode per exception kind.
    divide: _ErrKind
    over: _ErrKind
    under: _ErrKind
    invalid: _ErrKind

class _ErrDictOptional(TypedDict, total=False):
    # Keyword-argument form accepted by `seterr`; every key is optional.
    all: None | _ErrKind
    divide: None | _ErrKind
    over: None | _ErrKind
    under: None | _ErrKind
    invalid: None | _ErrKind

def seterr(
    all: None | _ErrKind = ...,
    divide: None | _ErrKind = ...,
    over: None | _ErrKind = ...,
    under: None | _ErrKind = ...,
    invalid: None | _ErrKind = ...,
) -> _ErrDict: ...
def geterr() -> _ErrDict: ...
def setbufsize(size: int) -> int: ...
def getbufsize() -> int: ...
def seterrcall(
    func: None | _ErrFunc | _SupportsWrite[str]
) -> None | _ErrFunc | _SupportsWrite[str]: ...
def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ...
.\numpy\numpy\_core\__init__.py
"""
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
Please note that this module is private. All functions and objects
are available in the main ``numpy`` namespace - use that instead.
"""
import os
from numpy.version import version as __version__
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
if envkey not in os.environ:
os.environ[envkey] = '1'
env_added.append(envkey)
try:
from . import multiarray
except ImportError as exc:
import sys
msg = """
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.
We have compiled some common reasons and troubleshooting tips at:
https://numpy.org/devdocs/user/troubleshooting-importerror.html
Please note and check the following:
* The Python version is: Python%d.%d from "%s"
* The NumPy version is: "%s"
and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.
Original error was: %s
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
__version__, exc)
raise ImportError(msg)
finally:
for envkey in env_added:
del os.environ[envkey]
del envkey
del env_added
del os
from . import umath

# Both wrapper modules must expose the shared `_multiarray_umath` extension;
# if either does not, a stale/partial numpy install is being picked up.
if not (hasattr(multiarray, '_multiarray_umath') and
        hasattr(umath, '_multiarray_umath')):
    import sys
    path = sys.modules['numpy'].__path__
    msg = ("Something is wrong with the numpy installation. "
        "While importing we detected an older version of "
        "numpy in {}. One method of fixing this is to repeatedly uninstall "
        "numpy until none is found, then reinstall this version.")
    raise ImportError(msg.format(path))
from . import numerictypes as nt
from .numerictypes import sctypes, sctypeDict

# Register the scalar-type dictionary with the C layer.
multiarray.set_typeDict(nt.sctypeDict)

from . import numeric
from .numeric import *
from . import fromnumeric
from .fromnumeric import *
from .records import record, recarray
from .memmap import *
from . import function_base
from .function_base import *
from . import _machar
from . import getlimits
from .getlimits import *
from . import shape_base
from .shape_base import *
from . import einsumfunc
from .einsumfunc import *
del nt

from .numeric import absolute as abs

# Private helper modules, imported for their side effects (docstring
# attachment, dtype/ctypes interop, array methods, ...).
from . import _add_newdocs
from . import _add_newdocs_scalars
from . import _dtype_ctypes
from . import _internal
from . import _dtype
from . import _methods

# Aliases using the array-API style spelling for existing numpy functions.
acos = numeric.arccos
acosh = numeric.arccosh
asin = numeric.arcsin
asinh = numeric.arcsinh
atan = numeric.arctan
atanh = numeric.arctanh
atan2 = numeric.arctan2
concat = numeric.concatenate
bitwise_left_shift = numeric.left_shift
bitwise_invert = numeric.invert
bitwise_right_shift = numeric.right_shift
permute_dims = numeric.transpose
pow = numeric.power

# Public export list; extended with the export lists of the star-imported
# submodules below.
__all__ = [
    "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2",
    "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat",
    "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray"
]
__all__ += numeric.__all__
__all__ += function_base.__all__
__all__ += getlimits.__all__
__all__ += shape_base.__all__
__all__ += einsumfunc.__all__
def _ufunc_reduce(func):
return func.__name__
def _DType_reconstruct(scalar_type):
return type(dtype(scalar_type))
def _DType_reduce(DType):
if not DType._legacy or DType.__module__ == "numpy.dtypes":
return DType.__name__
scalar_type = DType.type
return _DType_reconstruct, (scalar_type,)
def __getattr__(name):
if name == "MachAr":
import warnings
warnings.warn(
"The `np._core.MachAr` is considered private API (NumPy 1.24)",
DeprecationWarning, stacklevel=2,
)
return _machar.MachAr
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
import copyreg

# Teach pickle how to handle ufuncs and DType classes: both reduce to a
# name (or reconstructor) via the helpers defined above; then drop the
# temporary names from the module namespace.
copyreg.pickle(ufunc, _ufunc_reduce)
copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)
del copyreg, _ufunc_reduce, _DType_reduce

from numpy._pytesttester import PytestTester
# ``np._core.test`` runs this subpackage's test suite.
test = PytestTester(__name__)
del PytestTester
.\numpy\numpy\_core\__init__.pyi
.\numpy\numpy\_distributor_init.py
"""
Distributor init file
Distributors: you can add custom code here to support particular distributions
of numpy.
For example, this is a good place to put any BLAS/LAPACK initialization code.
The numpy standard source distribution will not put code in this file, so you
can safely replace this file with your own version.
"""
try:
from . import _distributor_init_local
except ImportError:
pass
.\numpy\numpy\_expired_attrs_2_0.py
"""
Dict of expired attributes that are discontinued since 2.0 release.
Each item is associated with a migration note.
"""
__expired_attributes__ = {
"geterrobj": "Use the np.errstate context manager instead.",
"seterrobj": "Use the np.errstate context manager instead.",
"cast": "Use `np.asarray(arr, dtype=dtype)` instead.",
"source": "Use `inspect.getsource` instead.",
"lookfor": "Search NumPy's documentation directly.",
"who": "Use an IDE variable explorer or `locals()` instead.",
"fastCopyAndTranspose": "Use `arr.T.copy()` instead.",
"set_numeric_ops":
"For the general case, use `PyUFunc_ReplaceLoopBySignature`. "
"For ndarray subclasses, define the ``__array_ufunc__`` method "
"and override the relevant ufunc.",
"NINF": "Use `-np.inf` instead.",
"PINF": "Use `np.inf` instead.",
"NZERO": "Use `-0.0` instead.",
"PZERO": "Use `0.0` instead.",
"add_newdoc":
"It's still available as `np.lib.add_newdoc`.",
"add_docstring":
"It's still available as `np.lib.add_docstring`.",
"add_newdoc_ufunc":
"It's an internal function and doesn't have a replacement.",
"compat": "There's no replacement, as Python 2 is no longer supported.",
"safe_eval": "Use `ast.literal_eval` instead.",
"float_": "Use `np.float64` instead.",
"complex_": "Use `np.complex128` instead.",
"longfloat": "Use `np.longdouble` instead.",
"singlecomplex": "Use `np.complex64` instead.",
"cfloat": "Use `np.complex128` instead.",
"longcomplex": "Use `np.clongdouble` instead.",
"clongfloat": "Use `np.clongdouble` instead.",
"string_": "Use `np.bytes_` instead.",
"unicode_": "Use `np.str_` instead.",
"Inf": "Use `np.inf` instead.",
"Infinity": "Use `np.inf` instead.",
"NaN": "Use `np.nan` instead.",
"infty": "Use `np.inf` instead.",
"issctype": "Use `issubclass(rep, np.generic)` instead.",
"maximum_sctype":
"Use a specific dtype instead. You should avoid relying "
"on any implicit mechanism and select the largest dtype of "
"a kind explicitly in the code.",
"obj2sctype": "Use `np.dtype(obj).type` instead.",
"sctype2char": "Use `np.dtype(obj).char` instead.",
"sctypes": "Access dtypes explicitly instead.",
"issubsctype": "Use `np.issubdtype` instead.",
"set_string_function":
"Use `np.set_printoptions` instead with a formatter for "
"custom printing of NumPy objects.",
"asfarray": "Use `np.asarray` with a proper dtype instead.",
"issubclass_": "Use `issubclass` builtin instead.",
"tracemalloc_domain": "It's now available from `np.lib`.",
"mat": "Use `np.asmatrix` instead.",
"recfromcsv": "Use `np.genfromtxt` with comma delimiter instead.",
"recfromtxt": "Use `np.genfromtxt` instead.",
"deprecate": "Emit `DeprecationWarning` with `warnings.warn` directly, "
"or use `typing.deprecated`.",
"deprecate_with_doc": "Emit `DeprecationWarning` with `warnings.warn` "
"directly, or use `typing.deprecated`.",
"disp": "Use your own printing function instead.",
"find_common_type":
"Use `numpy.promote_types` or `numpy.result_type` instead. "
"To achieve semantics for the `scalar_types` argument, use "
"`numpy.result_type` and pass the Python values `0`, `0.0`, or `0j`.",
"round_": "Use `np.round` instead.",
"get_array_wrap": "",
"DataSource": "It's still available as `np.lib.npyio.DataSource`.",
"nbytes": "Use `np.dtype(<dtype>).itemsize` instead.",
"byte_bounds": "Now it's available under `np.lib.array_utils.byte_bounds`",
"compare_chararrays":
"It's still available as `np.char.compare_chararrays`.",
"format_parser": "It's still available as `np.rec.format_parser`."
}
Comments:
.\numpy\numpy\_globals.py
"""
Module defining global singleton classes.
This module raises a RuntimeError if an attempt to reload it is made. In that
way the identities of the classes defined here are fixed and will remain so
even if numpy itself is reloaded. In particular, a function like the following
will still work correctly after numpy is reloaded::
def foo(arg=np._NoValue):
if arg is np._NoValue:
...
That was not the case when the singleton classes were defined in the numpy
``__init__.py`` file. See gh-7844 for a discussion of the reload problem that
motivated this module.
"""
import enum
from ._utils import set_module as _set_module
__all__ = ['_NoValue', '_CopyMode']
if '_is_loaded' in globals():
raise RuntimeError('Reloading numpy._globals is not allowed')
_is_loaded = True
class _NoValueType:
"""Special keyword value.
The instance of this class may be used as the default value assigned to a
keyword if no other obvious default (e.g., `None`) is suitable,
Common reasons for using this keyword are:
- A new keyword is added to a function, and that function forwards its
inputs to another function or method which can be defined outside of
NumPy. For example, ``np.std(x)`` calls ``x.std``, so when a ``keepdims``
keyword was added that could only be forwarded if the user explicitly
specified ``keepdims``; downstream array libraries may not have added
the same keyword, so adding ``x.std(..., keepdims=keepdims)``
unconditionally could have broken previously working code.
- A keyword is being deprecated, and a deprecation warning must only be
emitted when the keyword is used.
"""
__instance = None
def __new__(cls):
if not cls.__instance:
cls.__instance = super().__new__(cls)
return cls.__instance
def __repr__(self):
return "<no value>"
_NoValue = _NoValueType()
@_set_module("numpy")
class _CopyMode(enum.Enum):
"""
An enumeration for the copy modes supported
by numpy.copy() and numpy.array(). The following three modes are supported,
- ALWAYS: This means that a deep copy of the input
array will always be taken.
- IF_NEEDED: This means that a deep copy of the input
array will be taken only if necessary.
- NEVER: This means that the deep copy will never be taken.
If a copy cannot be avoided then a `ValueError` will be
raised.
Note that the buffer-protocol could in theory do copies. NumPy currently
assumes an object exporting the buffer protocol will never do this.
"""
ALWAYS = True
NEVER = False
IF_NEEDED = 2
def __bool__(self):
if self == _CopyMode.ALWAYS:
return True
if self == _CopyMode.NEVER:
return False
raise ValueError(f"{self} is neither True nor False.")
.\numpy\numpy\_pyinstaller\hook-numpy.py
"""This hook should collect all binary files and any hidden modules that numpy
needs.
Our (some-what inadequate) docs for writing PyInstaller hooks are kept here:
https://pyinstaller.readthedocs.io/en/stable/hooks.html
"""
from PyInstaller.compat import is_conda, is_pure_conda
from PyInstaller.utils.hooks import collect_dynamic_libs, is_module_satisfies
binaries = collect_dynamic_libs("numpy", ".")
if is_pure_conda:
from PyInstaller.utils.hooks import conda_support
datas = conda_support.collect_dynamic_libs("numpy", dependencies=True)
hiddenimports = ['numpy._core._dtype_ctypes', 'numpy._core._multiarray_tests']
excludedimports = [
"scipy",
"pytest",
"f2py",
"setuptools",
"numpy.f2py",
"distutils",
"numpy.distutils",
]
.\numpy\numpy\_pyinstaller\tests\pyinstaller-smoke.py
"""A crude *bit of everything* smoke test to verify PyInstaller compatibility.
PyInstaller typically goes wrong by forgetting to package modules, extension
modules or shared libraries. This script should aim to touch as many of those
as possible in an attempt to trip a ModuleNotFoundError or a DLL load failure
due to an uncollected resource. Missing resources are unlikely to lead to
arithmetic errors so there's generally no need to verify any calculation's
output - merely that it made it to the end OK. This script should not
explicitly import any of numpy's submodules as that gives PyInstaller undue
hints that those submodules exist and should be collected (accessing implicitly
loaded submodules is OK).
"""
import numpy as np
a = np.arange(1., 10.).reshape((3, 3)) % 5
np.linalg.det(a)
a @ a
a @ a.T
np.linalg.inv(a)
np.sin(np.exp(a))
np.linalg.svd(a)
np.linalg.eigh(a)
np.unique(np.random.randint(0, 10, 100))
np.sort(np.random.uniform(0, 10, 100))
np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
np.ma.masked_array(np.arange(10), np.random.rand(10) < .5).sum()
np.polynomial.Legendre([7, 8, 9]).roots()
print("I made it!")
.\numpy\numpy\_pyinstaller\tests\test_pyinstaller.py
import subprocess
from pathlib import Path
import pytest
@pytest.mark.filterwarnings('ignore::DeprecationWarning')
@pytest.mark.filterwarnings('ignore::ResourceWarning')
@pytest.mark.parametrize("mode", ["--onedir", "--onefile"])
@pytest.mark.slow
def test_pyinstaller(mode, tmp_path):
    """Compile and run pyinstaller-smoke.py using PyInstaller."""
    pyinstaller_cli = pytest.importorskip("PyInstaller.__main__").run

    smoke_script = Path(__file__).with_name("pyinstaller-smoke.py").resolve()
    cli_args = [
        '--workpath', str(tmp_path / "build"),
        '--distpath', str(tmp_path / "dist"),
        '--specpath', str(tmp_path),
        mode,
        str(smoke_script),
    ]
    pyinstaller_cli(cli_args)

    # --onefile yields a single executable; --onedir nests it inside a
    # directory named after the script.
    dist_dir = tmp_path / "dist"
    if mode == "--onefile":
        exe = dist_dir / smoke_script.stem
    else:
        exe = dist_dir / smoke_script.stem / smoke_script.stem

    completed = subprocess.run(
        [str(exe)], check=True, stdout=subprocess.PIPE)
    assert completed.stdout.strip() == b"I made it!"
.\numpy\numpy\_pyinstaller\tests\__init__.py
from numpy.testing import IS_WASM, IS_EDITABLE
if IS_WASM:
pytest.skip(
"WASM/Pyodide does not use or support Fortran",
allow_module_level=True
)
if IS_EDITABLE:
pytest.skip(
"Editable install doesn't support tests with a compile step",
allow_module_level=True
)
.\numpy\numpy\_pyinstaller\__init__.py
from datetime import datetime, timedelta
def add_days(date_str, days):
    """Return ``date_str`` (``YYYY-MM-DD``) shifted by ``days`` days.

    Parameters
    ----------
    date_str : str
        A date in ISO ``YYYY-MM-DD`` format.
    days : int
        Number of days to add (may be negative).

    Returns
    -------
    str
        The shifted date, also formatted as ``YYYY-MM-DD``.
    """
    parsed = datetime.strptime(date_str, '%Y-%m-%d')
    shifted = parsed + timedelta(days=days)
    return shifted.strftime('%Y-%m-%d')
.\numpy\numpy\_pytesttester.py
"""
Pytest test running.
This module implements the ``test()`` function for NumPy modules. The usual
boiler plate for doing that is to put the following in the module
``__init__.py`` file::
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
Warnings filtering and other runtime settings should be dealt with in the
``pytest.ini`` file in the numpy repo root. The behavior of the test depends on
whether or not that file is found as follows:
* ``pytest.ini`` is present (develop mode)
All warnings except those explicitly filtered out are raised as error.
* ``pytest.ini`` is absent (release mode)
DeprecationWarnings and PendingDeprecationWarnings are ignored, other
warnings are passed through.
In practice, tests run from the numpy repo are run in development mode with
``spin``, through the standard ``spin test`` invocation or from an inplace
build with ``pytest numpy``.
This module is imported by every numpy subpackage, so lies at the top level to
simplify circular import issues. For the same reason, it contains no numpy
imports at module scope, instead importing numpy within function calls.
"""
import sys
import os
__all__ = ['PytestTester']
class PytestTester:
    """
    Pytest test runner.

    A test function is typically added to a package's __init__.py like so::

        from numpy._pytesttester import PytestTester
        test = PytestTester(__name__).test
        del PytestTester

    Calling this test function finds and runs all tests associated with the
    module and all its sub-modules.

    Attributes
    ----------
    module_name : str
        Full path to the package to test.

    Parameters
    ----------
    module_name : module name
        The name of the module to test.

    Notes
    -----
    Unlike the previous ``nose``-based implementation, this class is not
    publicly exposed as it performs some ``numpy``-specific warning
    suppression.
    """

    def __init__(self, module_name):
        # Only the dotted module path is stored here.
        self.module_name = module_name
.\numpy\numpy\_pytesttester.pyi
from collections.abc import Iterable
from typing import Literal as L

__all__: list[str]

class PytestTester:
    # Dotted path of the package under test.
    module_name: str
    def __init__(self, module_name: str) -> None: ...
    # NOTE(review): mirrors the runtime ``PytestTester.__call__`` signature;
    # presumably the bool is the success flag of the pytest run -- confirm
    # against the runtime implementation.
    def __call__(
        self,
        label: L["fast", "full"] = ...,
        verbose: int = ...,
        extra_argv: None | Iterable[str] = ...,
        doctests: L[False] = ...,
        coverage: bool = ...,
        durations: int = ...,
        tests: None | Iterable[str] = ...,
    ) -> bool: ...
.\numpy\numpy\_typing\_add_docstring.py
"""A module for creating docstrings for sphinx ``data`` domains."""
import re
import textwrap
from ._array_like import NDArray
_docstrings_list = []
def add_newdoc(name: str, value: str, doc: str) -> None:
"""Append ``_docstrings_list`` with a docstring for `name`.
Parameters
----------
name : str
The name of the object.
value : str
A string-representation of the object.
doc : str
The docstring of the object.
"""
_docstrings_list.append((name, value, doc))
def _parse_docstrings() -> str:
    """Convert all docstrings in ``_docstrings_list`` into a single
    sphinx-legible text block.
    """
    type_list_ret = []
    for name, value, doc in _docstrings_list:
        # Indent the docstring body so it nests under the ``.. data::``
        # directive emitted below.
        s = textwrap.dedent(doc).replace("\n", "\n    ")
        lines = s.split("\n")
        new_lines = []
        indent = ""
        for line in lines:
            # A line consisting only of '-' or '=' underlines the previous
            # line: that previous line is a numpydoc section header.
            m = re.match(r'^(\s+)[-=]+\s*$', line)
            if m and new_lines:
                prev = textwrap.dedent(new_lines.pop())
                if prev == "Examples":
                    # "Examples" becomes a rubric; its content keeps the
                    # original indentation level.
                    indent = ""
                    new_lines.append(f'{m.group(1)}.. rubric:: {prev}')
                else:
                    # Other sections become admonitions whose bodies must be
                    # indented one extra level.
                    indent = 4 * " "
                    new_lines.append(f'{m.group(1)}.. admonition:: {prev}')
                new_lines.append("")
            else:
                new_lines.append(f"{indent}{line}")
        s = "\n".join(new_lines)
        # Assemble the final ``.. data::`` entry for this name.
        s_block = f""".. data:: {name}\n    :value: {value}\n    {s}"""
        type_list_ret.append(s_block)
    return "\n".join(type_list_ret)
add_newdoc('ArrayLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced
into an `~numpy.ndarray`.
Among others this includes the likes of:
* Scalars.
* (Nested) sequences.
* Objects implementing the `~class.__array__` protocol.
.. versionadded:: 1.20
See Also
--------
:term:`array_like`:
Any scalar or sequence that can be interpreted as an ndarray.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_array(a: npt.ArrayLike) -> np.ndarray:
... return np.array(a)
""")
# NOTE(review): in this copy of the file the example code had escaped the
# triple-quoted doc argument, leaving the ``add_newdoc`` call unterminated
# (a syntax error).  The call is reconstructed here with the example placed
# back inside the string, matching the ``ArrayLike`` entry above.
add_newdoc('DTypeLike', 'typing.Union[...]',
    """
    A `~typing.Union` representing objects that can be coerced
    into a `~numpy.dtype`.

    Among others this includes the likes of:

    * :class:`type` objects.
    * Character codes or the names of :class:`type` objects.
    * Objects with the ``.dtype`` attribute.

    .. versionadded:: 1.20

    See Also
    --------
    :ref:`Specifying and constructing data types <arrays.dtypes.constructing>`
        A comprehensive overview of all objects that can be coerced
        into data types.

    Examples
    --------
    .. code-block:: python

        >>> import numpy as np
        >>> import numpy.typing as npt

        >>> def as_dtype(d: npt.DTypeLike) -> np.dtype:
        ...     return np.dtype(d)

    """)
add_newdoc('NDArray', repr(NDArray),
"""
定义一个 `np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>` 类型的别名,
其中 `dtype.type <numpy.dtype.type>` 是关于泛型类型的概念。
可以在运行时用于定义具有给定 dtype 和未指定形状的数组。
.. versionadded:: 1.21
示例
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> print(npt.NDArray)
numpy.ndarray[typing.Any, numpy.dtype[+_ScalarType_co]]
>>> print(npt.NDArray[np.float64])
numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
>>> NDArrayInt = npt.NDArray[np.int_]
>>> a: NDArrayInt = np.arange(10)
>>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]:
... return np.array(a)
"""
)
_docstrings = _parse_docstrings()
.\numpy\numpy\_typing\_array_like.py
from __future__ import annotations
import sys
from collections.abc import Collection, Callable, Sequence
from typing import Any, Protocol, Union, TypeVar, runtime_checkable
import numpy as np
from numpy import (
ndarray,
dtype,
generic,
unsignedinteger,
integer,
floating,
complexfloating,
number,
timedelta64,
datetime64,
object_,
void,
str_,
bytes_,
)
from ._nested_sequence import _NestedSequence
_T = TypeVar("_T")
_ScalarType = TypeVar("_ScalarType", bound=generic)
_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True)
_DType = TypeVar("_DType", bound=dtype[Any])
_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any])
NDArray = ndarray[Any, dtype[_ScalarType_co]]
@runtime_checkable
class _SupportsArray(Protocol[_DType_co]):
    # Protocol for objects convertible to an ndarray via ``__array__``.
    def __array__(self) -> ndarray[Any, _DType_co]: ...


@runtime_checkable
class _SupportsArrayFunc(Protocol):
    """A protocol class representing `~class.__array_function__`."""
    def __array_function__(
        self,
        func: Callable[..., Any],
        types: Collection[type[Any]],
        args: tuple[Any, ...],
        kwargs: dict[str, Any],
    ) -> object: ...
_FiniteNestedSequence = Union[
_T,
Sequence[_T],
Sequence[Sequence[_T]],
Sequence[Sequence[Sequence[_T]]],
Sequence[Sequence[Sequence[Sequence[_T]]]],
]
_ArrayLike = Union[
_SupportsArray[dtype[_ScalarType]],
_NestedSequence[_SupportsArray[dtype[_ScalarType]]],
]
_DualArrayLike = Union[
_SupportsArray[_DType],
_NestedSequence[_SupportsArray[_DType]],
_T,
_NestedSequence[_T],
]
if sys.version_info >= (3, 12):
    # ``collections.abc.Buffer`` only exists on 3.12+; there, anything
    # implementing the buffer protocol is also array-like.
    from collections.abc import Buffer
    ArrayLike = Buffer | _DualArrayLike[
        dtype[Any],
        Union[bool, int, float, complex, str, bytes],
    ]
else:
    ArrayLike = _DualArrayLike[
        dtype[Any],
        Union[bool, int, float, complex, str, bytes],
    ]
# The ``_co`` aliases accept any input that casts safely to the named
# scalar kind (hence the UInt/Int variants also admit ``np.bool``).
_ArrayLikeBool_co = _DualArrayLike[
    dtype[np.bool],
    bool,
]
_ArrayLikeUInt_co = _DualArrayLike[
    dtype[Union[np.bool, unsignedinteger[Any]]],
    bool,
]
_ArrayLikeInt_co = _DualArrayLike[
    dtype[Union[np.bool, integer[Any]]],
    Union[bool, int],
]
# Array-likes that cast safely to floating point: bool, integer and
# floating scalars/dtypes all qualify.
# BUG FIX: the original was missing the closing ``]`` of this subscript,
# which made the module a syntax error.
_ArrayLikeFloat_co = _DualArrayLike[
    dtype[Union[np.bool, integer[Any], floating[Any]]],
    Union[bool, int, float],
]
# Complex-compatible array-likes additionally admit ``complex`` scalars
# and ``complexfloating`` dtypes.
_ArrayLikeComplex_co = _DualArrayLike[
    dtype[Union[
        np.bool,
        integer[Any],
        floating[Any],
        complexfloating[Any, Any],
    ]],
    Union[bool, int, float, complex],
]
_ArrayLikeNumber_co = _DualArrayLike[
    dtype[Union[np.bool, number[Any]]],
    Union[bool, int, float, complex],
]
# timedelta64 arrays also absorb bool/int operands.
_ArrayLikeTD64_co = _DualArrayLike[
    dtype[Union[np.bool, integer[Any], timedelta64]],
    Union[bool, int],
]
# datetime64 has no builtin Python counterpart, hence plain ``Union``
# instead of ``_DualArrayLike`` for these three.
_ArrayLikeDT64_co = Union[
    _SupportsArray[dtype[datetime64]],
    _NestedSequence[_SupportsArray[dtype[datetime64]]],
]
_ArrayLikeObject_co = Union[
    _SupportsArray[dtype[object_]],
    _NestedSequence[_SupportsArray[dtype[object_]]],
]
_ArrayLikeVoid_co = Union[
    _SupportsArray[dtype[void]],
    _NestedSequence[_SupportsArray[dtype[void]]],
]
_ArrayLikeStr_co = _DualArrayLike[
    dtype[str_],
    str,
]
_ArrayLikeBytes_co = _DualArrayLike[
    dtype[bytes_],
    bytes,
]
# Non-covariant variant: plain integer array-likes only (no ``bool``).
_ArrayLikeInt = _DualArrayLike[
    dtype[integer[Any]],
    int,
]
class _UnknownType:
    # Sentinel scalar type no user value can match.
    ...


# NOTE(review): presumably a fallback alias so that otherwise-unmatched
# array-likes produce a type error rather than silently matching — verify
# against its consumers.
_ArrayLikeUnknown = _DualArrayLike[
    dtype[_UnknownType],
    _UnknownType,
]
.\numpy\numpy\_typing\_callable.pyi
"""
A module with various ``typing.Protocol`` subclasses that implement
the ``__call__`` magic method.
See the `Mypy documentation`_ on protocols for more details.
.. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
"""
from __future__ import annotations
from typing import (
TypeVar,
overload,
Any,
NoReturn,
Protocol,
)
import numpy as np
from numpy import (
generic,
timedelta64,
number,
integer,
unsignedinteger,
signedinteger,
int8,
int_,
floating,
float64,
complexfloating,
complex128,
)
from ._nbit import _NBitInt, _NBitDouble
from ._scalars import (
_BoolLike_co,
_IntLike_co,
_FloatLike_co,
_NumberLike_co,
)
from . import NBitBase
from ._array_like import NDArray
from ._nested_sequence import _NestedSequence
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
# Contravariant inputs for _ComparisonOp: a comparison accepting a wider
# operand type also satisfies a protocol declared for a narrower one.
_T1_contra = TypeVar("_T1_contra", contravariant=True)
_T2_contra = TypeVar("_T2_contra", contravariant=True)
# Homogeneous pair — the (quotient, remainder) shape returned by divmod.
_2Tuple = tuple[_T1, _T1]
# Precision ("number of bits") variables used to merge operand precisions.
_NBit1 = TypeVar("_NBit1", bound=NBitBase)
_NBit2 = TypeVar("_NBit2", bound=NBitBase)
_IntType = TypeVar("_IntType", bound=integer)
_FloatType = TypeVar("_FloatType", bound=floating)
_NumberType = TypeVar("_NumberType", bound=number)
_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
class _BoolOp(Protocol[_GenericType_co]):
    """Arithmetic op on ``np.bool``: the other operand's type dominates."""
    @overload
    def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
    @overload  # builtin int promotes to the default integer type
    def __call__(self, other: int, /) -> int_: ...
    @overload  # builtin float promotes to double precision
    def __call__(self, other: float, /) -> float64: ...
    @overload
    def __call__(self, other: complex, /) -> complex128: ...
    @overload  # any numpy number wins over np.bool
    def __call__(self, other: _NumberType, /) -> _NumberType: ...


class _BoolBitOp(Protocol[_GenericType_co]):
    """Bitwise op on ``np.bool``: only bool/integer operands are allowed."""
    @overload
    def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
    @overload
    def __call__(self, other: int, /) -> int_: ...
    @overload
    def __call__(self, other: _IntType, /) -> _IntType: ...


class _BoolSub(Protocol):
    """Subtraction involving ``np.bool``."""
    @overload  # typed NoReturn: bool - bool is rejected
    def __call__(self, other: bool, /) -> NoReturn: ...
    @overload
    def __call__(self, other: int, /) -> int_: ...
    @overload
    def __call__(self, other: float, /) -> float64: ...
    @overload
    def __call__(self, other: complex, /) -> complex128: ...
    @overload
    def __call__(self, other: _NumberType, /) -> _NumberType: ...


class _BoolTrueDiv(Protocol):
    """True division on ``np.bool``: always leaves the integer domain."""
    @overload
    def __call__(self, other: float | _IntLike_co, /) -> float64: ...
    @overload
    def __call__(self, other: complex, /) -> complex128: ...
    @overload
    def __call__(self, other: _NumberType, /) -> _NumberType: ...


class _BoolMod(Protocol):
    """Modulo on ``np.bool``: bool % bool yields the smallest int type."""
    @overload
    def __call__(self, other: _BoolLike_co, /) -> int8: ...
    @overload
    def __call__(self, other: int, /) -> int_: ...
    @overload
    def __call__(self, other: float, /) -> float64: ...
    @overload
    def __call__(self, other: _IntType, /) -> _IntType: ...
    @overload
    def __call__(self, other: _FloatType, /) -> _FloatType: ...
class _BoolDivMod(Protocol):
    """``divmod`` on ``np.bool``: returns a pair, promoted like ``%``.

    BUG FIX: the ``float`` overload previously returned
    ``_2Tuple[floating[_NBit1 | _NBitDouble]]``, but this protocol is not
    generic in ``_NBit1`` — the TypeVar appeared only in a return type,
    which is invalid. The sibling ``_BoolSub``/``_BoolMod`` protocols
    return ``float64`` for a ``float`` operand, so this does too.
    """
    @overload
    def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ...
    @overload
    def __call__(self, other: int, /) -> _2Tuple[int_]: ...
    @overload
    def __call__(self, other: float, /) -> _2Tuple[float64]: ...
    @overload
    def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ...
    @overload
    def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ...
class _TD64Div(Protocol[_NumberType_co]):
    """Division of ``timedelta64``: td/td is a number, td/number is td."""
    @overload
    def __call__(self, other: timedelta64, /) -> _NumberType_co: ...
    @overload  # typed NoReturn: dividing a timedelta by a bool is rejected
    def __call__(self, other: _BoolLike_co, /) -> NoReturn: ...
    @overload
    def __call__(self, other: _FloatLike_co, /) -> timedelta64: ...


class _IntTrueDiv(Protocol[_NBit1]):
    """True division on an integer of precision ``_NBit1``: floats out."""
    @overload
    def __call__(self, other: bool, /) -> floating[_NBit1]: ...
    @overload  # precision merges with the platform int
    def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
    @overload  # precision merges with C double
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: complex, /,
    ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
    @overload
    def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ...
class _UnsignedIntOp(Protocol[_NBit1]):
    """Arithmetic on ``unsignedinteger[_NBit1]``."""
    @overload
    def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
    @overload  # mixing unsigned with signed has no single portable result
    def __call__(
        self, other: int | signedinteger[Any], /
    ) -> Any: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: complex, /,
    ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: unsignedinteger[_NBit2], /
    ) -> unsignedinteger[_NBit1 | _NBit2]: ...


class _UnsignedIntBitOp(Protocol[_NBit1]):
    """Bitwise op on ``unsignedinteger[_NBit1]``; bool/int keep int-ness."""
    @overload
    def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> signedinteger[Any]: ...
    @overload
    def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ...
    @overload
    def __call__(
        self, other: unsignedinteger[_NBit2], /
    ) -> unsignedinteger[_NBit1 | _NBit2]: ...


class _UnsignedIntMod(Protocol[_NBit1]):
    """Modulo on ``unsignedinteger[_NBit1]``."""
    @overload
    def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
    @overload  # see _UnsignedIntOp: signed mixing yields Any
    def __call__(
        self, other: int | signedinteger[Any], /
    ) -> Any: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: unsignedinteger[_NBit2], /
    ) -> unsignedinteger[_NBit1 | _NBit2]: ...


class _UnsignedIntDivMod(Protocol[_NBit1]):
    """``divmod`` on ``unsignedinteger[_NBit1]``: a (quot, rem) pair."""
    @overload
    def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
    @overload
    def __call__(
        self, other: int | signedinteger[Any], /
    ) -> _2Tuple[Any]: ...
    @overload
    def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
    @overload
    def __call__(
        self, other: unsignedinteger[_NBit2], /
    ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
class _SignedIntOp(Protocol[_NBit1]):
    """Arithmetic on ``signedinteger[_NBit1]``: precisions merge."""
    @overload
    def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: complex, /,
    ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: signedinteger[_NBit2], /,
    ) -> signedinteger[_NBit1 | _NBit2]: ...


class _SignedIntBitOp(Protocol[_NBit1]):
    """Bitwise op on ``signedinteger[_NBit1]``; integer operands only."""
    @overload
    def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(
        self, other: signedinteger[_NBit2], /,
    ) -> signedinteger[_NBit1 | _NBit2]: ...


class _SignedIntMod(Protocol[_NBit1]):
    """Modulo on ``signedinteger[_NBit1]``."""
    @overload
    def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: signedinteger[_NBit2], /,
    ) -> signedinteger[_NBit1 | _NBit2]: ...


class _SignedIntDivMod(Protocol[_NBit1]):
    """``divmod`` on ``signedinteger[_NBit1]``: a (quot, rem) pair."""
    @overload
    def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
    @overload
    def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
    @overload
    def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
    @overload
    def __call__(
        self, other: signedinteger[_NBit2], /,
    ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
class _FloatOp(Protocol[_NBit1]):
    """Arithmetic on ``floating[_NBit1]``: result precision is the merge."""
    @overload
    def __call__(self, other: bool, /) -> floating[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: complex, /,
    ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: integer[_NBit2] | floating[_NBit2], /
    ) -> floating[_NBit1 | _NBit2]: ...


class _FloatMod(Protocol[_NBit1]):
    """Modulo on ``floating[_NBit1]``; no complex operand is accepted."""
    @overload
    def __call__(self, other: bool, /) -> floating[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: integer[_NBit2] | floating[_NBit2], /
    ) -> floating[_NBit1 | _NBit2]: ...
class _FloatDivMod(Protocol[_NBit1]):
    """``divmod`` on ``floating[_NBit1]``: a pair with merged precision.

    BUG FIX: the first ``__call__`` lacked its ``@overload`` decorator —
    two undecorated ``__call__`` definitions are a redefinition error for
    type checkers — and the ``bool`` overload present in every parallel
    protocol (``_FloatOp``, ``_FloatMod``, ``_SignedIntDivMod``) was
    missing. Both are restored.
    """
    @overload
    def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ...
    @overload
    def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
    @overload
    def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
    @overload
    def __call__(
        self, other: integer[_NBit2] | floating[_NBit2], /
    ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
class _ComplexOp(Protocol[_NBit1]):
    """Arithmetic on ``complexfloating[_NBit1, _NBit1]``."""
    @overload
    def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
    @overload  # builtin float and complex both merge with double precision
    def __call__(
        self, other: complex, /,
    ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self,
        other: (
            integer[_NBit2]
            | floating[_NBit2]
            | complexfloating[_NBit2, _NBit2]
        ), /,
    ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...


class _NumberOp(Protocol):
    """Catch-all numeric op: any number-like operand, untyped result."""
    def __call__(self, other: _NumberLike_co, /) -> Any: ...
class _SupportsLT(Protocol):
    # Anything orderable via ``<``.
    def __lt__(self, other: Any, /) -> object: ...


class _SupportsGT(Protocol):
    # Anything orderable via ``>``.
    def __gt__(self, other: Any, /) -> object: ...


class _ComparisonOp(Protocol[_T1_contra, _T2_contra]):
    """Rich comparison: scalar operands give ``np.bool``, array-likes give
    a boolean array, and other orderable objects give an untyped result."""
    @overload
    def __call__(self, other: _T1_contra, /) -> np.bool: ...
    @overload
    def __call__(self, other: _T2_contra, /) -> NDArray[np.bool]: ...
    @overload
    def __call__(
        self,
        other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT],
        /,
    ) -> Any: ...