NumPy 源码解析(十九)
.\numpy\numpy\lib\_nanfunctions_impl.pyi
from numpy._core.fromnumeric import (
amin,
amax,
argmin,
argmax,
sum,
prod,
cumsum,
cumprod,
mean,
var,
std
)
from numpy.lib._function_base_impl import (
median,
percentile,
quantile
)
# Stub for numpy.lib._nanfunctions_impl: each nan-aware reduction shares its
# signature with the corresponding regular reduction, so the stub simply
# aliases the declarations imported from fromnumeric/_function_base_impl.
__all__: list[str]
nanmin = amin
nanmax = amax
nanargmin = argmin
nanargmax = argmax
nansum = sum
nanprod = prod
nancumsum = cumsum
nancumprod = cumprod
nanmean = mean
nanvar = var
nanstd = std
nanmedian = median
nanpercentile = percentile
nanquantile = quantile
.\numpy\numpy\lib\_npyio_impl.py
"""
IO related functions.
"""
import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
import operator
from operator import itemgetter, index as opindex, methodcaller
from collections.abc import Mapping
import pickle
import numpy as np
from . import format
from ._datasource import DataSource
from numpy._core import overrides
from numpy._core.multiarray import packbits, unpackbits
from numpy._core._multiarray_umath import _load_from_filelike
from numpy._core.overrides import set_array_function_like_doc, set_module
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _decode_line
)
from numpy._utils import asunicode, asbytes
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex'
]
# Pre-bind the overrides dispatcher so that every function in this module
# dispatched through `__array_function__` reports 'numpy' as its public
# module.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
class BagObj:
    """
    BagObj(obj)

    Redirect attribute look-ups on this wrapper to item look-ups on the
    wrapped object, so that ``bag.name`` returns ``obj['name']``.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.  Only a weak
        proxy is kept, so the bag never prolongs the wrapped object's
        lifetime.

    Examples
    --------
    >>> from numpy.lib._npyio_impl import BagObj as BO
    >>> class BagDemo:
    ...     def __getitem__(self, key):
    ...         # Called for every attribute look-up on BagObj(BagDemo)
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"
    """

    def __init__(self, obj):
        # Weak proxy: the bag must not keep the wrapped object alive.
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        # Bypass our own interception to reach the stored proxy, then
        # translate the item look-up failure into an attribute error.
        wrapped = object.__getattribute__(self, '_obj')
        try:
            return wrapped[key]
        except KeyError:
            raise AttributeError(key) from None

    def __dir__(self):
        """
        Enable ``dir(bagobj)`` to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        wrapped = object.__getattribute__(self, '_obj')
        return list(wrapped.keys())
def zipfile_factory(file, *args, **kwargs):
    """
    Open a `zipfile.ZipFile` with Zip64 support forced on.

    ``file`` may be a file-like object (anything exposing ``read``) or a
    path-like object (str, bytes, or ``os.PathLike``); any further
    positional and keyword arguments are forwarded to the
    ``zipfile.ZipFile`` constructor.
    """
    # Path-like inputs are converted to a filesystem path; objects that
    # already expose ``read`` are handed to ZipFile untouched.
    if not hasattr(file, 'read'):
        file = os.fspath(file)
    import zipfile
    kwargs['allowZip64'] = True  # large (>4 GB) archives must work
    return zipfile.ZipFile(file, *args, **kwargs)
@set_module('numpy.lib.npyio')
class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension,
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute can be performed as an alternative
        to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.
    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.
    max_header_size : int, optional
        Maximum allowed size of the header. Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed. In that case
        the file is by definition trusted and the limit is unnecessary.

    Parameters
    ----------
    fid : file, str, or pathlib.Path
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)
    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> npz
    NpzFile 'object' with keys: x, y
    >>> sorted(npz.files)
    ['x', 'y']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    # Class-level defaults keep close()/__del__ safe even if __init__
    # failed part-way through.
    zip = None
    fid = None
    # Cap on how many member names __repr__ lists before truncating.
    _MAX_REPR_ARRAY_COUNT = 5

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None, *,
                 max_header_size=format._MAX_HEADER_SIZE):
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.max_header_size = max_header_size
        self.pickle_kwargs = pickle_kwargs
        # Public `files` lists member names with the '.npy' suffix removed;
        # `_files` keeps the raw archive names.
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        # Keep the handle only when we are responsible for closing it.
        if own_fid:
            self.fid = fid

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.
        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # drop the attribute-access helper (BagObj)

    def __del__(self):
        self.close()

    # --- Mapping protocol -------------------------------------------------
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, key):
        # Lazily extract and parse the member on access.  Accept both the
        # raw archive name and the suffix-less public name.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            # Peek at the magic bytes to decide between a serialized array
            # and an arbitrary file stored in the archive.
            # (local name `bytes` shadows the builtin; kept as-is)
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs,
                                         max_header_size=self.max_header_size)
            else:
                return self.zip.read(key)
        else:
            raise KeyError(f"{key} is not a file in the archive")

    def __contains__(self, key):
        return (key in self._files or key in self.files)

    def __repr__(self):
        # Prefer a real filename; fall back to the handle's name or 'object'.
        if isinstance(self.fid, str):
            filename = self.fid
        else:
            filename = getattr(self.fid, "name", "object")
        array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
        if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
            array_names += "..."
        return f"NpzFile {filename!r} with keys: {array_names}"

    def get(self, key, default=None, /):
        """
        Return the value for `key` if `key` is in the archive, else `default`
        (which defaults to None).
        """
        return Mapping.get(self, key, default)

    def items(self):
        """
        Return a set-like object providing a view on the archive's items.
        """
        return Mapping.items(self)

    def keys(self):
        """
        Return a set-like object providing a view on the archive's keys.
        """
        return Mapping.keys(self)

    def values(self):
        """
        Return a set-like object providing a view on the archive's values.
        """
        return Mapping.values(self)
@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
.. warning:: Loading files that contain object arrays uses the ``pickle``
module, which is not secure against erroneous or maliciously
constructed data. Consider passing ``allow_pickle=False`` to
load data that is known not to contain object arrays for the
safer handling of untrusted sources.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods and must always
be opened in binary mode. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files in Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
max_header_size : int, optional
Maximum allowed size of the header. Large headers may not be safe
to load securely and thus require explicitly passing a larger value.
See :py:func:`ast.literal_eval()` for details.
This option is ignored when `allow_pickle` is passed. In that case
the file is by definition trusted and the limit is unnecessary.
Returns
-------
"""
result : array, tuple, dict, etc.
存储在文件中的数据。对于 `.npz` 文件,返回的 NpzFile 类的实例必须关闭,以避免泄漏文件描述符。
Raises
------
OSError
如果输入文件不存在或无法读取。
UnpicklingError
如果 `allow_pickle=True`,但文件无法作为 pickle 加载。
ValueError
文件包含对象数组,但给定了 `allow_pickle=False`。
EOFError
在同一文件句柄上多次调用 `np.load` 时,如果已经读取了所有数据。
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : 创建一个内存映射到存储在磁盘上的数组。
lib.format.open_memmap : 创建或加载一个内存映射的 `.npy` 文件。
Notes
-----
- 如果文件包含 pickle 数据,则返回存储在 pickle 中的对象。
- 如果文件是 `.npy` 文件,则返回单个数组。
- 如果文件是 `.npz` 文件,则返回类似字典的对象,包含 `{filename: array}` 键值对,每个键值对对应存档中的一个文件。
- 如果文件是 `.npz` 文件,则返回的值支持上下文管理器协议,类似于 open 函数的用法::
with load('foo.npz') as data:
a = data['a']
当退出 'with' 块时关闭底层文件描述符。
Examples
--------
将数据存储到磁盘并再次加载:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
将压缩数据存储到磁盘并再次加载:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
内存映射存储的数组,然后直接从磁盘访问第二行:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
if encoding not in ('ASCII', 'latin1', 'bytes'):
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
with contextlib.ExitStack() as stack:
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = stack.enter_context(open(os.fspath(file), "rb"))
own_fid = True
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06'
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
if not magic:
raise EOFError("No data left in file")
fid.seek(-min(N, len(magic)), 1)
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
stack.pop_all()
ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs,
max_header_size=max_header_size)
return ret
elif magic == format.MAGIC_PREFIX:
if mmap_mode:
if allow_pickle:
max_header_size = 2**64
return format.open_memmap(file, mode=mmap_mode,
max_header_size=max_header_size)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs,
max_header_size=max_header_size)
else:
if not allow_pickle:
raise ValueError("Cannot load file containing pickled data "
"when allow_pickle=False")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception as e:
raise pickle.UnpicklingError(
f"Failed to interpret file {file!r} as a pickle") from e
@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=np._NoValue):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path,
a ``.npy`` extension will be appended to the filename if it does not
already have one.
arr : array_like
Array data to be saved.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for
disallowing pickles include security (loading pickled data can execute
arbitrary code) and portability (pickled objects may not be loadable
on different Python installations, for example if the stored objects
require libraries that are not available, and not all pickled data is
compatible between different versions of Python).
Default: True
fix_imports : bool, optional
The `fix_imports` flag is deprecated and has no effect.
.. deprecated:: 2.1
This flag is ignored since NumPy 1.17 and was only needed to
support loading some files in Python 2 written in Python 3.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Any data saved to the file is appended to the end of the file.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> with open('test.npy', 'wb') as f:
... np.save(f, np.array([1, 2]))
... np.save(f, np.array([1, 3]))
>>> with open('test.npy', 'rb') as f:
... a = np.load(f)
... b = np.load(f)
>>> print(a, b)
# [1 2] [1 3]
"""
if fix_imports is not np._NoValue:
warnings.warn(
"The 'fix_imports' flag is deprecated and has no effect. "
"(Deprecated in NumPy 2.1)",
DeprecationWarning, stacklevel=2)
if hasattr(file, 'write'):
file_ctx = contextlib.nullcontext(file)
else:
file = os.fspath(file)
if not file.endswith('.npy'):
file = file + '.npy'
file_ctx = open(file, "wb")
with file_ctx as fid:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=dict(fix_imports=fix_imports))
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
return (arr,)
def _savez_dispatcher(file, *args, **kwds):
    # Arrays relevant for __array_function__ dispatch: every positional
    # array followed by every keyword array, in order.
    for positional in args:
        yield positional
    for named in kwds.values():
        yield named
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
"""Save several arrays into a single file in uncompressed ``.npz`` format.
Provide arrays as keyword arguments to store them under the
corresponding name in the output file: ``savez(fn, x=x, y=y)``.
If arrays are specified as positional arguments, i.e., ``savez(fn,
x, y)``, their names will be `arr_0`, `arr_1`, etc.
Parameters
----------
file : file, str, or pathlib.Path
Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Please use keyword arguments (see
`kwds` below) to assign names to arrays. Arrays specified as
args will be named "arr_0", "arr_1", and so on.
kwds : Keyword arguments, optional
Arrays to save to the file. Each array will be saved to the
output file with its corresponding keyword name.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile`
object is returned. This is a dictionary-like object which can be queried
for its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Keys passed in `kwds` are used as filenames inside the ZIP archive.
Therefore, keys should be valid filenames; e.g., avoid keys that begin with
``/`` or contain ``.``.
When naming variables with keyword arguments, it is not possible to name a
variable ``file``, as this would cause the ``file`` argument to be defined
twice in the call to ``savez``.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_0', 'arr_1']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
"""
pass
>>> npzfile = np.load(outfile)
>>> sorted(npzfile.files)
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
# 调用 _savez 函数,将参数 file、args、kwds 和 False 传递给它
_savez(file, args, kwds, False)
# 导入必要的 zipfile 模块,用于处理压缩文件
import zipfile
# 如果传入的文件参数没有 'write' 方法,则将其转换为文件路径字符串
if not hasattr(file, 'write'):
file = os.fspath(file)
# 如果文件路径不以 '.npz' 结尾,则添加 '.npz' 后缀
if not file.endswith('.npz'):
file = file + '.npz'
# 将关键字参数(即传入的数组)存储到 namedict 字典中
namedict = kwds
# 遍历参数列表并为每个参数创建一个键,格式为'arr_i',其中i是参数的索引
for i, val in enumerate(args):
key = 'arr_%d' % i
# 检查当前键是否已存在于命名字典中,如果存在则抛出数值错误异常
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
# 将当前键值对添加到命名字典中
namedict[key] = val
# 根据压缩选项确定压缩类型:如果压缩为True,则使用ZIP_DEFLATED进行压缩,否则使用ZIP_STORED
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
# 使用工厂函数创建一个zip文件对象,指定文件名和打开模式为写入,同时指定压缩类型
zipf = zipfile_factory(file, mode="w", compression=compression)
# 遍历命名字典中的每个键值对
for key, val in namedict.items():
# 为当前值生成文件名,形式为'arr_i.npy'
fname = key + '.npy'
# 将值转换为NumPy数组(如果尚未是数组的话)
val = np.asanyarray(val)
# 使用zip文件对象的open方法打开当前文件名对应的文件,以写入模式,强制使用ZIP64格式
with zipf.open(fname, 'w', force_zip64=True) as fid:
# 使用格式化对象的write_array方法将数组写入到文件中
format.write_array(fid, val,
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
# 关闭zip文件对象,完成写入操作
zipf.close()
# Ensure that the ndmin parameter is one of the values supported by
# _ensure_ndmin_ndarray.
def _ensure_ndmin_ndarray_check_param(ndmin):
    """Validate the ``ndmin`` keyword before doing any expensive work.

    Intended as a cheap up-front verification for loadtxt/genfromtxt;
    mirrors the values `_ensure_ndmin_ndarray` supports.

    Raises
    ------
    ValueError
        If ``ndmin`` is not one of 0, 1 or 2.
    """
    if ndmin not in (0, 1, 2):
        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
def _ensure_ndmin_ndarray(a, *, ndmin: int):
    """Return *a* with the minimum dimensionality requested by ``ndmin``.

    Helper for loadtxt and genfromtxt.  Supported ``ndmin`` values are
    0, 1 and 2 — keep in sync with `_ensure_ndmin_ndarray_check_param`.

    Superfluous singleton dimensions are squeezed away first; the result
    is then padded back up to ``ndmin`` dimensions when necessary.
    """
    if a.ndim > ndmin:
        a = np.squeeze(a)
    if a.ndim >= ndmin:
        return a
    # Pad back up — covers odd cases such as ndmin=1 with a squeezed
    # 0-d result.
    if ndmin == 1:
        return np.atleast_1d(a)
    if ndmin == 2:
        # Transpose so a 1-D input becomes a column vector.
        return np.atleast_2d(a).T
    return a
# Number of rows loadtxt reads in one chunk; may be overridden for testing
# purposes.
_loadtxt_chunksize = 50000
def _check_nonneg_int(value, name="argument"):
    """Raise unless *value* is an integer-like object that is >= 0.

    Raises
    ------
    TypeError
        If *value* does not implement ``__index__``.
    ValueError
        If *value* is negative.
    """
    # The index check must run first: comparing a non-number below would
    # raise its own (less helpful) TypeError.
    try:
        operator.index(value)
    except TypeError:
        raise TypeError(f"{name} must be an integer") from None
    if value < 0:
        raise ValueError(f"{name} must be nonnegative")
def _preprocess_comments(iterable, comments, encoding):
    """
    Yield the lines of *iterable* with every comment marker in *comments*
    — including multi-character markers — and anything following it
    stripped off.

    Pre-processing step used to reach feature parity with loadtxt
    (multiple/multi-character comments are assumed to be a niche feature).
    """
    for raw_line in iterable:
        # Bytes must be decoded up front, otherwise splitting on string
        # markers below would fail.
        if isinstance(raw_line, bytes):
            raw_line = raw_line.decode(encoding)
        for marker in comments:
            raw_line = raw_line.split(marker, 1)[0]
        yield raw_line
# Number of lines read in one go when encountering a parametrized dtype.
# NOTE(review): re-assigns the same value as the earlier module-level
# definition; harmless but redundant.
_loadtxt_chunksize = 50000
def _read(fname, *, delimiter=',', comment='#', quote='"',
imaginary_unit='j', usecols=None, skiplines=0,
max_rows=None, converters=None, ndmin=None, unpack=False,
dtype=np.float64, encoding=None):
r"""
从文本文件中读取一个 NumPy 数组。
这是 loadtxt 的辅助函数。
Parameters
----------
fname : file, str, or pathlib.Path
要读取的文件名或文件路径。
delimiter : str, optional
文件中字段的分隔符。
默认为逗号 ','。如果为 None,则任何空白序列都视为分隔符。
comment : str or sequence of str or None, optional
quote : str or None, optional
imaginary_unit : str, optional
usecols : array_like, optional
skiplines : int, optional
max_rows : int, optional
converters : dict or callable, optional
ndmin : int, optional
unpack : bool, optional
dtype : numpy data type
encoding : str, optional
Returns
-------
ndarray
NumPy array.
"""
# 处理特殊的 'bytes' 编码关键字
byte_converters = False
if encoding == 'bytes':
encoding = None
byte_converters = True
# 如果未提供 dtype 参数,则抛出类型错误异常
if dtype is None:
raise TypeError("a dtype must be provided.")
# 将 dtype 转换为 NumPy 的数据类型实例
dtype = np.dtype(dtype)
# 用于对象块间通过 dtype 读取
read_dtype_via_object_chunks = None
# 如果 dtype 的类型是 'SUM' 并且满足以下条件之一:
# dtype 等于 "S0"、"U0"、"M8" 或 'm8',则表示这是一个旧的“灵活”dtype。
# 目前核心代码中不真正支持参数化的 dtype(核心中没有 dtype 探测步骤),
# 但为了向后兼容性,我们必须支持这些类型。
if dtype.kind in 'SUM' and (
dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
# 将 dtype 保存到 read_dtype_via_object_chunks 变量中
read_dtype_via_object_chunks = dtype
# 将 dtype 设置为通用的 object 类型
dtype = np.dtype(object)
# 如果 usecols 不为 None,则进行以下处理:
if usecols is not None:
# 尝试将 usecols 转换为整数列表,如果无法转换,则将其视为单个整数
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols]
# 检查和确保 ndmin 的值符合数组的最小维度要求
_ensure_ndmin_ndarray_check_param(ndmin)
# 如果 comment 为 None,则将 comments 设置为 None;否则进行如下处理:
if comment is None:
comments = None
else:
# 假设 comments 是一个字符串序列,如果其中包含空字符串,则抛出异常
if "" in comment:
raise ValueError(
"comments cannot be an empty string. Use comments=None to "
"disable comments."
)
# 将 comments 转换为元组类型
comments = tuple(comment)
# 将 comment 设置为 None,表示不使用单个字符作为 comment
comment = None
# 如果 comments 长度为 0,则将其设置为 None,表示不需要注释
if len(comments) == 0:
comments = None # 没有任何注释
# 如果 comments 长度为 1,则进一步判断:
elif len(comments) == 1:
# 如果只有一个注释,并且该注释只有一个字符,则正常解析即可处理
if isinstance(comments[0], str) and len(comments[0]) == 1:
comment = comments[0]
comments = None
else:
# 如果有多个注释字符,则进行输入验证
if delimiter in comments:
raise TypeError(
f"Comment characters '{comments}' cannot include the "
f"delimiter '{delimiter}'"
)
# 现在 comment 可能是一个 1 或 0 个字符的字符串,或者是一个元组
if comments is not None:
# 注意:早期版本支持两个字符的注释(并且可以扩展到多个字符),我们假设这种情况不常见,因此不做优化处理。
if quote is not None:
raise ValueError(
"when multiple comments or a multi-character comment is "
"given, quotes are not supported. In this case quotechar "
"must be set to None.")
# 检查虚数单位 imaginary_unit 的长度是否为 1
if len(imaginary_unit) != 1:
raise ValueError('len(imaginary_unit) must be 1.')
# 检查 skiplines 是否为非负整数
_check_nonneg_int(skiplines)
# 如果 max_rows 不为 None,则检查其是否为非负整数;否则将 max_rows 设置为 -1,表示读取整个文件。
if max_rows is not None:
_check_nonneg_int(max_rows)
else:
max_rows = -1 # 将 -1 传递给 C 代码表示“读取整个文件”。
# 创建一个空的上下文管理器作为文件句柄的关闭上下文,默认 filelike 为 False
fh_closing_ctx = contextlib.nullcontext()
filelike = False
try:
# 如果 fname 是 os.PathLike 类型,将其转换为字符串路径
if isinstance(fname, os.PathLike):
fname = os.fspath(fname)
# 如果 fname 是字符串类型
if isinstance(fname, str):
# 使用 numpy 的 _datasource 模块打开文件以供读取,并指定以文本模式打开
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
# 如果 encoding 参数为 None,则尝试从文件句柄中获取编码,否则使用 'latin1' 编码
if encoding is None:
encoding = getattr(fh, 'encoding', 'latin1')
# 使用 contextlib.closing 包装文件句柄,以便在退出上下文时自动关闭
fh_closing_ctx = contextlib.closing(fh)
# 将数据指向文件句柄
data = fh
# 标记文件类型为类文件对象
filelike = True
else:
# 如果 fname 不是字符串类型
# 如果 encoding 参数为 None,则尝试从 fname 对象中获取编码,否则使用 'latin1' 编码
if encoding is None:
encoding = getattr(fname, 'encoding', 'latin1')
# 将数据指向 fname 对象的迭代器
data = iter(fname)
# 捕获 TypeError 异常
except TypeError as e:
# 抛出 ValueError 异常,说明传入的 fname 参数类型不正确
raise ValueError(
f"fname must be a string, filehandle, list of strings,\n"
f"or generator. Got {type(fname)} instead.") from e
with fh_closing_ctx:
# 使用上下文管理器 fh_closing_ctx 进行文件句柄的安全关闭操作
if comments is not None:
# 如果 comments 参数不为 None,则需要预处理数据中的注释信息
if filelike:
# 如果 filelike 标志为 True,则将 data 转换为迭代器
data = iter(data)
filelike = False
# 对数据进行预处理,处理其中的注释信息和编码
data = _preprocess_comments(data, comments, encoding)
if read_dtype_via_object_chunks is None:
# 如果 read_dtype_via_object_chunks 为 None,则直接从文件对象中加载数据到 arr
arr = _load_from_filelike(
data, delimiter=delimiter, comment=comment, quote=quote,
imaginary_unit=imaginary_unit,
usecols=usecols, skiplines=skiplines, max_rows=max_rows,
converters=converters, dtype=dtype,
encoding=encoding, filelike=filelike,
byte_converters=byte_converters)
else:
# 如果 read_dtype_via_object_chunks 不为 None,则使用对象数组的方式读取文件,并转换为指定的 dtype
# 这种方法确保正确发现字符串长度和日期时间单位(例如 `arr.astype()`)
# 由于分块处理,某些错误报告可能不太清晰,目前如此。
if filelike:
# 如果 filelike 标志为 True,则无法在从文件中读取时使用分块处理
data = iter(data)
c_byte_converters = False
if read_dtype_via_object_chunks == "S":
# 如果 read_dtype_via_object_chunks 是 "S",则使用 latin1 编码而不是 ascii
c_byte_converters = True
chunks = []
while max_rows != 0:
# 循环读取数据直到达到 max_rows 为止
if max_rows < 0:
chunk_size = _loadtxt_chunksize
else:
chunk_size = min(_loadtxt_chunksize, max_rows)
# 从文件对象中加载数据块并转换为指定的 dtype
next_arr = _load_from_filelike(
data, delimiter=delimiter, comment=comment, quote=quote,
imaginary_unit=imaginary_unit,
usecols=usecols, skiplines=skiplines, max_rows=max_rows,
converters=converters, dtype=dtype,
encoding=encoding, filelike=filelike,
byte_converters=byte_converters,
c_byte_converters=c_byte_converters)
# 在此处进行类型转换。我们希望这样做对于大文件更好,因为存储更紧凑。
# 可以适应(原则上连接可以进行类型转换)。
chunks.append(next_arr.astype(read_dtype_via_object_chunks))
skiprows = 0 # 只需对第一个块进行跳过行数操作
if max_rows >= 0:
max_rows -= chunk_size
if len(next_arr) < chunk_size:
# 请求的数据量少于块大小,则表示已经完成读取。
break
# 至少需要一个块,但如果为空,则最后一个块可能有错误的形状。
if len(chunks) > 1 and len(chunks[-1]) == 0:
del chunks[-1]
if len(chunks) == 1:
arr = chunks[0]
else:
# 将所有块连接成一个数组
arr = np.concatenate(chunks, axis=0)
# 注意:对于结构化的 dtype,ndmin 的功能如广告所述,但通常情况下...
# 确保数组至少具有指定的最小维数,同时保持数组的原始结构维度。
# 如果数组本身是一维的,则 ndmin=2 将添加一个额外的维度,即使没有进行挤压操作。
# 在某些情况下,使用 `squeeze=False` 可能是一个更好的解决方案(pandas 使用挤压操作)。
arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)
# 检查数组的形状是否非空
if arr.shape:
# 如果数组的第一个维度长度为 0,则发出警告,指出输入数据不包含任何内容。
warnings.warn(
f'loadtxt: input contained no data: "{fname}"',
category=UserWarning,
stacklevel=3
)
# 如果设置了 unpack 参数
if unpack:
# 获取数组的数据类型
dt = arr.dtype
# 如果数据类型具有字段名(即结构化数组)
if dt.names is not None:
# 对于结构化数组,返回每个字段的数组
return [arr[field] for field in dt.names]
else:
# 对于非结构化数组,返回转置后的数组
return arr.T
else:
# 如果未设置 unpack 参数,直接返回原始数组
return arr
# 设置装饰器,使该函数的文档类似于数组函数的文档
# 设置函数的模块名称为 'numpy'
@set_array_function_like_doc
@set_module('numpy')
# 定义函数 loadtxt,用于从文本文件加载数据
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding=None, max_rows=None, *, quotechar=None,
like=None):
r"""
从文本文件加载数据。
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
要读取的文件、文件名、列表或生成器。如果文件名的扩展名为 ``.gz`` 或 ``.bz2``,则首先解压文件。
注意,生成器必须返回字节或字符串。列表中的字符串或生成器生成的字符串将被视为行。
dtype : data-type, optional
结果数组的数据类型;默认为 float。如果这是结构化数据类型,则结果数组将是一维的,并且每行将被解释为数组的一个元素。
在这种情况下,使用的列数必须与数据类型中的字段数相匹配。
comments : str or sequence of str or None, optional
用于指示注释开始的字符或字符列表。None 表示没有注释。为了向后兼容,字节字符串将解码为 'latin1'。默认为 '#'.
delimiter : str, optional
用于分隔值的字符。为了向后兼容,字节字符串将解码为 'latin1'。默认为空白字符。
.. versionchanged:: 1.23.0
仅支持单个字符分隔符。不能使用换行符作为分隔符。
converters : dict or callable, optional
自定义值解析的转换器函数。如果 `converters` 是可调用的,则该函数将应用于所有列;否则,它必须是将列号映射到解析器函数的字典。
更多详细信息请参见示例。
默认值为 None。
.. versionchanged:: 1.23.0
添加了将单个可调用函数传递给所有列的功能。
skiprows : int, optional
跳过前 `skiprows` 行,包括注释;默认为 0。
usecols : int or sequence, optional
要读取的列,从 0 开始计数。例如,``usecols = (1,4,5)`` 将提取第 2、第 5 和第 6 列。
默认为 None,表示读取所有列。
.. versionchanged:: 1.11.0
当需要读取单个列时,可以使用整数而不是元组。例如,``usecols = 3`` 读取第四列,与 ``usecols = (3,)`` 的效果相同。
unpack : bool, optional
如果为 True,则返回的数组进行转置,以便可以使用 ``x, y, z = loadtxt(...)`` 进行拆包。
当与结构化数据类型一起使用时,为每个字段返回数组。
默认为 False。
ndmin : int, optional
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
encoding : str, optional
特殊值 'bytes' 启用向后兼容的工作,确保尽可能以字节数组形式接收结果,并将 'latin1' 编码的字符串传递给转换器。
覆盖此值以接收 Unicode 数组,并将字符串作为输入传递给转换器。如果设置为 None,则使用系统默认值。默认值为 'bytes'。
.. versionadded:: 1.14.0
.. versionchanged:: 2.0
在 NumPy 2 之前,默认为 ``'bytes'``,用于 Python 2 兼容性。现在默认为 ``None``。
max_rows : int, optional
注意,不计入 `max_rows` 的空行(如空行和注释行),而这些行在 `skiprows` 中计数。
.. versionadded:: 1.16.0
.. versionchanged:: 1.23.0
不计入不包含数据的行,包括注释行(例如以 '#' 开头或通过 `comments` 指定的行),以计算 `max_rows`。
quotechar : unicode character or None, optional
在引用项内部忽略定界符或注释字符的出现。默认值为 ``quotechar=None``,表示禁用引用支持。
如果在引用字段内找到两个连续的 `quotechar` 实例,则第一个将被视为转义字符。请参见示例。
.. versionadded:: 1.23.0
${ARRAY_FUNCTION_LIKE}
.. versionadded:: 1.20.0
Returns
-------
out : ndarray
See Also
--------
load, fromstring, fromregex
genfromtxt : 以指定方式处理缺失值的加载数据。
scipy.io.loadmat : 读取 MATLAB 数据文件
Notes
-----
`genfromtxt` 函数提供更复杂的处理,例如具有缺失值的行。
输入文本文件中的每行必须具有相同数量的值,才能读取所有值。
如果所有行的值数量不同,则可以通过 `usecols` 指定列来读取最多 n 列(其中 n 是所有行中最少的值的数量)。
.. versionadded:: 1.10.0
Python 的 float.hex 方法生成的字符串可用作浮点数的输入。
Examples
--------
>>> from io import StringIO
>>> c = StringIO("0 1\n2 3")
>>> np.loadtxt(c)
array([[0., 1.],
[2., 3.]])
>>> d = StringIO("M 21 72\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([(b'M', 21, 72.), (b'F', 35, 58.)],
dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([1., 3.])
>>> y
array([2., 4.])
The `converters` argument is used to specify functions to preprocess the
text prior to parsing. `converters` can be a dictionary that maps
preprocessing functions to each column:
>>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
>>> conv = {
... 0: lambda x: np.floor(float(x)),
... 1: lambda x: np.ceil(float(x)),
... }
>>> np.loadtxt(s, delimiter=",", converters=conv)
array([[1., 3.],
[3., 5.]])
`converters` can be a callable instead of a dictionary, in which case it
is applied to all columns:
>>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
>>> import functools
>>> conv = functools.partial(int, base=16)
>>> np.loadtxt(s, converters=conv)
array([[222., 173.],
[192., 222.]])
This example shows how `converters` can be used to convert a field
with a trailing minus sign into a negative number.
>>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
>>> def conv(fld):
... return -float(fld[:-1]) if fld.endswith("-") else float(fld)
...
>>> np.loadtxt(s, converters=conv)
array([[ 10.01, -31.25],
[ 19.22, 64.31],
[-17.57, 63.94]])
Using a callable as the converter can be particularly useful for handling
values with different formatting, e.g. floats with underscores:
>>> s = StringIO("1 2.7 100_000")
>>> np.loadtxt(s, converters=float)
array([1.e+00, 2.7e+00, 1.e+05])
This idea can be extended to automatically handle values specified in
many different formats, such as hex values:
>>> def conv(val):
... try:
... return float(val)
... except ValueError:
... return float.fromhex(val)
>>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
>>> np.loadtxt(s, delimiter=",", converters=conv)
array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
Or a format where the ``-`` sign comes after the number:
>>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")
>>> conv = lambda x: -float(x[:-1]) if x.endswith("-") else float(x)
>>> np.loadtxt(s, converters=conv)
array([[ 10.01, -31.25],
[ 19.22, 64.31],
[-17.57, 63.94]])
Support for quoted fields is enabled with the `quotechar` parameter.
if like is not None:
return _loadtxt_with_like(
like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
converters=converters, skiprows=skiprows, usecols=usecols,
unpack=unpack, ndmin=ndmin, encoding=encoding,
max_rows=max_rows
)
if isinstance(delimiter, bytes):
delimiter.decode("latin1")
if dtype is None:
dtype = np.float64
comment = comments
if comment is not None:
if isinstance(comment, (str, bytes)):
comment = [comment]
comment = [
x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
if isinstance(delimiter, bytes):
delimiter = delimiter.decode('latin1')
arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
converters=converters, skiplines=skiprows, usecols=usecols,
unpack=unpack, ndmin=ndmin, encoding=encoding,
max_rows=max_rows, quote=quotechar)
return arr
_loadtxt_with_like = array_function_dispatch()(loadtxt)
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
header=None, footer=None, comments=None,
encoding=None):
return (X,)
@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# ', encoding=None):
"""
Save an array to a text file.
Parameters
----------
fname : filename, file handle or pathlib.Path
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
* a single specifier, ``fmt='%.4e'``, resulting in numbers formatted
like ``' (%s+%sj)' % (fmt, fmt)``
* a full string specifying every real and imaginary part, e.g.
``' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'`` for 3 columns
* a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. ``['%.3e + %.3ej', '(%.15e%+.15ej)']`` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
encoding : {None, str}, optional
Encoding used to encode the outputfile. Does not apply to output
streams. If the encoding is something other than 'bytes' or 'latin1'
you will not be able to load the file in NumPy versions < 1.14. Default
is 'latin1'.
.. versionadded:: 1.14.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
"""
own_fh = False
如果 fname 是 os.PathLike 的实例,转换为路径字符串
if isinstance(fname, os.PathLike):
fname = os.fspath(fname)
如果 fname 类型类似字符串:
open(fname, 'wt').close()
使用 np.lib._datasource 打开 fname 文件进行写入操作,指定编码为 encoding
fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
own_fh = True
否则,如果 fname 具有 'write' 属性:
fh = WriteWrap(fname, encoding or 'latin1')
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
if X.ndim == 0 or X.ndim > 2:
raise ValueError(
"Expected 1D or 2D array, got %dD array instead" % X.ndim)
elif X.ndim == 1:
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
else:
ncol = len(X.dtype.names)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = delimiter.join(fmt)
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(comments + header + newline)
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
s = format % tuple(row2) + newline
fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
v = format % tuple(row) + newline
except TypeError as e:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format)) from e
fh.write(v)
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(comments + footer + newline)
finally:
if own_fh:
fh.close()
@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
r"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : file, str, or pathlib.Path
Filename or file object to read.
.. versionchanged:: 1.22.0
Now accepts `os.PathLike` implementations.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array; must be a structured datatype.
encoding : str, optional
Encoding used to decode the inputfile. Does not apply to input streams.
.. versionadded:: 1.14.0
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`basics.rec`.
Examples
--------
>>> from io import StringIO
>>> text = StringIO("1312 foo\n1534 bar\n444 qux")
>>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex(text, regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
dtype=[('num', '<i8'), ('key', 'S3')])
>>> output['num']
array([1312, 1534, 444])
"""
own_fh = False
if not hasattr(file, "read"):
file = os.fspath(file)
file = np.lib._datasource.open(file, 'rt', encoding=encoding)
own_fh = True
try:
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if dtype.names is None:
raise TypeError('dtype must be a structured datatype.')
content = file.read()
if isinstance(content, bytes) and isinstance(regexp, str):
regexp = asbytes(regexp)
if not hasattr(regexp, 'match'):
regexp = re.compile(regexp)
seq = regexp.findall(content)
if seq and not isinstance(seq[0], tuple):
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
@set_array_function_like_doc
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skip_header=0, skip_footer=0, converters=None,
               missing_values=None, filling_values=None, usecols=None,
               names=None, excludelist=None,
               deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
               replace_space='_', autostrip=False, case_sensitive=True,
               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
               invalid_raise=True, max_rows=None, encoding=None,
               *, ndmin=0, like=None):
    """
    Load data from a text file, with missing values handled as specified.

    Each line past the first `skip_header` lines is split at the `delimiter`
    character, and characters following the `comments` character are
    discarded.

    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read.  If the filename
        extension is ``.gz`` or ``.bz2``, the file is first decompressed.
        Note that generators must return bytes or strings.  The strings in
        a list or produced by a generator are treated as lines.
    dtype : dtype, optional
        Data type of the resulting array.  If None, the dtypes will be
        determined by the contents of each column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.  All the
        characters occurring on a line after a comment are discarded.
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespaces act as delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
    converters : variable, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value for
        missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing_values : variable, optional
        The set of strings corresponding to missing data.
    filling_values : variable, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first line
        after the first `skip_header` lines.  This line can optionally be
        preceded by a comment delimiter; anything before it is discarded.
        If `names` is a sequence or a single comma-separated string, the
        names are used as field names of a structured dtype.  If `names`
        is None, the names of the dtype fields are used, if any.
    excludelist : sequence, optional
        A list of names to exclude.  This list is appended to the default
        list ['return', 'file', 'print'].  Excluded names are appended with
        an underscore: for example, 'file' becomes 'file_'.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from
        the names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or
        "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    replace_space : char, optional
        Character(s) used in replacement of white spaces in the variable
        names.  By default, use '_'.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.  If False or 'upper',
        field names are converted to upper case.  If 'lower', field names
        are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may
        be unpacked using ``x, y, z = genfromtxt(...)``.  When used with a
        structured data-type, arrays are returned for each field.  Default
        is False.
    usemask : bool, optional
        If True, return a masked array.  If False, return a regular array.
    loose : bool, optional
        If True, do not raise errors for invalid values.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in
        the number of columns.  If False, a warning is emitted and the
        offending lines are skipped.
    max_rows : int, optional
        The maximum number of rows to read.  Must not be used at the same
        time as `skip_footer`.  If given, the value must be at least 1.
        Default is to read the entire file.
    encoding : str, optional
        Encoding used to decode the inputfile.  Does not apply when
        `fname` is a file object.  The special value 'bytes' enables
        backward compatibility workarounds that ensure that you receive
        byte arrays when possible and passes latin1-encoded strings to
        converters.  Use None for the system default.
    ndmin : int, optional
        Same parameter as `loadtxt`.
    like : array_like, optional
        Reference object to allow the creation of arrays which are not
        NumPy arrays.

    Returns
    -------
    out : ndarray
        Data read from the text file.  If `usemask` is True, this is a
        masked array.

    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.

    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been
      given as input, there should not be any missing data between two
      fields.
    * When variables are named (either by a flexible dtype or with
      `names`), there must not be any header in the file (else a
      ValueError exception is raised).
    * Individual values are not stripped of spaces by default.  When using
      a custom converter, make sure the function does remove spaces.
    * Custom converters may receive unexpected values due to dtype
      discovery.
    """
    # `like=` protocol: delegate to the __array_function__ implementation.
    if like is not None:
        return _genfromtxt_with_like(
            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
            skip_header=skip_header, skip_footer=skip_footer,
            converters=converters, missing_values=missing_values,
            filling_values=filling_values, usecols=usecols, names=names,
            excludelist=excludelist, deletechars=deletechars,
            replace_space=replace_space, autostrip=autostrip,
            case_sensitive=case_sensitive, defaultfmt=defaultfmt,
            unpack=unpack, usemask=usemask, loose=loose,
            invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
            ndmin=ndmin,
        )

    # NOTE(review): `_ensure_ndmin_ndarray_check_param` /
    # `_ensure_ndmin_ndarray` are not imported in the visible header —
    # presumably imported elsewhere in the file; confirm.
    _ensure_ndmin_ndarray_check_param(ndmin)

    # `max_rows` and `skip_footer` are mutually exclusive.
    if max_rows is not None:
        if skip_footer:
            raise ValueError(
                "The keywords 'skip_footer' and 'max_rows' can not be "
                "specified at the same time.")
        if max_rows < 1:
            raise ValueError("'max_rows' must be at least 1.")

    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr

    # Check the input dictionary of converters.
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        raise TypeError(
            "The input argument 'converter' should be a valid dictionary "
            "(got '%s' instead)" % type(user_converters))

    # 'bytes' keeps the pre-1.14 behaviour: work on latin1-decoded bytes.
    if encoding == 'bytes':
        encoding = None
        byte_converters = True
    else:
        byte_converters = False

    # Initialize the filehandle, the LineSplitter and the NameValidator.
    if isinstance(fname, os.PathLike):
        fname = os.fspath(fname)
    if isinstance(fname, str):
        fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
        fid_ctx = contextlib.closing(fid)
    else:
        fid = fname
        fid_ctx = contextlib.nullcontext(fid)
    try:
        fhd = iter(fid)
    except TypeError as e:
        raise TypeError(
            "fname must be a string, a filehandle, a sequence of strings,\n"
            f"or an iterator of strings. Got {type(fname)} instead."
        ) from e

    # NOTE(review): this excerpt is missing the section that consumes `fhd`
    # and builds `rows`, `masks`, `invalid`, `nbcols`, `converters` and
    # `dtype_flat`; those names are referenced below but never assigned in
    # the visible code.  Compare against the full upstream source.

    # Upgrade the converters (if needed).
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = [itemgetter(i)(_m) for _m in rows]
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = map(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)

    # Check that we don't have invalid values.
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message.
        template = " Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbinvalid_skipped = len([_ for _ in invalid
                                     if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
            nbrows -= skip_footer
        errmsg = [template % (i, nb)
                  for (i, nb) in invalid]

    # Strip the last skip_footer data.
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]

    # Convert each value according to the converter;
    # we want to modify the list in place to avoid creating a new one.
    if loose:
        rows = list(
            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    else:
        rows = list(
            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))

    # Reset the dtype.
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters.
        column_types = [conv.type for conv in converters]
        # Find the columns with strings.
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v == np.str_]

        if byte_converters and strcolidx:
            # convert strings back to bytes for backward compatibility
            warnings.warn(
                "Reading unicode strings without specifying the encoding "
                "argument is deprecated. Set the encoding, use None for the "
                "system default.",
                np.exceptions.VisibleDeprecationWarning, stacklevel=2)

            def encode_unicode_cols(row_tup):
                row = list(row_tup)
                for i in strcolidx:
                    row[i] = row[i].encode('latin1')
                return tuple(row)

            try:
                data = [encode_unicode_cols(r) for r in data]
            except UnicodeEncodeError:
                pass
            else:
                for i in strcolidx:
                    column_types[i] = np.bytes_

        # Update string types to be the right length.
        sized_column_types = column_types[:]
        for i, col_type in enumerate(column_types):
            if np.issubdtype(col_type, np.character):
                n_chars = max(len(row[i]) for row in data)
                sized_column_types[i] = (col_type, n_chars)

        if names is None:
            # If the dtype is uniform (before sizing strings), use it.
            base = {
                c_type
                for c, c_type in zip(converters, column_types)
                if c._checked}
            if len(base) == 1:
                uniform_type, = base
                (ddtype, mdtype) = (uniform_type, bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(sized_column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, bool)
                              for (i, dt) in enumerate(sized_column_types)]
        else:
            ddtype = list(zip(names, sized_column_types))
            mdtype = list(zip(names, [bool] * len(sized_column_types)))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed.
        if names and dtype.names is not None:
            dtype.names = names
        # Case 1. We have a structured type.
        if len(dtype_flat) > 1:
            # Nested dtypes with objects are not supported.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    raise NotImplementedError(
                        "Nested fields involving objects are not supported...")
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way.
            if usemask:
                rowmasks = np.array(
                    masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
                # Construct the new dtype.
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case 2. We have a basic dtype.
        else:
            # We used some user-defined converters.
            if user_converters:
                ishomogeneous = True
                descr = []
                for i, ttype in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter.
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if np.issubdtype(ttype, np.character):
                            ttype = (ttype, max(len(row[i]) for row in data))
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype?
                if not ishomogeneous:
                    # We have more than one field.
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            output = np.array(data, dtype)
            if usemask:
                if dtype.names is not None:
                    mdtype = [(_, bool) for _ in dtype.names]
                else:
                    mdtype = bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed.
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names, converters):
            missing_values = [conv(_) for _ in conv.missing_values if _ != '']
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array.
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask

    output = _ensure_ndmin_ndarray(output, ndmin=ndmin)

    if unpack:
        if names is None:
            return output.T
        elif len(names) == 1:
            # For a single field: return that field only.
            return output[names[0]]
        else:
            # For a structured type: unpack into one array per field.
            return [output[field] for field in names]
    return output


# `like=` dispatch helper used by the branch at the top of genfromtxt.
_genfromtxt_with_like = array_function_dispatch()(genfromtxt)
def recfromtxt(fname, **kwargs):
"""
从文件中加载 ASCII 数据,并以记录数组的形式返回。
如果 `usemask=False`,则返回标准的 `recarray`,
如果 `usemask=True`,则返回一个 MaskedRecords 数组。
.. deprecated:: 2.0
使用 `numpy.genfromtxt` 替代。
Parameters
----------
fname, kwargs : 输入参数的描述,请参见 `genfromtxt`。
See Also
--------
numpy.genfromtxt : 通用函数
Notes
-----
默认情况下,`dtype` 是 None,这意味着输出数组的数据类型将从数据中确定。
"""
warnings.warn(
"`recfromtxt` 已弃用,"
"请使用 `numpy.genfromtxt` 替代。"
"(在 NumPy 2.0 中弃用)",
DeprecationWarning,
stacklevel=2
)
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``, see
    `ma.mrecords.MaskedRecords`).

    .. deprecated:: 2.0
        Use `numpy.genfromtxt` with comma as `delimiter` instead.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the
    output array will be determined from the data.
    """
    # Library warnings must be in English (the original text was Chinese).
    warnings.warn(
        "`recfromcsv` is deprecated, "
        "use `numpy.genfromtxt` with comma as `delimiter` instead. "
        "(deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )

    # Set default kwargs for genfromtxt as relevant to csv import.
    kwargs.setdefault("case_sensitive", "lower")
    kwargs.setdefault("names", True)
    kwargs.setdefault("delimiter", ",")
    kwargs.setdefault("dtype", None)
    output = genfromtxt(fname, **kwargs)

    usemask = kwargs.get("usemask", False)
    if usemask:
        from numpy.ma.mrecords import MaskedRecords
        output = output.view(MaskedRecords)
    else:
        output = output.view(np.recarray)
    return output
.\numpy\numpy\lib\_npyio_impl.pyi
import os
import sys
import zipfile
import types
from re import Pattern
from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable
from typing import (
Literal as L,
Any,
TypeVar,
Generic,
IO,
overload,
Protocol,
)
from numpy import (
ndarray,
recarray,
dtype,
generic,
float64,
void,
record,
)
from numpy.ma.mrecords import MaskedRecords
from numpy._typing import (
ArrayLike,
DTypeLike,
NDArray,
_DTypeLike,
_SupportsArrayFunc,
)
from numpy._core.multiarray import (
packbits as packbits,
unpackbits as unpackbits,
)
_T = TypeVar("_T")
_T_contra = TypeVar("_T_contra", contravariant=True)
_T_co = TypeVar("_T_co", covariant=True)
_SCT = TypeVar("_SCT", bound=generic)
_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
# Structural protocol: anything indexable with keys of _T_contra.
class _SupportsGetItem(Protocol[_T_contra, _T_co]):
    def __getitem__(self, key: _T_contra, /) -> _T_co: ...
# Structural protocol: a readable object yielding str or bytes.
class _SupportsRead(Protocol[_CharType_co]):
    def read(self) -> _CharType_co: ...
# Structural protocol: a seekable, readable stream (as needed by `load`).
class _SupportsReadSeek(Protocol[_CharType_co]):
    def read(self, n: int, /) -> _CharType_co: ...
    def seek(self, offset: int, whence: int, /) -> object: ...
# Structural protocol: a writable object accepting str or bytes.
class _SupportsWrite(Protocol[_CharType_contra]):
    def write(self, s: _CharType_contra, /) -> object: ...
__all__: list[str]
class BagObj(Generic[_T_co]):
    # Stub: exposes ``obj[key]`` lookups as attribute access (see the
    # BagObj implementation's docstring in _npyio_impl.py).
    def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
    def __getattribute__(self, key: str) -> _T_co: ...
    def __dir__(self) -> list[str]: ...
class NpzFile(Mapping[str, NDArray[Any]]):
    # Stub: mapping-style, lazily-loaded view of a ``.npz`` archive.
    zip: zipfile.ZipFile
    fid: None | IO[str]
    files: list[str]
    allow_pickle: bool
    pickle_kwargs: None | Mapping[str, Any]
    _MAX_REPR_ARRAY_COUNT: int
    # `f` gives attribute-style access to the archive's arrays via BagObj.
    @property
    def f(self: _T) -> BagObj[_T]: ...
    @f.setter
    def f(self: _T, value: BagObj[_T]) -> None: ...
    def __init__(
        self,
        fid: IO[str],
        own_fid: bool = ...,
        allow_pickle: bool = ...,
        pickle_kwargs: None | Mapping[str, Any] = ...,
    ) -> None: ...
    # Context-manager support: closes the archive on exit.
    def __enter__(self: _T) -> _T: ...
    def __exit__(
        self,
        exc_type: None | type[BaseException],
        exc_value: None | BaseException,
        traceback: None | types.TracebackType,
        /,
    ) -> None: ...
    def close(self) -> None: ...
    def __del__(self) -> None: ...
    def __iter__(self) -> Iterator[str]: ...
    def __len__(self) -> int: ...
    def __getitem__(self, key: str) -> NDArray[Any]: ...
    def __contains__(self, key: str) -> bool: ...
    def __repr__(self) -> str: ...
class DataSource:
    # Stub: generic file interface (local paths and URLs; see _datasource).
    def __init__(
        self,
        destpath: None | str | os.PathLike[str] = ...,
    ) -> None: ...
    def __del__(self) -> None: ...
    def abspath(self, path: str) -> str: ...
    def exists(self, path: str) -> bool: ...
    def open(
        self,
        path: str,
        mode: str = ...,
        encoding: None | str = ...,
        newline: None | str = ...,
    ) -> IO[Any]: ...
# Stub: `np.load` returns Any — the result type depends on file contents
# (ndarray for .npy, NpzFile for .npz, arbitrary objects when pickled).
def load(
    file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
    mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
    allow_pickle: bool = ...,
    fix_imports: bool = ...,
    encoding: L["ASCII", "latin1", "bytes"] = ...,
) -> Any: ...
# Stub: save a single array to a binary ``.npy`` file.
def save(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    arr: ArrayLike,
    allow_pickle: bool = ...,
    fix_imports: bool = ...,
) -> None: ...
# Stub: save several arrays into an uncompressed ``.npz`` archive.
def savez(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    *args: ArrayLike,
    **kwds: ArrayLike,
) -> None: ...
# Stub: save several arrays into a compressed ``.npz`` archive.
def savez_compressed(
    file: str | os.PathLike[str] | _SupportsWrite[bytes],
    *args: ArrayLike,
    **kwds: ArrayLike,
) -> None: ...
# Typed overloads for `loadtxt`: dtype=None -> float64 array, a specific
# scalar-type dtype -> typed array, any other dtype-like -> NDArray[Any].
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: None = ...,
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[float64]: ...
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: _DTypeLike[_SCT],
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[_SCT]: ...
@overload
def loadtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: DTypeLike,
    comments: None | str | Sequence[str] = ...,
    delimiter: None | str = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    skiprows: int = ...,
    usecols: int | Sequence[int] = ...,
    unpack: bool = ...,
    ndmin: L[0, 1, 2] = ...,
    encoding: None | str = ...,
    max_rows: None | int = ...,
    *,
    quotechar: None | str = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
# Stub: write an array as text; accepts paths and str/bytes writers.
def savetxt(
    fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
    X: ArrayLike,
    fmt: str | Sequence[str] = ...,
    delimiter: str = ...,
    newline: str = ...,
    header: str = ...,
    footer: str = ...,
    comments: str = ...,
    encoding: None | str = ...,
) -> None: ...
# Typed overloads for `fromregex`: a specific scalar-type dtype yields a
# typed array; any other dtype-like yields ``NDArray[Any]``.  The first
# overload's closing signature (``) -> NDArray[_SCT]: ...``) was missing,
# leaving an unterminated definition; restored here.
@overload
def fromregex(
    file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
    regexp: str | bytes | Pattern[Any],
    dtype: _DTypeLike[_SCT],
    encoding: None | str = ...
) -> NDArray[_SCT]: ...
@overload
def fromregex(
    file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
    regexp: str | bytes | Pattern[Any],
    dtype: DTypeLike,
    encoding: None | str = ...
) -> NDArray[Any]: ...
# Typed overloads for `genfromtxt`: dtype=None -> untyped array (dtype is
# discovered from the data), a specific scalar-type dtype -> typed array.
@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: None = ...,
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: _DTypeLike[_SCT],
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[_SCT]: ...
# Catch-all `genfromtxt` overload: an arbitrary dtype-like gives an
# untyped array, so the return type is NDArray[Any] (the original said
# NDArray[_SCT], which is unbound here).  The dangling parameter fragment
# that followed this overload was attached to no definition and is removed.
@overload
def genfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    dtype: DTypeLike,
    comments: str = ...,
    delimiter: None | str | int | Iterable[int] = ...,
    skip_header: int = ...,
    skip_footer: int = ...,
    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
    missing_values: Any = ...,
    filling_values: Any = ...,
    usecols: None | Sequence[int] = ...,
    names: L[None, True] | str | Collection[str] = ...,
    excludelist: None | Sequence[str] = ...,
    deletechars: str = ...,
    replace_space: str = ...,
    autostrip: bool = ...,
    case_sensitive: bool | L['upper', 'lower'] = ...,
    defaultfmt: str = ...,
    unpack: None | bool = ...,
    usemask: bool = ...,
    loose: bool = ...,
    invalid_raise: bool = ...,
    max_rows: None | int = ...,
    encoding: str = ...,
    *,
    ndmin: L[0, 1, 2] = ...,
    like: None | _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
# `recfromtxt` overloads: plain recarray by default, MaskedRecords when
# ``usemask=True``.  The original text carried an extra, undecorated
# duplicate definition before the overloads (a corruption artifact with an
# identical signature to the first overload); it is removed here.
@overload
def recfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[False] = ...,
    **kwargs: Any,
) -> recarray[Any, dtype[record]]: ...
@overload
def recfromtxt(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[True],
    **kwargs: Any,
) -> MaskedRecords[Any, dtype[void]]: ...
# `recfromcsv` overloads: plain recarray by default, MaskedRecords when
# ``usemask=True``.  As with `recfromtxt`, the undecorated duplicate
# definition that preceded the overloads is removed.
@overload
def recfromcsv(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[False] = ...,
    **kwargs: Any,
) -> recarray[Any, dtype[record]]: ...
@overload
def recfromcsv(
    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
    *,
    usemask: L[True],
    **kwargs: Any,
) -> MaskedRecords[Any, dtype[void]]: ...
.\numpy\numpy\lib\_polynomial_impl.py
"""
Functions to operate on polynomials.
"""
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit']
import functools
import re
import warnings
from .._utils import set_module
import numpy._core.numeric as NX
from numpy._core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy._core import overrides
from numpy.exceptions import RankWarning
from numpy.lib._twodim_base_impl import diag, vander
from numpy.lib._function_base_impl import trim_zeros
from numpy.lib._type_check_impl import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
def _poly_dispatcher(seq_of_zeros):
return seq_of_zeros
@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for zero in seq_of_zeros:
a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
def _roots_dispatcher(p):
return p
@array_function_dispatch(_roots_dispatcher)
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
non_zero = NX.nonzero(NX.ravel(p))[0]
if len(non_zero) == 0:
return NX.array([])
trailing_zeros = len(p) - non_zero[-1] - 1
p = p[int(non_zero[0]):int(non_zero[-1])+1]
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def _polyint_dispatcher(p, m=None, k=None):
return (p,)
@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def _polyder_dispatcher(p, m=None):
return (p,)
@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([0])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
    # Array-valued arguments relevant for __array_function__ dispatch.
    return (x, y, w)


@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Least squares polynomial fit.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree
    `deg` to points ``(x, y)``, minimising the squared error.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points.  Several datasets sharing
        the same x-coordinates can be fitted at once by passing a 2-D
        array containing one dataset per column.
    deg : int
        Degree of the fitting polynomial.
    rcond : float, optional
        Relative condition number of the fit.  Singular values smaller
        than this (relative to the largest singular value) are ignored.
        The default is ``len(x)*eps``, where `eps` is the relative
        precision of the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining the nature of the return value.  When False
        (the default) just the coefficients are returned; when True,
        diagnostic information from the singular value decomposition is
        also returned.
    w : array_like, shape (M,), optional
        Weights.  If not None, the weight ``w[i]`` applies to the
        unsquared residual ``y[i] - y_hat[i]`` at ``x[i]``.  Default is
        None.
    cov : bool or str, optional
        If given and not False, return the estimate together with its
        covariance matrix.  By default the covariance is scaled by
        ``chi2/dof``, with ``dof = M - (deg + 1)``.  If ``cov='unscaled'``
        this scaling is omitted, which is relevant when the weights are
        ``w = 1/sigma``.

    Returns
    -------
    p : ndarray, shape (deg + 1,) or (deg + 1, K)
        Polynomial coefficients, highest power first.  If `y` was 2-D,
        the coefficients for the k-th dataset are in ``p[:,k]``.
    residuals, rank, singular_values, rcond
        These values are only returned if ``full == True``: the sum of
        squared residuals of the fit, the effective rank of the scaled
        Vandermonde matrix, its singular values, and the value of
        `rcond`.
    V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K)
        Present only if ``full == False`` and ``cov == True``.  The
        covariance matrix of the coefficient estimates; diagonal entries
        are variance estimates for each coefficient.  If `y` is 2-D the
        covariance for the k-th dataset is in ``V[:,:,k]``.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient.  The warning is only raised if ``full == False``.

    See Also
    --------
    polyval : Compute polynomial values.
    linalg.lstsq : Computes a least-squares fit.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Examples
    --------
    >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
    >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
    >>> z = np.polyfit(x, y, 3)
    >>> z
    array([ 0.08703704, -0.81349206,  1.69312169, -0.03968254]) # may vary
    """
    order = int(deg) + 1
    # Force a floating-point (or complex) working dtype.
    x = NX.asarray(x) + 0.0
    y = NX.asarray(y) + 0.0
    # Validate the arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if x.shape[0] != y.shape[0]:
        raise TypeError("expected x and y to have same length")
    if rcond is None:
        rcond = len(x)*finfo(x.dtype).eps
    # Set up the least-squares system: Vandermonde matrix vs. samples.
    lhs = vander(x, order)
    rhs = y
    # Apply the (unsquared-residual) weights to both sides.
    if w is not None:
        w = NX.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        lhs *= w[:, NX.newaxis]
        if rhs.ndim == 2:
            rhs *= w[:, NX.newaxis]
        else:
            rhs *= w
    # Normalise each column of the Vandermonde matrix to improve the
    # conditioning of the system, then undo the scaling on the solution.
    scale = NX.sqrt((lhs*lhs).sum(axis=0))
    lhs /= scale
    c, resids, rank, s = lstsq(lhs, rhs, rcond)
    c = (c.T/scale).T
    # A rank-deficient system means the fit is not well-defined.
    if rank != order and not full:
        msg = "Polyfit may be poorly conditioned"
        warnings.warn(msg, RankWarning, stacklevel=2)
    if full:
        return c, resids, rank, s, rcond
    elif cov:
        # Covariance of the coefficients from the (rescaled) normal
        # equations, optionally scaled by chi2/dof.
        Vbase = inv(dot(lhs.T, lhs))
        Vbase /= NX.outer(scale, scale)
        if cov == "unscaled":
            fac = 1
        else:
            if len(x) <= order:
                raise ValueError("the number of data points must exceed order "
                                 "to scale the covariance matrix")
            fac = resids / (len(x) - order)
        if y.ndim == 1:
            return c, Vbase * fac
        else:
            return c, Vbase[:,:, NX.newaxis] * fac
    else:
        return c
def _polyval_dispatcher(p, x):
return (p, x)
@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
If `p` is of length N, this function returns the value::
p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
If `x` is another polynomial then the composite polynomial ``p(x(t))``
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
If `x` is a subtype of `ndarray` the return value will be of the same type.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([76])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([76])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asanyarray(x)
y = NX.zeros_like(x)
for pv in p:
y = y * x + pv
return y
def _binary_op_dispatcher(a1, a2):
return (a1, a2)
@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
"""
Find the product of two polynomials.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
val = NX.polymul(a1, a2)
if truepoly:
val = poly1d(val)
return val
p1 = np.poly1d([1, 2, 3])
p2 = np.poly1d([9, 5, 1])
print(p1)
print(p2)
print(np.polymul(p1, p2))
"""
判断输入参数 a1 和 a2 是否为 poly1d 类型的对象
如果其中一个是 poly1d 类型,则 truepoly 为 True,否则为 False
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def _polydiv_dispatcher(u, v):
return (u, v)
@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
.. note::
This forms part of the old polynomial API. Since version 1.4, the
new polynomial API defined in `numpy.polynomial` is preferred.
A summary of the differences can be found in the
:doc:`transition guide </reference/routines.polynomials>`.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([1.5 , 1.75]), array([0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.astype(w.dtype)
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"\*\*([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
@set_module('numpy')
class poly1d:
    """
    A one-dimensional polynomial class.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    A convenience class, used to encapsulate "natural" operations on
    polynomials so that said operations may take on their customary
    form in code (see Examples).

    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if
        the value of the second parameter is True, the polynomial's
        roots (values where the polynomial evaluates to 0).  For
        example, ``poly1d([1, 2, 3])`` returns an object that
        represents :math:`x^2 + 2x + 3`, whereas
        ``poly1d([1, 2, 3], True)`` returns one that represents
        :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x - 6`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing `p` from `x` to
        `variable` (see Examples).

    Examples
    --------
    Construct the polynomial :math:`x^2 + 2x + 3`:

    >>> p = np.poly1d([1, 2, 3])
    >>> print(np.poly1d(p))
       2
    1 x + 2 x + 3

    Evaluate the polynomial at :math:`x = 0.5`:

    >>> p(0.5)
    4.25

    Find the roots:

    >>> p.r
    array([-1.+1.41421356j, -1.-1.41421356j])
    >>> p(p.r)
    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j]) # may vary

    These numbers in the previous line represent (0, 0) to machine
    precision.

    Show the coefficients:

    >>> p.c
    array([1, 2, 3])

    Display the order (the leading zero-coefficients are removed):

    >>> p.order
    2

    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(i+1)]``):

    >>> p[1]
    2

    Polynomials can be added, subtracted, multiplied, and divided
    (returns quotient and remainder):

    >>> p * p
    poly1d([ 1,  4, 10, 12,  9])

    >>> (p**3 + 4) / p
    (poly1d([ 1.,  4., 10., 12.,  9.]), poly1d([4.]))

    ``asarray(p)`` gives the coefficient array, so polynomials can be
    used in all functions that accept arrays:

    >>> p**2 # square of polynomial
    poly1d([ 1,  4, 10, 12,  9])

    >>> np.square(p) # square of individual coefficients
    array([1, 4, 9])

    The variable used in the string representation of `p` can be
    modified, using the `variable` parameter:

    >>> p = np.poly1d([1,2,3], variable='z')
    >>> print(p)
       2
    1 z + 2 z + 3

    Construct a polynomial from its roots:

    >>> np.poly1d([1, 2], True)
    poly1d([ 1., -3.,  2.])

    This is the same polynomial as obtained by:

    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
    poly1d([ 1, -3,  2])

    """
    # Instances define value-based __eq__, so they are unhashable.
    __hash__ = None

    @property
    def coeffs(self):
        """ The polynomial coefficients """
        return self._coeffs

    @coeffs.setter
    def coeffs(self, value):
        # Only re-assignment of the identical object is tolerated (e.g.
        # from pickling); any other write is rejected.
        if value is not self._coeffs:
            raise AttributeError("Cannot set attribute")

    @property
    def variable(self):
        """ The name of the polynomial variable """
        return self._variable

    @property
    def order(self):
        """ The order or degree of the polynomial """
        return len(self._coeffs) - 1

    @property
    def roots(self):
        """ The roots of the polynomial, where self(x) == 0 """
        return roots(self._coeffs)

    # Storage goes through __dict__['coeffs'] directly so that the
    # public `coeffs` property and the internal setter can coexist.
    @property
    def _coeffs(self):
        return self.__dict__['coeffs']
    @_coeffs.setter
    def _coeffs(self, coeffs):
        self.__dict__['coeffs'] = coeffs

    # Short aliases kept for backwards compatibility.
    r = roots
    c = coef = coefficients = coeffs
    o = order

    def __init__(self, c_or_r, r=False, variable=None):
        # Copy-construct from another poly1d, preserving its variable
        # (and, for now, any extra instance attributes).
        if isinstance(c_or_r, poly1d):
            self._variable = c_or_r._variable
            self._coeffs = c_or_r._coeffs
            if set(c_or_r.__dict__) - set(self.__dict__):
                msg = ("In the future extra properties will not be copied "
                       "across when constructing one poly1d from another")
                warnings.warn(msg, FutureWarning, stacklevel=2)
                self.__dict__.update(c_or_r.__dict__)
            if variable is not None:
                self._variable = variable
            return
        # Interpret the input as roots and convert to coefficients.
        if r:
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if c_or_r.ndim > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Drop leading zero coefficients; the zero polynomial keeps a
        # single zero entry.
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = NX.array([0], dtype=c_or_r.dtype)
        self._coeffs = c_or_r
        if variable is None:
            variable = 'x'
        self._variable = variable

    def __array__(self, t=None, copy=None):
        # Expose the coefficient array, honouring an explicit dtype.
        if t:
            return NX.asarray(self.coeffs, t, copy=copy)
        else:
            return NX.asarray(self.coeffs, copy=copy)

    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the leading 'array(' and trailing ')' of the ndarray repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals

    def __len__(self):
        return self.order

    def __str__(self):
        thestr = "0"
        var = self.variable

        # Remove leading zeros (everything before the first non-zero).
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs) - 1

        def fmt_float(q):
            # Compact float formatting; drop a redundant '.0000' tail.
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s

        for k, coeff in enumerate(coeffs):
            if not iscomplex(coeff):
                coefstr = fmt_float(real(coeff))
            elif real(coeff) == 0:
                coefstr = '%sj' % fmt_float(imag(coeff))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),
                                          fmt_float(imag(coeff)))

            power = (N - k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                # NOTE(review): fmt_float cannot produce 'b'; this branch
                # looks like dead legacy code — kept as-is.
                elif coefstr == 'b':
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)

            # Join terms with ' + ' / ' - ', skipping zero terms.
            if k > 0:
                if newstr != '':
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        # Move the exponents onto a raised line for display.
        return _raise_power(thestr)

    def __call__(self, val):
        # Evaluate the polynomial at `val` (scalar, array, or poly1d).
        return polyval(self.coeffs, val)

    def __neg__(self):
        return poly1d(-self.coeffs)

    def __pos__(self):
        return self

    def __mul__(self, other):
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))

    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))

    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))

    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))

    def __pow__(self, val):
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        # Repeated polynomial multiplication, starting from 1.
        res = [1]
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)

    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))

    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))

    def __div__(self, other):
        # Scalar division scales coefficients; polynomial division
        # returns (quotient, remainder) via polydiv.
        if isscalar(other):
            return poly1d(self.coeffs / other)
        else:
            other = poly1d(other)
            return polydiv(self, other)

    __truediv__ = __div__

    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other / self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)

    __rtruediv__ = __rdiv__

    def __eq__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()

    def __ne__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        return not self.__eq__(other)

    def __getitem__(self, val):
        # p[k] is the coefficient of x**k; out-of-range powers yield 0.
        ind = self.order - val
        if val > self.order:
            return self.coeffs.dtype.type(0)
        if val < 0:
            return self.coeffs.dtype.type(0)
        return self.coeffs[ind]

    def __setitem__(self, key, val):
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        # Setting a power above the current order grows the coefficient
        # array with leading zeros.
        if key > self.order:
            zr = NX.zeros(key - self.order, self.coeffs.dtype)
            self._coeffs = NX.concatenate((zr, self.coeffs))
            ind = 0
        self._coeffs[ind] = val
        return

    def __iter__(self):
        # Iterate over coefficients, highest power first.
        return iter(self.coeffs)

    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.

        Refer to `polyint` for full documentation.

        See Also
        --------
        polyint : equivalent function
        """
        return poly1d(polyint(self.coeffs, m=m, k=k))

    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.

        Refer to `polyder` for full documentation.

        See Also
        --------
        polyder : equivalent function
        """
        return poly1d(polyder(self.coeffs, m=m))
# Always surface RankWarning (not just once per call site).
warnings.simplefilter('always', RankWarning)
.\numpy\numpy\lib\_polynomial_impl.pyi
from typing import (
Literal as L,
overload,
Any,
SupportsInt,
SupportsIndex,
TypeVar,
NoReturn,
)
import numpy as np
from numpy import (
poly1d as poly1d,
unsignedinteger,
signedinteger,
floating,
complexfloating,
int32,
int64,
float64,
complex128,
object_,
)
from numpy._typing import (
NDArray,
ArrayLike,
_ArrayLikeBool_co,
_ArrayLikeUInt_co,
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ArrayLikeObject_co,
)
# Generic helpers used by the annotations below.
_T = TypeVar("_T")
# A pair of identically-typed results (e.g. polydiv's quotient/remainder).
_2Tup = tuple[_T, _T]
# polyfit's ``full=True`` return: coefficients plus lstsq diagnostics
# (residuals, rank, singular values, rcond).
_5Tup = tuple[
    _T,
    NDArray[float64],
    NDArray[int32],
    NDArray[float64],
    NDArray[float64],
]
__all__: list[str]
# `poly` always yields real coefficients; `roots` may be real or complex.
def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...
def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...
# polyint: a poly1d input yields a poly1d; otherwise the result dtype
# follows the coefficient kind (float / complex / object).
@overload
def polyint(
    p: poly1d,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
) -> poly1d: ...
@overload
def polyint(
    p: _ArrayLikeFloat_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeFloat_co = ...,
) -> NDArray[floating[Any]]: ...
@overload
def polyint(
    p: _ArrayLikeComplex_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeComplex_co = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyint(
    p: _ArrayLikeObject_co,
    m: SupportsInt | SupportsIndex = ...,
    k: None | _ArrayLikeObject_co = ...,
) -> NDArray[object_]: ...
# polyder: same input-kind -> output-kind mapping as polyint.
@overload
def polyder(
    p: poly1d,
    m: SupportsInt | SupportsIndex = ...,
) -> poly1d: ...
@overload
def polyder(
    p: _ArrayLikeFloat_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[floating[Any]]: ...
@overload
def polyder(
    p: _ArrayLikeComplex_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyder(
    p: _ArrayLikeObject_co,
    m: SupportsInt | SupportsIndex = ...,
) -> NDArray[object_]: ...
# polyfit overloads: a plain fit returns the coefficient array;
# ``cov=True`` adds the covariance matrix (pair); ``full=True`` returns
# the lstsq diagnostics (5-tuple) instead.
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[False] = ...,
) -> NDArray[float64]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[False] = ...,
) -> NDArray[complex128]: ...
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[True, "unscaled"] = ...,
) -> _2Tup[NDArray[float64]]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[False] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: L[True, "unscaled"] = ...,
) -> _2Tup[NDArray[complex128]]: ...
@overload
def polyfit(
    x: _ArrayLikeFloat_co,
    y: _ArrayLikeFloat_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[True] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: bool | L["unscaled"] = ...,
) -> _5Tup[NDArray[float64]]: ...
@overload
def polyfit(
    x: _ArrayLikeComplex_co,
    y: _ArrayLikeComplex_co,
    deg: SupportsIndex | SupportsInt,
    rcond: None | float = ...,
    full: L[True] = ...,
    w: None | _ArrayLikeFloat_co = ...,
    cov: bool | L["unscaled"] = ...,
) -> _5Tup[NDArray[complex128]]: ...
# polyval: result dtype follows the usual promotion of p and x
# (bool inputs promote to int64).
@overload
def polyval(
    p: _ArrayLikeBool_co,
    x: _ArrayLikeBool_co,
) -> NDArray[int64]: ...
@overload
def polyval(
    p: _ArrayLikeUInt_co,
    x: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polyval(
    p: _ArrayLikeInt_co,
    x: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polyval(
    p: _ArrayLikeFloat_co,
    x: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polyval(
    p: _ArrayLikeComplex_co,
    x: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyval(
    p: _ArrayLikeObject_co,
    x: _ArrayLikeObject_co,
) -> NDArray[object_]: ...
# polyadd: a poly1d operand forces a poly1d result; otherwise the
# result dtype follows the promotion of the two coefficient arrays.
@overload
def polyadd(
    a1: poly1d,
    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> poly1d: ...
@overload
def polyadd(
    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    a2: poly1d,
) -> poly1d: ...
@overload
def polyadd(
    a1: _ArrayLikeBool_co,
    a2: _ArrayLikeBool_co,
) -> NDArray[np.bool]: ...
@overload
def polyadd(
    a1: _ArrayLikeUInt_co,
    a2: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeInt_co,
    a2: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeFloat_co,
    a2: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeComplex_co,
    a2: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
@overload
def polyadd(
    a1: _ArrayLikeObject_co,
    a2: _ArrayLikeObject_co,
) -> NDArray[object_]: ...
@overload
def polysub(
a1: poly1d,
a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> poly1d: ...
@overload
def polysub(
a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
a2: poly1d,
) -> poly1d: ...
@overload
def polysub(
a1: _ArrayLikeBool_co,
a2: _ArrayLikeBool_co,
) -> NoReturn: ...
@overload
def polysub(
a1: _ArrayLikeUInt_co,
a2: _ArrayLikeUInt_co,
) -> NDArray[unsignedinteger[Any]]: ...
@overload
def polysub(
a1: _ArrayLikeInt_co,
a2: _ArrayLikeInt_co,
) -> NDArray[signedinteger[Any]]: ...
@overload
def polysub(
a1: _ArrayLikeFloat_co,
a2: _ArrayLikeFloat_co,
) -> NDArray[floating[Any]]: ...
@overload
def polysub(
a1: _ArrayLikeComplex_co,
a2: _ArrayLikeComplex_co,
) -> NDArray[complexfloating[Any, Any]]: ...
a1: _ArrayLikeObject_co,
a2: _ArrayLikeObject_co,
) -> NDArray[object_]: ...
# NOTE(review): the runtime polymul is convolution, not addition; this
# alias presumably only reuses polyadd's overload *shapes* — confirm
# against the upstream stub, which spells out separate polymul overloads.
polymul = polyadd
# polydiv returns a (quotient, remainder) pair; a poly1d operand makes
# both elements poly1d.
@overload
def polydiv(
    u: poly1d,
    v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
) -> _2Tup[poly1d]: ...
@overload
def polydiv(
    u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
    v: poly1d,
) -> _2Tup[poly1d]: ...
@overload
def polydiv(
    u: _ArrayLikeFloat_co,
    v: _ArrayLikeFloat_co,
) -> _2Tup[NDArray[floating[Any]]]: ...
@overload
def polydiv(
    u: _ArrayLikeComplex_co,
    v: _ArrayLikeComplex_co,
) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
@overload
def polydiv(
    u: _ArrayLikeObject_co,
    v: _ArrayLikeObject_co,
) -> _2Tup[NDArray[Any]]: ...
.\numpy\numpy\lib\_scimath_impl.py
"""
Wrapper functions to more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.
For example, for functions like `log` with branch cuts, the versions in this
module provide the mathematically valid answers in the complex plane::
>>> import math
>>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
True
Similarly, `sqrt`, other base logarithms, `power` and trig functions are
correctly handled. See their respective docstrings for specific examples.
Functions
---------
.. autosummary::
:toctree: generated/
sqrt
log
log2
logn
log10
power
arccos
arcsin
arctanh
"""
import numpy._core.numeric as nx
import numpy._core.numerictypes as nt
from numpy._core.numeric import asarray, any
from numpy._core.overrides import array_function_dispatch
from numpy.lib._type_check_impl import isreal
__all__ = [
'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
'arctanh'
]
_ln2 = nx.log(2.0)
def _tocomplex(arr):
"""
Convert its input `arr` to a complex array.
The input is returned as a complex array of the smallest type that will fit
the original data: types like single, byte, short, etc. become csingle,
while others become cdouble.
A copy of the input is always made.
Parameters
----------
arr : array
Returns
-------
array
An array with the same input data as the input but in complex form.
Examples
--------
First, consider an input of type short:
>>> a = np.array([1,2,3],np.short)
>>> ac = np.lib.scimath._tocomplex(a); ac
array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
>>> ac.dtype
dtype('complex64')
If the input is of type double, the output is correspondingly of the
complex double type as well:
>>> b = np.array([1,2,3],np.double)
>>> bc = np.lib.scimath._tocomplex(b); bc
array([1.+0.j, 2.+0.j, 3.+0.j])
>>> bc.dtype
dtype('complex128')
Note that even if the input was complex to begin with, a copy is still
made, since the astype() method always copies:
>>> c = np.array([1,2,3],np.csingle)
>>> cc = np.lib.scimath._tocomplex(c); cc
array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
>>> c *= 2; c
array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
>>> cc
array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
"""
if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
nt.ushort, nt.csingle)):
return arr.astype(nt.csingle)
else:
return arr.astype(nt.cdouble)
def _fix_real_lt_zero(x):
"""
Convert `x` to complex if it has real, negative components.
Otherwise, output is just the array version of the input (via asarray).
Parameters
----------
x : array_like
Returns
-------
array
Examples
--------
"""
>>> np.lib.scimath._fix_real_lt_zero([1,2])
array([1, 2])
>>> np.lib.scimath._fix_real_lt_zero([-1,2])
array([-1.+0.j, 2.+0.j])
"""
# 将输入参数 x 转换为 numpy 数组
x = asarray(x)
# 如果数组 x 中存在实数且小于零的元素,则将数组转换为复数形式
if any(isreal(x) & (x < 0)):
x = _tocomplex(x)
# 返回处理后的数组 x
return x
# 将输入 `x` 转换为 numpy 数组
x = asarray(x)
# 如果 `x` 中存在实数且小于零的元素,则将 `x` 中所有元素转换为浮点数类型
if any(isreal(x) & (x < 0)):
x = x * 1.0
# 返回处理后的数组 `x`
return x
# 将输入 `x` 转换为 numpy 数组
x = asarray(x)
# 如果 `x` 中存在实数且绝对值大于 1 的元素,则将 `x` 转换为复数类型
if any(isreal(x) & (abs(x) > 1)):
x = _tocomplex(x)
# 返回处理后的数组 `x`
return x
# 定义一个函数 `_unary_dispatcher`,接收参数 `x`,返回元组 (x,)
def _unary_dispatcher(x):
return (x,)
# Register sqrt with the __array_function__ protocol via the
# single-argument dispatcher.
@array_function_dispatch(_unary_dispatcher)
def sqrt(x):
    """
    Compute the square root of `x`.

    For negative real input the computation is carried out in the
    complex domain, returning the principal square root rather than
    ``nan`` (compare `numpy.sqrt`).
    """
    # Promote negative real input to complex before taking the root.
    x = _fix_real_lt_zero(x)
    return nx.sqrt(x)
# Register log with the __array_function__ protocol.  The extraction had
# duplicated this function's body as stray module-level statements (a
# bare `return` — a syntax error); the duplicate is merged away here.
@array_function_dispatch(_unary_dispatcher)
def log(x):
    """
    Compute the natural logarithm of `x`.

    For negative real input the computation is carried out in the
    complex domain, returning the principal value (compare `numpy.log`,
    which returns ``nan`` there).

    Parameters
    ----------
    x : array_like

    Returns
    -------
    out : ndarray or scalar
        The natural log of the `x` value(s).
    """
    # Promote negative real input to complex so the branch cut is
    # handled with a valid principal value.
    x = _fix_real_lt_zero(x)
    return nx.log(x)
# Register log10 with the __array_function__ protocol via the
# single-argument dispatcher.
@array_function_dispatch(_unary_dispatcher)
def log10(x):
    """
    Compute the logarithm base 10 of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this is a
    real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
    returns ``inf``). Otherwise, the complex principle value is
    returned.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose log base 10 is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base 10 of the `x` value(s). If `x` was a scalar, so is
        `out`, otherwise an array object is returned.

    See Also
    --------
    numpy.log10

    Notes
    -----
    For a log10() that returns ``NAN`` when real `x < 0`, use
    `numpy.log10` (note, however, that otherwise `numpy.log10` and this
    `log10` are identical, i.e., both return ``-inf`` for `x = 0`,
    ``inf`` for `x = inf`, and, notably, the complex principle value if
    ``x.imag != 0``).

    Examples
    --------
    (We set the printing precision so the example can be auto-tested)

    >>> np.set_printoptions(precision=4)

    >>> np.emath.log10(10**1)
    1.0

    >>> np.emath.log10([-10**1, -10**2, 10**2])
    array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
    """
    # Promote negative real input to complex first.
    x = _fix_real_lt_zero(x)
    return nx.log10(x)
def _logn_dispatcher(n, x):
    # Dispatcher for logn: both the base and the argument take part in
    # __array_function__ dispatch.  (Its definition was lost in the
    # extraction even though the decorator below references it; restored
    # here so logn imports cleanly.)
    return (n, x)


@array_function_dispatch(_logn_dispatcher)
def logn(n, x):
    """
    Take log base `n` of `x`.

    If `x` contains negative inputs, the answer is computed and
    returned in the complex domain.

    Parameters
    ----------
    n : array_like
        The integer base(s) in which the log is taken.
    x : array_like
        The value(s) whose log base `n` is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base `n` of the `x` value(s). If `x` was a scalar, so
        is `out`, otherwise an array is returned.

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.logn(2, [4, 8])
    array([2., 3.])
    >>> np.emath.logn(2, [-4, -8, 8])
    array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
    """
    # Promote negative real values (in both argument and base) to
    # complex before applying the change-of-base formula.
    x = _fix_real_lt_zero(x)
    n = _fix_real_lt_zero(n)
    # log_n(x) = log(x) / log(n)
    return nx.log(x)/nx.log(n)
# Register log2 with the __array_function__ protocol.  The extraction had
# split this function's docstring in two (its Notes/Examples half sat
# outside the string as bare text), duplicated the body, and interleaved
# bare narration lines — all syntax errors; reassembled into one
# definition here.
@array_function_dispatch(_unary_dispatcher)
def log2(x):
    """
    Compute the logarithm base 2 of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is a real
    number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
    ``inf``). Otherwise, the complex principle value is returned.

    Parameters
    ----------
    x : array_like
        The value(s) whose log base 2 is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base 2 of the `x` value(s). If `x` was a scalar, so is
        `out`, otherwise an array is returned.

    See Also
    --------
    numpy.log2

    Notes
    -----
    For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
    (note, however, that otherwise `numpy.log2` and this `log2` are
    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
    and, notably, the complex principle value if ``x.imag != 0``).

    Examples
    --------
    We set the printing precision so the example can be auto-tested:

    >>> np.set_printoptions(precision=4)

    >>> np.emath.log2(8)
    3.0
    >>> np.emath.log2([-4, -8, 8])
    array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
    """
    # Promote negative real input to complex first.
    x = _fix_real_lt_zero(x)
    return nx.log2(x)
def _power_dispatcher(x, p):
return (x, p)
@array_function_dispatch(_power_dispatcher)
def power(x, p):
    """
    Return x to the power p, (x**p).

    If `x` contains negative values, the output is converted to the
    complex domain.

    Parameters
    ----------
    x : array_like
        The input value(s).
    p : array_like of ints
        The power(s) to which `x` is raised. If `x` contains multiple
        values, `p` has to either be a scalar, or contain the same
        number of values as `x`. In the latter case, the result is
        ``x[0]**p[0], x[1]**p[1], ...``.

    Returns
    -------
    out : ndarray or scalar
        The result of ``x**p``. If `x` and `p` are scalars, so is
        `out`, otherwise an array is returned.

    See Also
    --------
    numpy.power

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.power(2, 2)
    4
    >>> np.emath.power([2, 4], 2)
    array([ 4, 16])
    >>> np.emath.power([2, 4], -2)
    array([0.25  , 0.0625])
    >>> np.emath.power([-2, 4], 2)
    array([ 4.-0.j, 16.+0.j])
    >>> np.emath.power([2, 4], [2, 4])
    array([  4, 256])
    """
    # Negative real bases are promoted to complex; exponent arrays with
    # negative entries are float-promoted so integer ** negative works.
    x = _fix_real_lt_zero(x)
    p = _fix_int_lt_zero(p)
    return nx.power(x, p)
@array_function_dispatch(_unary_dispatcher)
def arccos(x):
    """
    Compute the inverse cosine of x.

    Return the "principal value" (for a description of this, see
    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such
    that `abs(x) <= 1`, this is a real number in the closed interval
    :math:`[0, \\pi]`. Otherwise, the complex principle value is
    returned.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose arccos is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse cosine(s) of the `x` value(s). If `x` was a scalar,
        so is `out`, otherwise an array object is returned.

    See Also
    --------
    numpy.arccos

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arccos(1) # a scalar is returned
    0.0

    >>> np.emath.arccos([1,2])
    array([0.-0.j , 0.-1.317j])
    """
    # |x| > 1 is outside the real domain of arccos; promote to complex.
    x = _fix_real_abs_gt_1(x)
    return nx.arccos(x)
# The extraction had closed this docstring early, leaving its
# Returns/See Also/Notes/Examples half floating as bare text and
# duplicating the two-line body — all syntax errors; reassembled here.
@array_function_dispatch(_unary_dispatcher)
def arcsin(x):
    """
    Compute the inverse sine of x.

    Return the "principal value" (for a description of this, see
    `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
    `abs(x) <= 1`, this is a real number in the closed interval
    :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value
    is returned.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose arcsin is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
        is `out`, otherwise an array object is returned.

    See Also
    --------
    numpy.arcsin

    Notes
    -----
    For an arcsin() that returns ``NAN`` when real `x` is not in the
    interval ``[-1,1]``, use `numpy.arcsin`.

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arcsin(0)
    0.0

    >>> np.emath.arcsin([0,1])
    array([0. , 1.5708])
    """
    # |x| > 1 is outside the real domain of arcsin; promote to complex.
    x = _fix_real_abs_gt_1(x)
    return nx.arcsin(x)
# Register arctanh with the __array_function__ protocol via the
# single-argument dispatcher.
@array_function_dispatch(_unary_dispatcher)
def arctanh(x):
    """
    Compute the inverse hyperbolic tangent of `x`.

    Return the "principal value" (for a description of this, see
    `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
    ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
    complex, the result is complex. Finally, `x = 1` returns ``inf`` and
    ``x = -1`` returns ``-inf``.

    Parameters
    ----------
    x : array_like
        The value(s) whose arctanh is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
        a scalar so is `out`, otherwise an array is returned.

    See Also
    --------
    numpy.arctanh

    Notes
    -----
    For an arctanh() that returns ``NAN`` when real `x` is not in the
    interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
    return +/-inf for ``x = +/-1``).

    Examples
    --------
    >>> np.set_printoptions(precision=4)

    >>> np.emath.arctanh(0.5)
    0.5493061443340549

    >>> from numpy.testing import suppress_warnings
    >>> with suppress_warnings() as sup:
    ...     sup.filter(RuntimeWarning)
    ...     np.emath.arctanh(np.eye(2))
    array([[inf,  0.],
           [ 0., inf]])
    >>> np.emath.arctanh([1j])
    array([0.+0.7854j])
    """
    # Real inputs with |x| > 1 are promoted to complex so the principal
    # value can be returned instead of nan.
    x = _fix_real_abs_gt_1(x)
    # Delegate the actual computation to the core ufunc.
    return nx.arctanh(x)