NumPy 源码解析(五十)
.\numpy\numpy\_core\include\numpy\npy_cpu.h
/*
* This block defines CPU-specific macros based on the detected CPU architecture.
* The macros determine the target CPU and are used for conditional compilation.
* The possible values include:
* - NPY_CPU_X86
* - NPY_CPU_AMD64
* - NPY_CPU_PPC
* - NPY_CPU_PPC64
* - NPY_CPU_PPC64LE
* - NPY_CPU_SPARC
* - NPY_CPU_S390
* - NPY_CPU_IA64
* - NPY_CPU_HPPA
* - NPY_CPU_ALPHA
* - NPY_CPU_ARMEL
* - NPY_CPU_ARMEB
* - NPY_CPU_SH_LE
* - NPY_CPU_SH_BE
* - NPY_CPU_ARCEL
* - NPY_CPU_ARCEB
* - NPY_CPU_RISCV64
* - NPY_CPU_LOONGARCH
* - NPY_CPU_WASM
*/
/*
* __i386__ is defined by gcc and Intel compiler on Linux,
* _M_IX86 by VS compiler,
* i386 by Sun compilers on opensolaris at least
*/
/*
* both __x86_64__ and __amd64__ are defined by gcc
* __x86_64 defined by sun compiler on opensolaris at least
* _M_AMD64 defined by MS compiler
*/
/*
* __ppc__ is defined by gcc, I remember having seen __powerpc__ once,
* but can't find it ATM
* _ARCH_PPC is used by at least gcc on AIX
* As __powerpc__ and _ARCH_PPC are also defined by PPC64 check
* for those specifically first before defaulting to ppc
*/
#define NPY_CPU_PPC
#elif defined(__sparc__) || defined(__sparc)
/* __sparc__ is defined by gcc and Forte (e.g. Sun) compilers */
#define NPY_CPU_SPARC
#elif defined(__s390__)
#define NPY_CPU_S390
#elif defined(__ia64)
#define NPY_CPU_IA64
#elif defined(__hppa)
#define NPY_CPU_HPPA
#elif defined(__alpha__)
#define NPY_CPU_ALPHA
#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)
/* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */
#if defined(__ARMEB__) || defined(__AARCH64EB__)
#if defined(__ARM_32BIT_STATE)
#define NPY_CPU_ARMEB_AARCH32
#elif defined(__ARM_64BIT_STATE)
#define NPY_CPU_ARMEB_AARCH64
#else
#define NPY_CPU_ARMEB
#endif
#elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
#if defined(__ARM_32BIT_STATE)
#define NPY_CPU_ARMEL_AARCH32
#elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
#define NPY_CPU_ARMEL_AARCH64
#else
#define NPY_CPU_ARMEL
#endif
/*
* Detect ARM specific configurations:
* - Check for little-endian (__ARMEL__) or big-endian (__ARMEB__) ARM architectures
* - Distinguish between ARM32 (__ARM_32BIT_STATE) and ARM64 (__ARM_64BIT_STATE)
* - Define appropriate macros based on detected conditions
*/
#define NPY_CPU_ARMEB_AARCH32
#elif defined(__ARM_64BIT_STATE)
/*
 * Define the ARM64 big-endian macro when __ARM_64BIT_STATE is detected:
 * - __ARM_64BIT_STATE distinguishes the AArch64 execution state
 * - (this branch is inside the big-endian __ARMEB__/__AARCH64EB__ arm,
 *   so the _M_ARM64/__AARCH64EL__ little-endian checks do not apply here)
 */
#define NPY_CPU_ARMEB_AARCH64
#else
/*
 * Default to big-endian ARM when no specific bit-state condition is met:
 * - Define NPY_CPU_ARMEB for general ARM big-endian architectures
 */
#define NPY_CPU_ARMEB
#endif
#elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
#if defined(__ARM_32BIT_STATE)
/*
* Define ARM32 little-endian specific macros when __ARM_32BIT_STATE is detected:
* - __ARM_32BIT_STATE is used to distinguish ARM32 architecture
* - Include _M_ARM64 and __AARCH64EL__ checks for MSVC and AARCH64EL compatibility
*/
#define NPY_CPU_ARMEL_AARCH32
#elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
/*
* Define ARM64 little-endian specific macros when __ARM_64BIT_STATE or equivalent is detected:
* - __ARM_64BIT_STATE is used to distinguish ARM64 architecture
* - Also include _M_ARM64 and __AARCH64EL__ checks for MSVC and AARCH64EL compatibility
*/
#define NPY_CPU_ARMEL_AARCH64
#else
/*
* Default to ARM architecture in little-endian mode when no specific condition is met:
* - Define NPY_CPU_ARMEL for general ARM little-endian architectures
*/
#define NPY_CPU_ARMEL
#endif
/*
* Define ARM architecture-specific macros:
* - __ARM__ and __aarch64__ are defined by compilers supporting ARM architectures
* - _M_ARM64 is defined by MSVC for ARM64 compilation on Windows
* - This section distinguishes between big-endian (__ARMEB__/__AARCH64EB__) and little-endian (__ARMEL__/__AARCH64EL__) modes
* - Further checks differentiate between ARM32 and ARM64 states (__ARM_32BIT_STATE and __ARM_64BIT_STATE)
*/
#if defined(__ARMEB__) || defined(__AARCH64EB__)
#if defined(__ARM_32BIT_STATE)
/*
* Define ARM32 big-endian specific macros when __ARM_32BIT_STATE is detected:
* - __ARM_32BIT_STATE is used to distinguish ARM32 architecture
*/
#define NPY_CPU_ARMEB_AARCH32
#elif defined(__ARM_64BIT_STATE)
/*
* Define ARM64 big-endian specific macros when __ARM_64BIT_STATE is detected:
* - __ARM_64BIT_STATE is used to distinguish ARM64 architecture
*/
#define NPY_CPU_ARMEB_AARCH64
#else
/*
* Default to big-endian ARM architecture when no specific condition is met:
* - Define NPY_CPU_ARMEB for general big-endian ARM architectures
*/
#define NPY_CPU_ARMEB
#endif
#elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)
#if defined(__ARM_32BIT_STATE)
/*
* Define ARM32 little-endian specific macros when __ARM_32BIT_STATE is detected:
* - __ARM_32BIT_STATE is used to distinguish ARM32 architecture
*/
#define NPY_CPU_ARMEL_AARCH32
#elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)
/*
* Define ARM64 little-endian specific macros when __ARM_64BIT_STATE or equivalent is detected:
* - __ARM_64BIT_STATE is used to distinguish ARM64 architecture
* - Also include _M_ARM64 and __AARCH64EL__ checks for MSVC and AARCH64EL compatibility
*/
#define NPY_CPU_ARMEL_AARCH64
#else
/*
* Default to little-endian ARM architecture when no specific condition is met:
* - Define NPY_CPU_ARMEL for general little-endian ARM architectures
*/
#define NPY_CPU_ARMEL
#endif
# 如果条件不满足,则执行以下代码
# 向用户报告错误:未知的 ARM CPU,请提供平台信息(操作系统、CPU和编译器)给 numpy 维护人员
# 结束条件判断
#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
#define NPY_CPU_SH_LE
#elif defined(__sh__) && defined(__BIG_ENDIAN__)
#define NPY_CPU_SH_BE
#elif defined(__MIPSEL__)
#define NPY_CPU_MIPSEL
#elif defined(__MIPSEB__)
#define NPY_CPU_MIPSEB
#elif defined(__or1k__)
#define NPY_CPU_OR1K
#elif defined(__mc68000__)
#define NPY_CPU_M68K
#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
#define NPY_CPU_ARCEL
#elif defined(__arc__) && defined(__BIG_ENDIAN__)
#define NPY_CPU_ARCEB
#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
#define NPY_CPU_RISCV64
#elif defined(__loongarch__)
#define NPY_CPU_LOONGARCH
#elif defined(__EMSCRIPTEN__)
/* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
#define NPY_CPU_WASM
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
#endif
/*
* Except for the following architectures, memory access is limited to the natural
* alignment of data types otherwise it may lead to bus error or performance regression.
* For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt.
*/
#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__)
#define NPY_ALIGNMENT_REQUIRED 0
#endif
#ifndef NPY_ALIGNMENT_REQUIRED
#define NPY_ALIGNMENT_REQUIRED 1
#endif
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)
#define NPY_CPU_SH_LE // Define for SH architecture with little endian byte order
#elif defined(__sh__) && defined(__BIG_ENDIAN__)
#define NPY_CPU_SH_BE // Define for SH architecture with big endian byte order
#elif defined(__MIPSEL__)
#define NPY_CPU_MIPSEL // Define for MIPS architecture with little endian byte order
#elif defined(__MIPSEB__)
#define NPY_CPU_MIPSEB // Define for MIPS architecture with big endian byte order
#elif defined(__or1k__)
#define NPY_CPU_OR1K // Define for OpenRISC architecture
#elif defined(__mc68000__)
#define NPY_CPU_M68K // Define for Motorola 68000 architecture
#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)
#define NPY_CPU_ARCEL // Define for ARC architecture with little endian byte order
#elif defined(__arc__) && defined(__BIG_ENDIAN__)
#define NPY_CPU_ARCEB // Define for ARC architecture with big endian byte order
#elif defined(__riscv) && defined(__riscv_xlen) && __riscv_xlen == 64
#define NPY_CPU_RISCV64 // Define for RISC-V 64-bit architecture
#elif defined(__loongarch__)
#define NPY_CPU_LOONGARCH // Define for LoongArch architecture
#elif defined(__EMSCRIPTEN__)
/* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */
#define NPY_CPU_WASM // Define for WebAssembly (emscripten) platform
#else
#error Unknown CPU, please report this to numpy maintainers with \
information about your platform (OS, CPU and compiler)
#endif
/*
* Except for the following architectures, memory access is limited to the natural
* alignment of data types otherwise it may lead to bus error or performance regression.
* For more details about unaligned access, see https://www.kernel.org/doc/Documentation/unaligned-memory-access.txt.
*/
#if defined(NPY_CPU_X86) || defined(NPY_CPU_AMD64) || defined(__aarch64__) || defined(__powerpc64__)
#define NPY_ALIGNMENT_REQUIRED 0 // Disable alignment requirement for these architectures
#endif
#ifndef NPY_ALIGNMENT_REQUIRED
#define NPY_ALIGNMENT_REQUIRED 1 // Enable alignment requirement by default
#endif
#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */
.\numpy\numpy\_core\include\numpy\npy_endian.h
/*
* NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in
* endian.h
*/
/* Use endian.h if available */
// 定义 NPY_BYTE_ORDER 为当前系统的字节序
// 定义 NPY_LITTLE_ENDIAN 为当前系统的小端字节序
// 定义 NPY_BIG_ENDIAN 为当前系统的大端字节序
// 定义 NPY_BYTE_ORDER 为当前系统的字节序
// 定义 NPY_LITTLE_ENDIAN 为当前系统的小端字节序
// 定义 NPY_BIG_ENDIAN 为当前系统的大端字节序
// 定义 NPY_BYTE_ORDER 为当前系统的字节序
// 定义 NPY_LITTLE_ENDIAN 为当前系统的小端字节序
// 定义 NPY_BIG_ENDIAN 为当前系统的大端字节序
/* Set endianness info using target CPU */
// 默认为小端字节序
// 大端字节序
// 根据目标 CPU 设置字节序
|| defined(NPY_CPU_AMD64) \
|| defined(NPY_CPU_IA64) \
|| defined(NPY_CPU_ALPHA) \
|| defined(NPY_CPU_ARMEL) \
|| defined(NPY_CPU_ARMEL_AARCH32) \
|| defined(NPY_CPU_ARMEL_AARCH64) \
|| defined(NPY_CPU_SH_LE) \
|| defined(NPY_CPU_MIPSEL) \
|| defined(NPY_CPU_PPC64LE) \
|| defined(NPY_CPU_ARCEL) \
|| defined(NPY_CPU_RISCV64) \
|| defined(NPY_CPU_LOONGARCH) \
|| defined(NPY_CPU_WASM)
// 当目标 CPU 是以下之一时,使用小端字节序
|| defined(NPY_CPU_SPARC) \
|| defined(NPY_CPU_S390) \
|| defined(NPY_CPU_HPPA) \
|| defined(NPY_CPU_PPC64) \
|| defined(NPY_CPU_ARMEB) \
|| defined(NPY_CPU_ARMEB_AARCH32) \
|| defined(NPY_CPU_ARMEB_AARCH64) \
|| defined(NPY_CPU_SH_BE) \
|| defined(NPY_CPU_MIPSEB) \
|| defined(NPY_CPU_OR1K) \
|| defined(NPY_CPU_M68K) \
|| defined(NPY_CPU_ARCEB)
// 当目标 CPU 是以下之一时,使用大端字节序
// 如果目标 CPU 未知,则无法设置字节序,抛出错误
.\numpy\numpy\_core\include\numpy\npy_math.h
/* 通过在适当时添加 static inline 修饰符到 npy_math 函数定义中,
编译器有机会进行优化 */
extern "C" {
// 定义一个宏,返回两个数中较大的数
// 定义一个宏,返回两个数中较小的数
/*
* NAN 和 INFINITY 的宏定义(NAN 的行为与 glibc 一致,INFINITY 的行为与 C99 一致)
*
* XXX: 应测试平台上是否可用 INFINITY 和 NAN
*/
// 返回正无穷大的浮点数
/* Return +infinity by bit-casting the IEEE-754 single-precision
 * pattern 0x7f800000 (all-ones exponent, zero mantissa). */
static inline float __npy_inff(void)
{
    union { npy_uint32 bits; float value; } pun;
    pun.bits = 0x7f800000UL;
    return pun.value;
}
// 返回 NaN 的浮点数
/* Return a quiet NaN by bit-casting the IEEE-754 single-precision
 * pattern 0x7fc00000 (all-ones exponent, MSB of mantissa set). */
static inline float __npy_nanf(void)
{
    union { npy_uint32 bits; float value; } pun;
    pun.bits = 0x7fc00000UL;
    return pun.value;
}
// 返回正零的浮点数
/* Return +0.0f via the all-zero IEEE-754 single-precision bit pattern. */
static inline float __npy_pzerof(void)
{
    union { npy_uint32 bits; float value; } pun;
    pun.bits = 0x00000000UL;
    return pun.value;
}
// 返回负零的浮点数
/* Return -0.0f via the IEEE-754 single-precision pattern 0x80000000
 * (only the sign bit set). */
static inline float __npy_nzerof(void)
{
    union { npy_uint32 bits; float value; } pun;
    pun.bits = 0x80000000UL;
    return pun.value;
}
// 定义浮点数的正无穷大宏
// 定义浮点数的 NaN 宏
// 定义浮点数的正零宏
// 定义浮点数的负零宏
// 定义双精度浮点数的正无穷大宏
// 定义双精度浮点数的 NaN 宏
// 定义双精度浮点数的正零宏
// 定义双精度浮点数的负零宏
// 定义长双精度浮点数的正无穷大宏
// 定义长双精度浮点数的 NaN 宏
// 定义长双精度浮点数的正零宏
// 定义长双精度浮点数的负零宏
/*
* 一些有用的常量
*/
// 自然常数 e
// 以 2 为底 e 的对数
// 以 10 为底 e 的对数
// 自然对数 e 的底数
// 自然对数 e 的底数
// 圆周率 π
// π 的一半
// π 的四分之一
// 1/pi
// 2/pi
// 欧拉常数 γ
// 开平方根的 2 的值
// 1/sqrt(2)
// 单精度浮点数的自然常数 e
// 单精度浮点数以 2 为底 e 的对数
// 单精度浮点数以 10 为底 e 的对数
/*
* 定义常量:浮点数的对数值和数学常数
*/
/*
* 整数函数声明
*/
/*
 * Integer function declarations: gcd/lcm and bit-shift helpers, one
 * variant per integer width (suffix encodes the type, e.g. u=unsigned,
 * l=long, ll=long long, hh=byte, h=short).
 */
NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b); // greatest common divisor, unsigned int
NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b); // least common multiple, unsigned int
NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b); // gcd, unsigned long
NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b); // lcm, unsigned long
NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b); // gcd, unsigned long long
NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b); // lcm, unsigned long long
NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b); // gcd, int
NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b); // lcm, int
NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b); // gcd, long
NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b); // lcm, long
NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b); // gcd, long long
NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b); // lcm, long long
NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b); // right shift, unsigned byte
NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b); // left shift, unsigned byte
NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b); // right shift, unsigned short
NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b); // left shift, unsigned short
NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b); // right shift, unsigned int
NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b); // left shift, unsigned int
NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b); // right shift, unsigned long
NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b); // left shift, unsigned long
/*
 * Shift declarations continued: unsigned long long, then the
 * signed widths (note: this group includes signed types, unlike the
 * original annotation which claimed unsigned only).
 */
NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
/*
 * Population count (number of set bits), returned as uint8_t, for
 * every unsigned and signed integer width.
 */
NPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a);
NPY_INPLACE uint8_t npy_popcountuh(npy_ushort a);
NPY_INPLACE uint8_t npy_popcountu(npy_uint a);
NPY_INPLACE uint8_t npy_popcountul(npy_ulong a);
NPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a);
NPY_INPLACE uint8_t npy_popcounthh(npy_byte a);
NPY_INPLACE uint8_t npy_popcounth(npy_short a);
NPY_INPLACE uint8_t npy_popcount(npy_int a);
NPY_INPLACE uint8_t npy_popcountl(npy_long a);
NPY_INPLACE uint8_t npy_popcountll(npy_longlong a);
/*
 * C99 double-precision math functions that may need platform fixups or
 * can be blocklisted on broken libm implementations.
 */
NPY_INPLACE double npy_sin(double x);
NPY_INPLACE double npy_cos(double x);
NPY_INPLACE double npy_tan(double x);
NPY_INPLACE double npy_hypot(double x, double y);
NPY_INPLACE double npy_log2(double x);
NPY_INPLACE double npy_atan2(double x, double y);
/*
 * Mandatory C99 double math functions: no blocklisting or fixups
 * required.  Kept for legacy reasons; should be deprecated at some
 * point.
 */
/*
 * Return the spacing of x, i.e. the distance to the next
 * representable double.
 */
double npy_spacing(double x);
/*
 * IEEE 754 floating point handling.
 */
/* Use builtins in tight loops to avoid function calls; only available
 * when npy_config.h is available (i.e. in NumPy's own build). */
/* Only available when npy_config.h is available (NumPy's own build). */
/* Only available when npy_config.h is available (NumPy's own build). */
/*
 * Single-precision C99 math functions that need fixups or are
 * blocklist-able on broken libm implementations.
 */
NPY_INPLACE float npy_sinf(float x);
NPY_INPLACE float npy_cosf(float x);
NPY_INPLACE float npy_tanf(float x);
NPY_INPLACE float npy_expf(float x);
NPY_INPLACE float npy_sqrtf(float x);
NPY_INPLACE float npy_hypotf(float x, float y);
NPY_INPLACE float npy_log2f(float x);
NPY_INPLACE float npy_atan2f(float x, float y);
NPY_INPLACE float npy_powf(float x, float y);
NPY_INPLACE float npy_modff(float x, float* y);
/* Mandatory C99 float math funcs, no blocklisting or fixups */
/* defined for legacy reasons, should be deprecated at some point */
float npy_spacingf(float x);
/*
 * long double C99 math functions that need fixups or are
 * blocklist-able on broken libm implementations.
 */
NPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x);
NPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x);
NPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x);
NPY_INPLACE npy_longdouble npy_expl(npy_longdouble x);
NPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x);
NPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);
NPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x);
NPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);
NPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);
NPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);
/*
 * Non standard functions
 */
NPY_INPLACE double npy_deg2rad(double x);
// Converts degrees to radians
NPY_INPLACE double npy_rad2deg(double x);
// Converts radians to degrees
NPY_INPLACE double npy_logaddexp(double x, double y);
// Computes log(exp(x) + exp(y)) without overflow
NPY_INPLACE double npy_logaddexp2(double x, double y);
// Computes log2(2^x + 2^y) without overflow
NPY_INPLACE double npy_divmod(double x, double y, double *modulus);
// Divides x by y and stores the remainder in 'modulus'
// (presumably floor-division semantics, matching Python's divmod --
//  confirm against the implementation)
NPY_INPLACE double npy_heaviside(double x, double h0);
// Heaviside step function: 0 if x < 0, h0 if x == 0, 1 if x > 0
NPY_INPLACE float npy_deg2radf(float x);
// Converts degrees to radians (single precision)
NPY_INPLACE float npy_rad2degf(float x);
// Converts radians to degrees (single precision)
NPY_INPLACE float npy_logaddexpf(float x, float y);
// Computes log(exp(x) + exp(y)) in single precision without overflow
NPY_INPLACE float npy_logaddexp2f(float x, float y);
// Computes log2(2^x + 2^y) in single precision without overflow
NPY_INPLACE float npy_divmodf(float x, float y, float *modulus);
// Divides x by y in single precision, storing the remainder in 'modulus'
NPY_INPLACE float npy_heavisidef(float x, float h0);
// Heaviside step function in single precision: 0 if x < 0, h0 if x == 0, 1 if x > 0
NPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x);
// Converts degrees to radians (extended precision)
NPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x);
// Converts radians to degrees (extended precision)
NPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);
// Computes log(exp(x) + exp(y)) in extended precision without overflow
NPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);
// Computes log2(2^x + 2^y) in extended precision without overflow
NPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y,
npy_longdouble *modulus);
// Divides x by y in extended precision, storing the remainder in 'modulus'
NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);
// Heaviside step function in extended precision: 0 if x < 0, h0 if x == 0, 1 if x > 0
// Macro: alias for npy_rad2deg
// Macro: alias for npy_rad2degf
// Macro: alias for npy_rad2degl
// Macro: alias for npy_deg2rad
// Macro: alias for npy_deg2radf
// Macro: alias for npy_deg2radl
/*
* Complex number operations
*/
/* Return the real part of a complex double: element 0 of its storage. */
static inline double npy_creal(const npy_cdouble z)
{
    const double *parts = (const double *) &z;
    return parts[0];
}
// Returns the real part of a complex double number
/* Overwrite the real part (element 0) of *z with r. */
static inline void npy_csetreal(npy_cdouble *z, const double r)
{
    double *parts = (double *) z;
    parts[0] = r;
}
// Sets the real part of a complex double number to 'r'
/* Return the imaginary part of a complex double: element 1 of its storage. */
static inline double npy_cimag(const npy_cdouble z)
{
    const double *parts = (const double *) &z;
    return parts[1];
}
// Returns the imaginary part of a complex double number
/* Overwrite the imaginary part (element 1) of *z with i. */
static inline void npy_csetimag(npy_cdouble *z, const double i)
{
    double *parts = (double *) z;
    parts[1] = i;
}
// Sets the imaginary part of a complex double number to 'i'
/* Return the real part of a complex float: element 0 of its storage. */
static inline float npy_crealf(const npy_cfloat z)
{
    const float *parts = (const float *) &z;
    return parts[0];
}
// Returns the real part of a complex float number
/* Overwrite the real part (element 0) of *z with r. */
static inline void npy_csetrealf(npy_cfloat *z, const float r)
{
    float *parts = (float *) z;
    parts[0] = r;
}
// Sets the real part of a complex float number to 'r'
/* Return the imaginary part of a complex float: element 1 of its storage. */
static inline float npy_cimagf(const npy_cfloat z)
{
    const float *parts = (const float *) &z;
    return parts[1];
}
// Returns the imaginary part of a complex float number
/* Overwrite the imaginary part (element 1) of *z with i. */
static inline void npy_csetimagf(npy_cfloat *z, const float i)
{
    float *parts = (float *) z;
    parts[1] = i;
}
// Sets the imaginary part of a complex float number to 'i'
static inline npy_longdouble npy_creall(const npy_clongdouble z)
{
return ((longdouble_t *) &z)[0];
}
// Returns the real part of a complex long double number
/* Overwrite the real part (element 0) of *z with r. */
static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r)
{
    longdouble_t *parts = (longdouble_t *) z;
    parts[0] = r;
}
// Sets the real part of a complex long double number to 'r'
static inline npy_longdouble npy_cimagl(const npy_clongdouble z)
{
return ((longdouble_t *) &z)[1];
}
// Returns the imaginary part of a complex long double number
/* Overwrite the imaginary part (element 1) of *z with i. */
static inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i)
{
    longdouble_t *parts = (longdouble_t *) z;
    parts[1] = i;
}
// Sets the imaginary part of a complex long double number to 'i'
// Macro: sets the real part of a complex number
// Macro: sets the imaginary part of a complex number
// Macro: sets the real part of a complex float number
// Macro: sets the imaginary part of a complex float number
// Macro: sets the real part of a complex long double number
// Macro: sets the imaginary part of a complex long double number
/* Assemble a complex double from real part x and imaginary part y. */
static inline npy_cdouble npy_cpack(double x, double y)
{
    npy_cdouble packed;
    npy_csetimag(&packed, y);
    npy_csetreal(&packed, x);
    return packed;
}
// Packs real and imaginary parts into a complex double number
/*
 * Assemble a complex float from real part x and imaginary part y.
 * Fix: the excerpt was truncated -- the closing brace of this function
 * was missing, leaving the file syntactically invalid.
 */
static inline npy_cfloat npy_cpackf(float x, float y)
{
    npy_cfloat z;
    npy_csetrealf(&z, x);
    npy_csetimagf(&z, y);
    return z;
}
/*
* Single precision complex number representation functions
* using floating point types.
*/
/*
* Packs two long double values into a complex long double value.
*/
/* Assemble a complex long double from real part x and imaginary part y. */
static inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)
{
    npy_clongdouble packed;
    npy_csetimagl(&packed, y);  /* imaginary part */
    npy_csetreall(&packed, x);  /* real part */
    return packed;
}
/*
 * Double precision complex functions
 */
double npy_cabs(npy_cdouble z); // modulus (absolute value) of z
double npy_carg(npy_cdouble z); // argument (phase angle) of z
npy_cdouble npy_cexp(npy_cdouble z); // e**z
npy_cdouble npy_clog(npy_cdouble z); // natural logarithm of z
npy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y); // x**y
npy_cdouble npy_csqrt(npy_cdouble z); // sqrt(z)
npy_cdouble npy_ccos(npy_cdouble z); // cos(z)
npy_cdouble npy_csin(npy_cdouble z); // sin(z)
npy_cdouble npy_ctan(npy_cdouble z); // tan(z)
npy_cdouble npy_ccosh(npy_cdouble z); // cosh(z)
npy_cdouble npy_csinh(npy_cdouble z); // sinh(z)
npy_cdouble npy_ctanh(npy_cdouble z); // tanh(z)
npy_cdouble npy_cacos(npy_cdouble z); // arccos(z)
npy_cdouble npy_casin(npy_cdouble z); // arcsin(z)
npy_cdouble npy_catan(npy_cdouble z); // arctan(z)
npy_cdouble npy_cacosh(npy_cdouble z); // arccosh(z)
npy_cdouble npy_casinh(npy_cdouble z); // arcsinh(z)
npy_cdouble npy_catanh(npy_cdouble z); // arctanh(z)
/*
 * Single precision complex functions
 */
float npy_cabsf(npy_cfloat z); // modulus (absolute value) of z
float npy_cargf(npy_cfloat z); // argument (phase angle) of z
npy_cfloat npy_cexpf(npy_cfloat z); // e**z
npy_cfloat npy_clogf(npy_cfloat z); // natural logarithm of z
npy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y); // x**y
npy_cfloat npy_csqrtf(npy_cfloat z); // sqrt(z)
npy_cfloat npy_ccosf(npy_cfloat z); // cos(z)
npy_cfloat npy_csinf(npy_cfloat z); // sin(z)
npy_cfloat npy_ctanf(npy_cfloat z); // tan(z)
npy_cfloat npy_ccoshf(npy_cfloat z); // cosh(z)
npy_cfloat npy_csinhf(npy_cfloat z); // sinh(z)
npy_cfloat npy_ctanhf(npy_cfloat z); // tanh(z)
npy_cfloat npy_cacosf(npy_cfloat z); // arccos(z)
npy_cfloat npy_casinf(npy_cfloat z); // arcsin(z)
npy_cfloat npy_catanf(npy_cfloat z); // arctan(z)
npy_cfloat npy_cacoshf(npy_cfloat z); // arccosh(z)
npy_cfloat npy_casinhf(npy_cfloat z); // arcsinh(z)
npy_cfloat npy_catanhf(npy_cfloat z); // arctanh(z)
/*
 * Extended precision complex functions
 */
npy_longdouble npy_cabsl(npy_clongdouble z); // modulus (absolute value) of z
npy_longdouble npy_cargl(npy_clongdouble z); // argument (phase angle) of z
npy_clongdouble npy_cexpl(npy_clongdouble z); // e**z
npy_clongdouble npy_clogl(npy_clongdouble z); // natural logarithm of z
npy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y); // x**y
npy_clongdouble npy_csqrtl(npy_clongdouble z); // sqrt(z)
npy_clongdouble npy_ccosl(npy_clongdouble z); // cos(z)
npy_clongdouble npy_csinl(npy_clongdouble z); // sin(z)
npy_clongdouble npy_ctanl(npy_clongdouble z); // tan(z)
npy_clongdouble npy_ccoshl(npy_clongdouble z); // cosh(z)
npy_clongdouble npy_csinhl(npy_clongdouble z); // sinh(z)
npy_clongdouble npy_ctanhl(npy_clongdouble z); // tanh(z)
npy_clongdouble npy_cacosl(npy_clongdouble z); // arccos(z)
npy_clongdouble npy_casinl(npy_clongdouble z); // arcsin(z)
npy_clongdouble npy_catanl(npy_clongdouble z); // arctan(z)
npy_clongdouble npy_cacoshl(npy_clongdouble z); // arccosh(z)
npy_clongdouble npy_casinhl(npy_clongdouble z); // arcsinh(z)
npy_clongdouble npy_catanhl(npy_clongdouble z); // arctanh(z)
/*
 * Functions that set the floating point error
 * status word.
 */
/*
 * platform-dependent code translates floating point
 * status to an integer sum of these values
 */
int npy_clear_floatstatus_barrier(char*); // clear FP status; the char* argument acts as a compiler barrier
int npy_get_floatstatus_barrier(char*);   // read FP status; the char* argument acts as a compiler barrier
/*
 * use caution with these - clang and gcc8.1 are known to reorder calls
 * to this form of the function which can defeat the check. The _barrier
 * form of the call is preferable, where the argument is
 * (char*)&local_variable
 */
/*
 * Barrier-less variants: clang and gcc 8.1 are known to reorder calls
 * to this form, which can defeat the status check.  Prefer the
 * _barrier form above, passing (char*)&local_variable.
 */
int npy_clear_floatstatus(void);
/* Read the current floating-point status flags. */
int npy_get_floatstatus(void);
/* Raise the divide-by-zero floating-point status flag. */
void npy_set_floatstatus_divbyzero(void);
/* Raise the overflow floating-point status flag. */
void npy_set_floatstatus_overflow(void);
/* Raise the underflow floating-point status flag. */
void npy_set_floatstatus_underflow(void);
/* Raise the invalid-operation floating-point status flag. */
void npy_set_floatstatus_invalid(void);
}
/* 如果启用了内联数学操作,包含内联数学头文件 */
.\numpy\numpy\_core\include\numpy\npy_no_deprecated_api.h
/*
* This include file is provided for inclusion in Cython *.pyd files where
* one would like to define the NPY_NO_DEPRECATED_API macro. It can be
* included by
*
* cdef extern from "npy_no_deprecated_api.h": pass
*
*/
/* 检查是否已经包含了旧版 API 相关的头文件,如果是则报错 */
defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \
defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_)
/* 定义 NPY_NO_DEPRECATED_API 宏为当前 NPY_API_VERSION,以禁用过时的 API */
.\numpy\numpy\_core\include\numpy\npy_os.h
// 如果定义了 linux、__linux 或者 __linux__,则定义 NPY_OS_LINUX
defined(__OpenBSD__) || defined(__DragonFly__)
// 如果定义了 __FreeBSD__、__NetBSD__、__OpenBSD__ 或者 __DragonFly__,则定义 NPY_OS_BSD
// 根据具体平台的定义进一步细化为 NPY_OS_FREEBSD、NPY_OS_NETBSD、NPY_OS_OPENBSD 或 NPY_OS_DRAGONFLY
// 如果定义了 sun 或 __sun,则定义 NPY_OS_SOLARIS
// 如果定义了 __CYGWIN__,则定义 NPY_OS_CYGWIN
/* We are on Windows.*/
/* We are using MinGW (64-bit or 32-bit)*/
// 如果定义了 _WIN32
// 如果同时定义了 __MINGW32__ 或者 __MINGW64__,则定义 NPY_OS_MINGW
/* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/
// 否则,如果定义了 _WIN64,则目标是 64 位 Windows
// 定义 NPY_OS_WIN64
/* Otherwise assume we are targeting 32-bit Windows*/
// 否则假设目标是 32 位 Windows
// 定义 NPY_OS_WIN32
// 如果定义了 __APPLE__,则定义 NPY_OS_DARWIN
// 如果定义了 __HAIKU__,则定义 NPY_OS_HAIKU
// 如果未定义任何已知平台,则定义 NPY_OS_UNKNOWN
.\numpy\numpy\_core\include\numpy\numpyconfig.h
/*
* On Mac OS X, because there is only one configuration stage for all the archs
* in universal builds, any macro which depends on the arch needs to be
* hardcoded.
*
* Note that distutils/pip will attempt a universal2 build when Python itself
* is built as universal2, hence this hardcoding is needed even if we do not
* support universal2 wheels anymore (see gh-22796).
* This code block can be removed after we have dropped the setup.py based
* build completely.
*/
// 定义 long 类型的大小为 8 字节(64位架构)
// 定义 long 类型的大小为 4 字节(32位架构)
// 取消之前的 long double 和 complex long double 大小定义
// 取消定义 IEEE 双精度 long double
// 取消定义 Intel 扩展 16 字节 long double
// 根据架构定义 long double 和 complex long double 的大小及相关宏
// 如果未知架构,则报错
/**
* To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro,
* we include API version numbers for specific versions of NumPy.
* To exclude all API that was deprecated as of 1.7, add the following before
*
*
* The same is true for NPY_TARGET_VERSION, although NumPy will default to
* a backwards compatible build anyway.
*/
// 定义不同版本 NumPy 的 API 版本号
/*
* Binary compatibility version number. This number is increased
* whenever the C-API is changed such that binary compatibility is
* broken, i.e. whenever a recompile of extension modules is needed.
*/
/*
* Minor API version we are compiling to be compatible with. The version
* Number is always increased when the API changes via: `NPY_API_VERSION`
* (and should maybe just track the NumPy version).
*
* If we have an internal build, we always target the current version of
* course.
*
* For downstream users, we default to an older version to provide them with
* maximum compatibility by default. Downstream can choose to extend that
* default, or narrow it down if they wish to use newer API. If you adjust
* this, consider the Python version support (example for 1.25.x):
*
* NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12)
* NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9
* NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8
* NumPy 1.15.x supports Python: ... 3.6 3.7
*
* Users of the stable ABI may wish to target the last Python that is not
* end of life. This would be 3.8 at NumPy 1.25 release time.
* 1.17 as default was the choice of oldest-support-numpy at the time and
* has in practice no limit (compared to 1.19). Even earlier becomes legacy.
*/
/* NumPy internal build, always use current version. */
/* user provided a target version, use it */
/* Use the default (increase when dropping Python 3.9 support) */
/* Sanity check the (requested) feature version */
/* No support for irrelevant old targets, no need for error, but warn. */
/*
* We define a human readable translation to the Python version of NumPy
* for error messages (and also to allow grepping the binaries for conda).
*/
.\numpy\numpy\_core\include\numpy\random\bitgen.h
/* Must match the declaration in numpy/random/<any>.pxd */
// 定义了一个名为 bitgen 的结构体类型,用于封装随机数生成器的状态和相关函数指针
/*
 * Generic bit-generator interface: couples a generator's opaque state
 * with the function pointers that draw random output from it.
 */
typedef struct bitgen {
void *state; /* opaque pointer to the underlying generator's state */
uint64_t (*next_uint64)(void *st); /* produce the next random uint64_t */
uint32_t (*next_uint32)(void *st); /* produce the next random uint32_t */
double (*next_double)(void *st); /* produce the next random double (presumably in [0, 1); confirm against the generator) */
uint64_t (*next_raw)(void *st); /* produce the next raw 64-bit word */
} bitgen_t;
.\numpy\numpy\_core\include\numpy\random\distributions.h
extern "C" {
/*
* RAND_INT_TYPE is used to share integer generators with RandomState which
* used long in place of int64_t. If changing a distribution that uses
* RAND_INT_TYPE, then the original unmodified copy must be retained for
* use in RandomState by copying to the legacy distributions source file.
*/
// 定义一个宏,用于返回两个数中的最小值
// 定义一个宏,用于返回两个数中的最大值
// 如果 M_PI 未定义,则定义为圆周率的数值
// 定义结构体 s_binomial_t,用于存储二项分布的参数
/*
 * Cached state for the binomial sampler.  When has_binomial != 0, the
 * remaining fields hold constants precomputed for one particular
 * (n, p) pair so that repeated draws can skip the setup work.  The
 * terse field names follow the sampling algorithm's notation --
 * presumably the BTPE algorithm; confirm against the distributions
 * source.
 */
typedef struct s_binomial_t {
int has_binomial; /* !=0: following parameters initialized for binomial */
double psave;     /* p for which the cache below was computed */
RAND_INT_TYPE nsave; /* n for which the cache below was computed */
double r;
double q;
double fm;
RAND_INT_TYPE m;
double p1;
double xm;
double xl;
double xr;
double c;
double laml;
double lamr;
double p2;
double p3;
double p4;
} binomial_t;
// The following functions are exported (DECLDIR) for external callers.
/* standard uniform variates (scalar and buffer-fill variants) */
DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state);
DECLDIR double random_standard_uniform(bitgen_t *bitgen_state);
DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *);
DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *);
/* positive signed and raw unsigned integer draws */
DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
/* standard exponential variates, including inverse-CDF fill variants */
DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *);
DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp, float *);
DECLDIR void random_standard_exponential_inv_fill(bitgen_t *, npy_intp, double *);
DECLDIR void random_standard_exponential_inv_fill_f(bitgen_t *, npy_intp, float *);
/* standard normal variates (scalar and buffer-fill variants) */
DECLDIR double random_standard_normal(bitgen_t *bitgen_state);
DECLDIR float random_standard_normal_f(bitgen_t *bitgen_state);
DECLDIR void random_standard_normal_fill(bitgen_t *, npy_intp, double *);
DECLDIR void random_standard_normal_fill_f(bitgen_t *, npy_intp, float *);
/* gamma with a shape parameter; normal/gamma with loc/scale parameters */
DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
}
Annotated version (same declarations, with per-line commentary):
extern "C" {
/*
 * RAND_INT_TYPE is used to share integer generators with RandomState,
 * which used long in place of int64_t.  If a distribution that uses
 * RAND_INT_TYPE is changed, the original unmodified copy must be kept
 * for RandomState by copying it into the legacy distributions source file.
 */
// MIN macro (defined elsewhere): returns the smaller of two values
// MAX macro (defined elsewhere): returns the larger of two values
// Struct s_binomial_t: parameters cached for binomial sampling
typedef struct s_binomial_t {
int has_binomial; // non-zero: the following parameters are initialized for the binomial
double psave; // saved probability value
RAND_INT_TYPE nsave; // saved trial count
/* remaining fields: cached intermediate constants used by the samplers */
double r;
double q;
double fm;
RAND_INT_TYPE m;
double p1;
double xm;
double xl;
double xr;
double c;
double laml;
double lamr;
double p2;
double p3;
double p4;
} binomial_t;
// The following functions are exported (DECLDIR) for external callers.
DECLDIR float random_standard_uniform_f(bitgen_t *bitgen_state); // standard uniform, float
DECLDIR double random_standard_uniform(bitgen_t *bitgen_state); // standard uniform, double
DECLDIR void random_standard_uniform_fill(bitgen_t *, npy_intp, double *); // fill a double buffer with standard uniform values
DECLDIR void random_standard_uniform_fill_f(bitgen_t *, npy_intp, float *); // fill a float buffer with standard uniform values
DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state); // positive int64_t draw
DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state); // positive int32_t draw
DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state); // positive integer draw (returned as int64_t)
DECLDIR uint64_t random_uint(bitgen_t *bitgen_state); // unsigned 64-bit integer draw
DECLDIR double random_standard_exponential(bitgen_t *bitgen_state); // standard exponential, double
DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state); // standard exponential, float
DECLDIR void random_standard_exponential_fill(bitgen_t *, npy_intp, double *); // fill a double buffer with standard exponential values
DECLDIR void random_standard_exponential_fill_f(bitgen_t *, npy_intp,
// float version: gamma variate with float shape and scale parameters
DECLDIR float random_gamma_f(bitgen_t *bitgen_state, float shape, float scale);
// exponential variate with the given scale
DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
// uniform variate described by a lower bound and a range
DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
// beta variate with parameters a and b
DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
// chi-square variate with df degrees of freedom
DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
// F-distribution variate with numerator/denominator degrees of freedom
DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
// standard Cauchy variate
DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
// Pareto variate with shape parameter a
DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
// Weibull variate with shape parameter a
DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
// power-distribution variate with exponent a
DECLDIR double random_power(bitgen_t *bitgen_state, double a);
// Laplace variate with location and scale
DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
// Gumbel variate with location and scale
DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
// logistic variate with location and scale
DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
// log-normal variate parameterized by mean and sigma
DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
// Rayleigh variate with the given mode
DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
// Student's t variate with df degrees of freedom
DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
// noncentral chi-square variate (df degrees of freedom, noncentrality nonc)
DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df, double nonc);
// noncentral F variate (dfnum/dfden degrees of freedom, noncentrality nonc)
DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, double dfden, double nonc);
// Wald (inverse Gaussian) variate with mean and scale
DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
// von Mises variate with direction mu and concentration kappa
DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
// triangular variate described by left, mode, and right
DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode, double right);
// Poisson variate with rate lam
DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
// negative binomial variate with parameters n and p
DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n, double p);
// binomial variate; `binomial` caches intermediate parameters between calls
DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial);
// log-series variate with parameter p
DECLDIR int64_t random_logseries(bitgen_t *bitgen_state, double p);
// geometric variate with success probability p
DECLDIR int64_t random_geometric(bitgen_t *bitgen_state, double p);
// geometric variate computed via the search method
DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
// Zipf variate with parameter a
DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
// hypergeometric variate (good/bad population counts, sample size)
DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample);
// unsigned integer draw bounded by max
DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
// bounded uint64 draw: offset, range, bit mask, and masked-rejection flag
DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off, uint64_t rng, uint64_t mask, bool use_masked);
/* Buffered bounded integer draws for narrower widths; `bcnt` and `buf`
 * carry leftover random bits between calls. */
DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
uint32_t off, uint32_t rng,
uint32_t mask, bool use_masked,
int *bcnt, uint32_t *buf);
DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
uint16_t off, uint16_t rng,
uint16_t mask, bool use_masked,
int *bcnt, uint32_t *buf);
DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
uint8_t rng, uint8_t mask,
bool use_masked, int *bcnt,
uint32_t *buf);
DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
npy_bool rng, npy_bool mask,
bool use_masked, int *bcnt,
uint32_t *buf);
/* Array-fill variants of the bounded integer generators: write `cnt`
 * values into `out`. */
DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
uint64_t rng, npy_intp cnt,
bool use_masked, uint64_t *out);
DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
uint32_t rng, npy_intp cnt,
bool use_masked, uint32_t *out);
DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
uint16_t rng, npy_intp cnt,
bool use_masked, uint16_t *out);
DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
uint8_t rng, npy_intp cnt,
bool use_masked, uint8_t *out);
DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
npy_bool rng, npy_intp cnt,
bool use_masked, npy_bool *out);
/* Multinomial and multivariate hypergeometric samplers. */
DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
double *pix, npy_intp d, binomial_t *binomial);
DECLDIR int random_multivariate_hypergeometric_count(bitgen_t *bitgen_state,
int64_t total,
size_t num_colors, int64_t *colors,
int64_t nsample,
size_t num_variates, int64_t *variates);
DECLDIR int random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
int64_t total,
size_t num_colors, int64_t *colors,
int64_t nsample,
size_t num_variates, int64_t *variates);
/* NOTE(review): annotated duplicate of random_multivariate_hypergeometric_marginals,
 * here declared returning void while the copy above returns int — extraction
 * artifact; confirm against the real header.  Parameters:
 * - bitgen_state: pointer to the bit generator state
 * - total: total population count
 * - num_colors / colors: number of colors and the per-color count array
 * - nsample: number of samples
 * - num_variates / variates: number of variates and the variate array */
DECLDIR void random_multivariate_hypergeometric_marginals(bitgen_t *bitgen_state,
int64_t total,
size_t num_colors, int64_t *colors,
int64_t nsample,
size_t num_variates, int64_t *variates);
/* random_binomial_btpe: used in legacy-distributions.c and distributions.c,
 * but not exported. */
/* Parameters:
 * - bitgen_state: pointer to the bit generator state
 * - n: number of trials
 * - p: success probability
 * - binomial: cache of precomputed binomial parameters */
RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
RAND_INT_TYPE n,
double p,
binomial_t *binomial);
/* random_binomial_inversion: used in legacy-distributions.c and
 * distributions.c. */
/* Parameters: same as random_binomial_btpe above. */
RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
RAND_INT_TYPE n,
double p,
binomial_t *binomial);
/* random_loggam: log-gamma helper taking a single double argument x. */
double random_loggam(double x);
/* Inline helper next_double: draw the next double from the generator by
 * dispatching through the bitgen's next_double function pointer, passing
 * its opaque state. */
static inline double next_double(bitgen_t *bitgen_state) {
return bitgen_state->next_double(bitgen_state->state);
}
}
/* endif directive that closes the conditional-compilation block */
.\numpy\numpy\_core\include\numpy\ufuncobject.h
/*
 * Include-guard macro preventing repeated inclusion of numpy's ufunc headers
 */
/*
 * Includes numpy's math-related header
 */
/*
 * Includes numpy's common header
 */
/*
 * When compiled as C++, declarations are given C linkage via extern "C"
 */
extern "C" {
/*
 * PyUFuncGenericFunction: pointer type for the standard element-wise or
 * generalized ufunc inner loop.  It receives the operand data pointers
 * (args), the loop dimensions, the per-operand strides, and an opaque
 * inner-loop data pointer.
 */
typedef void (*PyUFuncGenericFunction)(
char **args,
npy_intp const *dimensions,
npy_intp const *strides,
void *innerloopdata);
/*
 * PyUFunc_MaskedStridedInnerLoopFunc: the most generic one-dimensional
 * inner-loop type for a masked, standard element-wise ufunc.  "Masked"
 * means the loop skips computation for items selected out by the mask
 * (maskptr, advanced by mask_stride).  It receives the data pointers,
 * strides, the mask pointer and stride, the item count, and inner-loop
 * auxiliary data.
 */
typedef void (PyUFunc_MaskedStridedInnerLoopFunc)(
char **dataptrs, npy_intp *strides,
char *maskptr, npy_intp mask_stride,
npy_intp count,
NpyAuxData *innerloopdata);
/* Forward declaration of _tagPyUFuncObject, referenced by the
 * type-resolution function pointer type below. */
struct _tagPyUFuncObject;
/*
 * Given the operands of a ufunc call, determine the input and output data
 * types of the calculation and return an inner loop function.  This
 * function should validate that the casting rules are followed, and fail
 * if they are not.
 *
 * For backwards compatibility, the regular type resolution function does
 * not support auxiliary data with object semantics.  The type resolution
 * call for the masked generic function returns a standard NpyAuxData
 * object, to which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros
 * apply.
 *
 * ufunc: the ufunc object.
 * casting: the 'casting' parameter provided to the ufunc.
 * operands: an array of length (ufunc->nin + ufunc->nout); output
 *           arguments may be NULL.
 * type_tup: either NULL, or the type_tup passed to the ufunc.
 * out_dtypes: should be populated with an array of
 *           (ufunc->nin + ufunc->nout) new references to dtypes, one for
 *           each input and output; all in native-endian format.
 *
 * Returns 0 on success, -1 (with an exception set) on failure, and -2 if
 * Py_NotImplemented should be returned.
 */
typedef int (PyUFunc_TypeResolutionFunc)(
struct _tagPyUFuncObject *ufunc,
NPY_CASTING casting,
PyArrayObject **operands,
PyObject *type_tup,
PyArray_Descr **out_dtypes);
/*
* for each core_num_dim_ix distinct dimension names,
* the possible "frozen" size (-1 if not frozen).
*/
npy_intp *core_dim_sizes;
/*
* for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
*/
npy_uint32 *core_dim_flags;
/* Identity for reduction, when identity == PyUFunc_IdentityValue */
PyObject *identity_value;
/* New in NPY_API_VERSION 0x0000000F and above */
/* New private fields related to dispatching */
void *_dispatch_cache;
/* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
PyObject *_loops;
/*
* 对于每个 core_num_dim_ix 不同的维度名称,
* 可能的“冻结”大小(如果未冻结则为-1)。
*/
npy_intp *core_dim_sizes;
/*
* 对于每个不同的核心维度,一组 UFUNC_CORE_DIM* 标志
*/
npy_uint32 *core_dim_flags;
/* 当 identity == PyUFunc_IdentityValue 时,用于约简的标识 */
PyObject *identity_value;
/* 在 NPY_API_VERSION 0x0000000F 及以上版本中新增 */
/* 与调度相关的新私有字段 */
void *_dispatch_cache;
/* 一个 PyListObject 包含“(DTypes 元组, ArrayMethod/Promoter)” */
PyObject *_loops;
/* 结构体定义,表示 Python 中的通用函数对象 */
} PyUFuncObject;
/* Header containing the Python array object */
/* Constant definitions for UFuncs */
/* The ufunc core dimension size is determined by the operands */
/* The ufunc core dimension may be ignored */
/* Flags inferred during execution */
/* Attribute flags of the UFunc object */
/* Multithreading-related macro definitions */
/* Save the thread state when a threaded section begins */
/* Restore the thread state when a threaded section ends */
/* Empty macro definitions for when multithreading is not allowed */
/* Definitions of UFunc identities */
/* Identity is 0; the operation order may be reordered */
/* Identity is 1; the operation order may be reordered */
/* Identity is -1; reorderable; used for bitwise-and reductions */
/* No identity; not reorderable; multi-axis simultaneous reduction not allowed */
/* No identity; reorderable; multi-axis simultaneous reduction allowed */
/* Identity is an identity value; reorderable; multi-axis reduction allowed */
/* Kinds of UFunc operations */
/* PyUFunc_PyFuncData: holds the data of a Python-function-backed ufunc. */
typedef struct {
int nin; /* number of input arguments */
int nout; /* number of output arguments */
PyObject *callable; /* the Python callable object */
} PyUFunc_PyFuncData;
/* Linked-list node describing a user-defined one-dimensional loop. */
typedef struct _loop1d_info {
PyUFuncGenericFunction func; /* generic inner-loop function pointer */
void *data; /* opaque data passed to the loop */
int *arg_types; /* array of argument type numbers */
struct _loop1d_info *next; /* next node in the linked list */
int nargs; /* number of arguments */
PyArray_Descr **arg_dtypes; /* array of argument dtypes */
} PyUFunc_Loop1d;
/* UFUNC_PYVALS_NAME 宏定义 */
/* 下面的宏定义已经废弃,请使用 npy_set_floatstatus_* 在 npymath 库中 */
/* 生成浮点异常错误的宏 */
/* 如果未定义 UFUNC_NOFPE,则定义其默认行为 */
/* 清除 Borland C++ 的默认浮点异常处理 */
}
// 如果没有进入上一个
// 包含私有的 ufunc API 头文件 "__ufunc_api.h"
// 如果是 C++ 编译环境,则结束 extern "C" 块
}
// 结束 NUMPY_UFUNCOBJECT_H_ 宏的定义
.\numpy\numpy\_core\include\numpy\utils.h
// 如果是 GCC 编译器,则定义 __COMP_NPY_UNUSED 为未使用属性
// 如果是 Intel 编译器,则定义 __COMP_NPY_UNUSED 为未使用属性
// 如果是 Clang 编译器,则定义 __COMP_NPY_UNUSED 为未使用属性
// 其他情况下,__COMP_NPY_UNUSED 不做特殊处理
// 如果是 GCC、Intel 编译器或者 Clang 编译器,则定义 NPY_DECL_ALIGNED(x) 为按 x 对齐
// 如果是 MSVC 编译器,则定义 NPY_DECL_ALIGNED(x) 为按 x 对齐
// 其他情况下,不做特殊处理
/* Use this to tag a variable as not used. It will remove unused variable
* warning on support platforms (see __COM_NPY_UNUSED) and mangle the variable
* to avoid accidental use */
// 使用此宏标记未使用的变量,以消除在支持的平台上的未使用变量警告(参见 __COM_NPY_UNUSED),并混淆变量以避免意外使用
.\numpy\numpy\_core\include\numpy\_neighborhood_iterator_imp.h
/*
* Private API (here for inline)
*/
// 定义一个静态内联函数,用于增加邻域迭代器的坐标
static inline int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
/*
* Update to next item of the iterator
*
* Note: this simply increment the coordinates vector, last dimension
* incremented first , i.e, for dimension 3
* ...
* -1, -1, -1
* -1, -1, 0
* -1, -1, 1
* ....
* -1, 0, -1
* -1, 0, 0
* ....
* 0, -1, -1
* 0, -1, 0
* ....
*/
// 定义宏用于更新迭代器的坐标,实现按照特定顺序递增坐标
wb = iter->coordinates[c] < iter->bounds[c][1]; \
if (wb) { \
iter->coordinates[c] += 1; \
return 0; \
} \
else { \
iter->coordinates[c] = iter->bounds[c][0]; \
}
// 2-D specialization with the loop manually unrolled: advance the last
// dimension first; if it wraps, fall through and advance the first.
static inline int
_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
{
npy_intp wb;
_UPDATE_COORD_ITER(1) // advance dimension 1 (returns early while in bounds)
_UPDATE_COORD_ITER(0) // dimension 1 wrapped: advance dimension 0
return 0;
}
/*
 * Advance to the next neighbour: increment the coordinate vector, then
 * recompute the data pointer through the iterator's translate callback.
 */
static inline int
PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
{
_PyArrayNeighborhoodIter_IncrCoord (iter); // step the coordinates
iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates); // refresh the data pointer
return 0;
}
/*
 * Reset the neighborhood iterator: move every coordinate back to its
 * lower bound, then recompute the data pointer through the iterator's
 * translate callback.
 */
static inline int
PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
{
    npy_intp dim = iter->nd;

    while (dim-- > 0) {
        iter->coordinates[dim] = iter->bounds[dim][0];
    }
    iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
    return 0;
}
.\numpy\numpy\_core\include\numpy\_public_dtype_api_table.h
/*
* Public exposure of the DType Classes. These are tricky to expose
* via the Python API, so they are exposed through this header for now.
*
* These definitions are only relevant for the public API and we reserve
* the slots 320-360 in the API table generation for this (currently).
*
* TODO: This file should be consolidated with the API table generation
* (although not sure the current generation is worth preserving).
*/
/* All of these require NumPy 2.0 support */
/*
* The type of the DType metaclass
*/
/*
* NumPy's builtin DTypes:
*/
#define PyArray_BoolDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[1])
/* Integers */
#define PyArray_ByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[2])
#define PyArray_UByteDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[3])
#define PyArray_ShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[4])
#define PyArray_UShortDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[5])
#define PyArray_IntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[6])
#define PyArray_UIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[7])
#define PyArray_LongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[8])
#define PyArray_ULongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[9])
#define PyArray_LongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[10])
#define PyArray_ULongLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[11])
/* Integer aliases */
#define PyArray_Int8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[12])
#define PyArray_UInt8DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[13])
#define PyArray_Int16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[14])
#define PyArray_UInt16DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[15])
#define PyArray_Int32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[16])
#define PyArray_UInt32DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[17])
#define PyArray_Int64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[18])
#define PyArray_UInt64DType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[19])
#define PyArray_IntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[20])
#define PyArray_UIntpDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[21])
/* Floats */
#define PyArray_HalfDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[22])
#define PyArray_FloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[23])
#define PyArray_DoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[24])
#define PyArray_LongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[25])
/* Complex */
#define PyArray_CFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[26])
#define PyArray_CDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[27])
#endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */
#endif /* !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD) */
#endif /* NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ */
/* Macros exposing the remaining builtin DType classes through slots of the
 * PyArray_API table. */
#define PyArray_CLongDoubleDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[28])
/* Strings/bytes */
#define PyArray_BytesDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[29])
#define PyArray_UnicodeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[30])
/* Datetime/timedelta */
#define PyArray_DatetimeDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[31])
#define PyArray_TimedeltaDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[32])
/* Object/void types */
#define PyArray_ObjectDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[33])
#define PyArray_VoidDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[34])
/* Python types (used as markers for scalars) */
#define PyArray_PyLongDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[35])
#define PyArray_PyFloatDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[36])
#define PyArray_PyComplexDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[37])
/* Default integer type */
#define PyArray_DefaultIntDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[38])
/* New non-legacy DTypes, in the order they were added */
#define PyArray_StringDType (*(PyArray_DTypeMeta *)(PyArray_API + 320)[39])
/* NOTE: offset 40 is available */
/* Abstract classes restart at a larger offset: */
#define PyArray_IntAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[366])
#define PyArray_FloatAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[367])
#define PyArray_ComplexAbstractDType (*(PyArray_DTypeMeta *)PyArray_API[368])
#endif /* NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION */
#endif /* NPY_INTERNAL_BUILD */
#endif /* NUMPY_CORE_INCLUDE_NUMPY__PUBLIC_DTYPE_API_TABLE_H_ */
.\numpy\numpy\_core\memmap.py
from contextlib import nullcontext
import operator
import numpy as np
from .._utils import set_module
from .numeric import uint8, ndarray, dtype
__all__ = ['memmap']
dtypedescr = dtype
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]
mode_equivalents = {
"readonly":"r",
"copyonwrite":"c",
"readwrite":"r+",
"write":"w+"
}
@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. NumPy's
memmap's are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Flush the memmap instance to write the changes to the file. Currently there
is no API to close the underlying ``mmap``. It is tricky to ensure the
resource is actually closed, since it may be shared between different
memmap instances.
Parameters
----------
filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode : {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r' | Open existing file for reading only. |
+------+-------------------------------------------------------------+
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
| | If ``mode == 'w+'`` then `shape` must also be specified. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
"""
pass
shape : int or sequence of ints, optional
.. versionchanged:: 2.0
order : {'C', 'F'}, optional
Attributes
----------
filename : str or pathlib.Path instance
offset : int
mode : str
Methods
-------
flush
See also
--------
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
Examples
--------
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
>>> fp[:] = data[:]
>>> fp
__array_priority__ = -100.0
def __array_finalize__(self, obj):
    """Inherit memmap bookkeeping from ``obj`` when memory is shared.

    When the new array actually shares memory with a parent memmap, the
    ``_mmap``, ``filename``, ``offset`` and ``mode`` attributes are copied
    over; otherwise they are cleared to ``None``.
    """
    inherit = hasattr(obj, '_mmap') and np.may_share_memory(self, obj)
    # Conditional expressions short-circuit, so the parent's attributes
    # are only read when we actually inherit from it.
    self._mmap = obj._mmap if inherit else None
    self.filename = obj.filename if inherit else None
    self.offset = obj.offset if inherit else None
    self.mode = obj.mode if inherit else None
def flush(self):
    """
    Write any changes in the array to the file on disk.

    For further information, see `memmap`.

    Parameters
    ----------
    None

    See Also
    --------
    memmap
    """
    base = self.base
    # Delegate to the underlying buffer's flush when one is present.
    if base is not None and hasattr(base, 'flush'):
        base.flush()
def __array_wrap__(self, arr, context=None, return_scalar=False):
    """Wrap ufunc results so plain computations do not return memmaps."""
    wrapped = super().__array_wrap__(arr, context)

    # Leave the result untouched when wrapping handed back self, or when
    # self is a subclass of memmap (the subclass decides for itself).
    if wrapped is self or type(self) is not memmap:
        return wrapped
    # Either extract the 0-d scalar, or demote the result to ndarray.
    return wrapped[()] if return_scalar else wrapped.view(np.ndarray)
def __getitem__(self, index):
    """Index like ndarray, demoting results without a backing mmap."""
    item = super().__getitem__(index)
    # An indexing result may be a memmap instance that carries no
    # underlying mmap; hand it back as a plain ndarray view instead.
    if type(item) is memmap and item._mmap is None:
        return item.view(type=ndarray)
    return item
.\numpy\numpy\_core\memmap.pyi
from numpy import memmap as memmap
__all__: list[str]
.\numpy\numpy\_core\multiarray.py
"""
Create the numpy._core.multiarray namespace for backward compatibility.
In v1.16 the multiarray and umath c-extension modules were merged into
a single _multiarray_umath extension module. So we replicate the old
namespace by importing from the extension module.
"""
import functools
from . import overrides
from . import _multiarray_umath
from ._multiarray_umath import *
from ._multiarray_umath import (
_flagdict, from_dlpack, _place, _reconstruct,
_vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version,
_get_madvise_hugepage, _set_madvise_hugepage,
_get_promotion_state, _set_promotion_state
)
# Public names re-exported by this backward-compatibility namespace.
__all__ = [
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP',
'_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string',
'_monotonicity', 'add_docstring', 'arange', 'array', 'asarray',
'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount',
'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
'frombuffer', 'fromfile', 'fromiter', 'fromstring',
'get_handler_name', 'get_handler_version', 'inner', 'interp',
'interp_complex', 'is_busday', 'lexsort', 'matmul', 'vecdot',
'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters',
'normalize_axis_index', 'packbits', 'promote_types', 'putmask',
'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function',
'set_legacy_print_mode',
'set_typeDict', 'shares_memory', 'typeinfo',
'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros',
'_get_promotion_state', '_set_promotion_state'
]
# Rewrite __module__ so these C-implemented functions present themselves
# under their public module paths rather than the extension module.
_reconstruct.__module__ = 'numpy._core.multiarray'
scalar.__module__ = 'numpy._core.multiarray'
from_dlpack.__module__ = 'numpy'
arange.__module__ = 'numpy'
array.__module__ = 'numpy'
asarray.__module__ = 'numpy'
asanyarray.__module__ = 'numpy'
ascontiguousarray.__module__ = 'numpy'
asfortranarray.__module__ = 'numpy'
datetime_data.__module__ = 'numpy'
empty.__module__ = 'numpy'
frombuffer.__module__ = 'numpy'
fromfile.__module__ = 'numpy'
fromiter.__module__ = 'numpy'
frompyfunc.__module__ = 'numpy'
fromstring.__module__ = 'numpy'
may_share_memory.__module__ = 'numpy'
nested_iters.__module__ = 'numpy'
promote_types.__module__ = 'numpy'
zeros.__module__ = 'numpy'
_get_promotion_state.__module__ = 'numpy'
_set_promotion_state.__module__ = 'numpy'
normalize_axis_index.__module__ = 'numpy'
# Helper used below: pairs a C-implemented function with a Python
# dispatcher for __array_function__ dispatch; docstrings are taken from
# the dispatcher and signature verification is disabled.
array_function_from_c_func_and_dispatcher = functools.partial(
overrides.array_function_from_dispatcher,
module='numpy', docs_from_dispatcher=True, verify=False)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
def empty_like(
prototype, dtype=None, order=None, subok=None, shape=None, *, device=None
):
"""
empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *,
device=None)
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : array_like
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
.. versionadded:: 1.6.0
order : {'C', 'F', 'A', or 'K'}, optional
Overrides the memory layout of the result. 'C' means C-order,
'F' means F-order, 'A' means 'F' if `prototype` is Fortran
contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
as closely as possible.
.. versionadded:: 1.6.0
subok : bool, optional.
If True, then the newly created array will use the sub-class
type of `prototype`, otherwise it will be a base-class array. Defaults
to True.
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
.. versionadded:: 1.17.0
device : str, optional
The device on which to place the created array. Default: None.
For Array-API interoperability only, so must be ``"cpu"`` if passed.
.. versionadded:: 2.0.0
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
Unlike other array creation functions (e.g. `zeros_like`, `ones_like`,
`full_like`), `empty_like` does not initialize the values of the array,
and may therefore be marginally faster. However, the values stored in the
newly allocated array are arbitrary. For reproducible behavior, be sure
to set each element of the array before reading.
Examples
--------
>>> a = ([1,2,3], [4,5,6]) # a is array-like
# Create an uninitialized array with the same shape and dtype as `a`
>>> np.empty_like(a)
array([[-1073741821, -1073741821, 3], # uninitialized
[ 0, 0, -1073741821]])
# Create a new array `a` holding specific floating-point values
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
# Create an uninitialized array with the same shape and dtype as `a`
>>> np.empty_like(a)
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
"""
# Dispatcher body: return only the array-like arguments that matter for
# __array_function__ dispatch.
return (prototype,)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
    """
    concatenate(
        (a1, a2, ...),
        axis=0,
        out=None,
        dtype=None,
        casting="same_kind"
    )

    Join a sequence of arrays along an existing axis.

    Parameters
    ----------
    a1, a2, ... : sequence of array_like
        The arrays must have the same shape, except in the dimension
        corresponding to `axis` (the first, by default).
    axis : int, optional
        The axis along which the arrays will be joined.  If axis is None,
        arrays are flattened before use.  Default is 0.
    out : ndarray, optional
        If provided, the destination to place the result.  The shape must be
        correct, matching that of what concatenate would have returned if no
        out argument were specified.
    dtype : str or dtype
        If provided, the destination array will have this dtype.  Cannot be
        provided together with `out`.

        .. versionadded:: 1.20.0

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur.  Defaults to
        'same_kind'.  For a description of the options, please see
        :term:`casting`.

        .. versionadded:: 1.20.0

    Returns
    -------
    res : ndarray
        The concatenated array.

    See Also
    --------
    ma.concatenate : Concatenate function that preserves input masks.
    array_split : Split an array into multiple sub-arrays of equal or
                  near-equal size.
    split : Split array into a list of multiple sub-arrays of equal size.
    hsplit : Split array into multiple sub-arrays horizontally (column wise).
    vsplit : Split array into multiple sub-arrays vertically (row wise).
    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
    stack : Stack a sequence of arrays along a new axis.
    block : Assemble arrays from blocks.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    column_stack : Stack 1-D arrays as columns into a 2-D array.

    Notes
    -----
    When one or more of the arrays to be concatenated is a MaskedArray,
    this function will return a MaskedArray object instead of an ndarray,
    but the input masks are *not* preserved.  In cases where a MaskedArray
    is expected as input, use the ma.concatenate function from the masked
    array module instead.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> b = np.array([[5, 6]])
    >>> np.concatenate((a, b), axis=0)
    array([[1, 2],
           [3, 4],
           [5, 6]])
    >>> np.concatenate((a, b.T), axis=1)
    array([[1, 2, 5],
           [3, 4, 6]])
    >>> np.concatenate((a, b), axis=None)
    array([1, 2, 3, 4, 5, 6])
    """
    # __array_function__ dispatcher: return the arguments that are relevant
    # for dispatch; the C implementation in ``_multiarray_umath`` does the
    # actual concatenation.  (A stray ``pass`` statement before this body
    # has been removed.)
    if out is not None:
        # ``out`` also participates in dispatch, so append it to the
        # sequence of relevant arguments.
        arrays = list(arrays)
        arrays.append(out)
    return arrays
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
def inner(a, b):
    """
    inner(a, b, /)

    Inner product of two arrays.

    Ordinary inner product of vectors for 1-D arrays (without complex
    conjugation); in higher dimensions a sum product over the last axes.

    Parameters
    ----------
    a, b : array_like
        If `a` and `b` are nonscalar, their last dimensions must match.

    Returns
    -------
    out : ndarray
        A scalar when both `a` and `b` are scalars or 1-D arrays;
        otherwise an array with
        ``out.shape = (*a.shape[:-1], *b.shape[:-1])``.

    Raises
    ------
    ValueError
        If both `a` and `b` are nonscalar and their last dimensions have
        different sizes.

    See Also
    --------
    tensordot : Sum products over arbitrary axes.
    dot : Generalised matrix product, using second last dimension of `b`.
    einsum : Einstein summation convention.

    Notes
    -----
    For vectors (1-D arrays) it computes the ordinary inner-product::

        np.inner(a, b) = sum(a[:]*b[:])

    More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``::

        np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))

    In addition `a` or `b` may be scalars, in which case::

        np.inner(a,b) = a*b

    Examples
    --------
    >>> a = np.array([1,2,3])
    >>> b = np.array([0,1,0])
    >>> np.inner(a, b)
    2
    >>> a = np.arange(24).reshape((2,3,4))
    >>> b = np.arange(4)
    >>> np.inner(a, b)
    array([[ 14,  38,  62],
           [ 86, 110, 134]])
    >>> np.inner(np.eye(2), 7)
    array([[7., 0.],
           [0., 7.]])
    """
    # Dispatcher: both operands are relevant for __array_function__
    # resolution.
    return a, b
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
def where(condition, x=None, y=None):
    """
    where(condition, [x, y], /)

    Return elements chosen from `x` or `y` depending on `condition`.

    .. note::
        When only `condition` is provided, this function is a shorthand for
        ``np.asarray(condition).nonzero()``. Using `nonzero` directly should
        be preferred, as it behaves correctly for subclasses. The rest of
        this documentation covers only the case where all three arguments
        are provided.

    Parameters
    ----------
    condition : array_like, bool
        Where True, yield `x`, otherwise yield `y`.
    x, y : array_like
        Values from which to choose. `x`, `y` and `condition` need to be
        broadcastable to some shape.

    Returns
    -------
    out : ndarray
        An array with elements from `x` where `condition` is True, and
        elements from `y` elsewhere.

    See Also
    --------
    choose
    nonzero : The function that is called when x and y are omitted.

    Notes
    -----
    If all the arrays are 1-D, `where` is equivalent to::

        [xv if c else yv
         for c, xv, yv in zip(condition, x, y)]

    Examples
    --------
    >>> a = np.arange(10)
    >>> np.where(a < 5, a, 10*a)
    array([ 0,  1,  2,  3,  4, 50, 60, 70, 80, 90])

    This can be used on multidimensional arrays too:

    >>> np.where([[True, False], [True, True]],
    ...          [[1, 2], [3, 4]],
    ...          [[9, 8], [7, 6]])
    array([[1, 8],
           [3, 4]])

    The shapes of x, y, and the condition are broadcast together:

    >>> x, y = np.ogrid[:3, :4]
    >>> np.where(x < y, x, 10 + y)
    array([[10,  0,  0,  0],
           [10, 11,  1,  1],
           [10, 11, 12,  2]])
    """
    # Dispatcher: the original text closed the docstring early, leaving the
    # remaining documentation as bare (syntactically invalid) statements;
    # everything is merged back into a single docstring here.
    return (condition, x, y)
# Build the ``lexsort`` array_function from the C implementation and its
# dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
def lexsort(keys, axis=None):
    """
    lexsort(keys, axis=-1)

    Perform an indirect stable sort using a sequence of keys.

    Given multiple sorting keys, lexsort returns an array of integer
    indices that describes the sort order by multiple keys. The last key
    in the sequence is used for the primary sort order, the second-to-last
    key for the secondary sort order, and so on.

    Parameters
    ----------
    keys : (k, m, n, ...) array-like
        The k keys to be sorted. The last key (e.g. the last row when
        `keys` is a 2-D array) is the primary sort key. Each element of
        `keys` along the zeroth axis must be an array-like object of the
        same shape.
    axis : int, optional
        Axis to be indirectly sorted. By default, sort over the last axis
        of each sequence. Separate slices along `axis` are sorted
        independently.

    Returns
    -------
    indices : (m, n, ...) ndarray of ints
        Array of indices that sort the keys along the specified axis.

    See Also
    --------
    argsort : Indirect sort.
    ndarray.sort : In-place sort.
    sort : Return a sorted copy of an array.

    Examples
    --------
    Sort by surname first, then by first name:

    >>> surnames =    ('Hertz',    'Galilei', 'Hertz')
    >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
    >>> ind = np.lexsort((first_names, surnames))
    >>> ind
    array([1, 2, 0])
    >>> [surnames[i] + ", " + first_names[i] for i in ind]
    ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']

    Sort according to two numerical keys, first by the elements of ``a``,
    then breaking ties with elements of ``b``:

    >>> a = [1, 5, 1, 4, 3, 4, 4]
    >>> b = [9, 4, 0, 4, 0, 2, 1]
    >>> ind = np.lexsort((b, a))
    >>> ind
    array([2, 0, 4, 6, 5, 3, 1])
    >>> [(a[i], b[i]) for i in ind]
    [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]

    The zeroth axis of `keys` always corresponds to the sequence of keys,
    so 2-D arrays are handled just like other sequences of keys:

    >>> arr = np.asarray([b, a])
    >>> ind2 = np.lexsort(arr)
    >>> np.testing.assert_equal(ind2, ind)
    """
    # Dispatcher: when a tuple of keys is supplied each key participates in
    # dispatch individually; otherwise the single sequence is the only
    # relevant argument.
    return keys if isinstance(keys, tuple) else (keys,)
# Build the ``can_cast`` array_function from the C implementation and its
# dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
    """
    can_cast(from_, to, casting='safe')

    Returns True if cast between data types can occur according to the
    casting rule.

    Parameters
    ----------
    from_ : dtype, dtype specifier, NumPy scalar, or array
        Data type, NumPy scalar, or array to cast from.
    to : dtype or dtype specifier
        Data type to cast to.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur.

        * 'no' means the data types should not be cast at all.
        * 'equiv' means only byte-order changes are allowed.
        * 'safe' means only casts which can preserve values are allowed.
        * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.

    Returns
    -------
    out : bool
        True if cast can occur according to the casting rule.

    Notes
    -----
    .. versionchanged:: 1.17.0
        Casting between a simple data type and a structured one is possible
        only for "unsafe" casting.  Casting to multiple fields is allowed,
        but casting from multiple fields is not.

    .. versionchanged:: 2.0
        This function does not support Python scalars anymore and does not
        apply any value-based logic for 0-D arrays and NumPy scalars.

    See also
    --------
    dtype, result_type

    Examples
    --------
    >>> np.can_cast(np.int32, np.int64)
    True
    >>> np.can_cast('i8', 'f8')
    True
    >>> np.can_cast('i8', 'f4')
    False
    """
    # Dispatcher: only the source operand is relevant for
    # __array_function__ resolution.
    return from_,
# Build the ``min_scalar_type`` array_function from the C implementation and
# its dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
    """
    min_scalar_type(a, /)

    For scalar ``a``, returns the data type with the smallest size
    and smallest scalar kind which can hold its value.  For non-scalar
    array ``a``, returns the vector's dtype unmodified.

    Floating point values are not demoted to integers,
    and complex values are not demoted to floats.

    Parameters
    ----------
    a : scalar or array_like
        The value whose minimal data type is to be found.

    Returns
    -------
    out : dtype
        The minimal data type.

    See Also
    --------
    result_type, promote_types, dtype, can_cast

    Examples
    --------
    >>> np.min_scalar_type(10)
    dtype('uint8')
    >>> np.min_scalar_type(-260)
    dtype('int16')
    >>> np.min_scalar_type(3.1)
    dtype('float16')
    >>> np.min_scalar_type(1e50)
    dtype('float64')
    >>> np.min_scalar_type(np.arange(4, dtype='f8'))
    dtype('float64')
    """
    # Dispatcher: the original text closed the docstring early, so the
    # remaining doctest examples were left as bare (invalid) statements;
    # they are merged back into the docstring above.
    return (a,)
# Build the ``result_type`` array_function from the C implementation and its
# dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
def result_type(*arrays_and_dtypes):
    """
    result_type(*arrays_and_dtypes)

    Returns the type that results from applying the NumPy
    type promotion rules to the arguments.

    Type promotion in NumPy works similarly to the rules in languages
    like C++, with some slight differences.  When both scalars and
    arrays are used, the array's type takes precedence and the actual
    value of the scalar is taken into account.

    Parameters
    ----------
    arrays_and_dtypes : list of arrays and dtypes
        The operands of some operation whose result type is needed.

    Returns
    -------
    out : dtype
        The result type.

    See also
    --------
    dtype, promote_types, min_scalar_type, can_cast

    Notes
    -----
    The specific algorithm used is as follows.

    Categories are determined by first checking which of boolean,
    integer (int/uint), or floating point (float/complex) the maximum
    kind of all the arrays and the scalars are.

    If there are only scalars or the maximum category of the scalars
    is higher than the maximum category of the arrays,
    the data types are combined with :func:`promote_types`
    to produce the return value.

    Otherwise, `min_scalar_type` is called on each scalar, and
    the resulting data types are all combined with :func:`promote_types`
    to produce the return value.

    Examples
    --------
    >>> np.result_type(3, np.arange(7, dtype='i1'))
    dtype('int8')
    >>> np.result_type('i4', 'c8')
    dtype('complex128')
    >>> np.result_type(3.0, -2)
    dtype('float64')
    """
    # Dispatcher: every positional argument participates in dispatch, so
    # the tuple itself is the sequence of relevant arguments.
    return arrays_and_dtypes
# Build the ``dot`` array_function from the C implementation and its
# dispatcher.  (The original text split this function into a dead ``pass``
# stub plus a second, undecorated definition; they are merged here.)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
def dot(a, b, out=None):
    """
    dot(a, b, out=None)

    Dot product of two arrays. Specifically,

    - If both `a` and `b` are 1-D arrays, it is inner product of vectors
      (without complex conjugation).
    - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
      but using :func:`matmul` or ``a @ b`` is preferred.
    - If either `a` or `b` is 0-D (scalar), it is equivalent to
      :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is
      preferred.
    - If `a` is an N-D array and `b` is a 1-D array, it is a sum product
      over the last axis of `a` and `b`.
    - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it
      is a sum product over the last axis of `a` and the second-to-last
      axis of `b`.

    It uses an optimized BLAS library when possible (see `numpy.linalg`).

    Parameters
    ----------
    a : array_like
        First argument.
    b : array_like
        Second argument.
    out : ndarray, optional
        Output argument.  This must have the exact kind that would be
        returned if it was not used, must be C-contiguous, and its dtype
        must be the dtype that would be returned for ``dot(a, b)``.  This
        is a performance feature; if these conditions are not met, an
        exception is raised instead of attempting to be flexible.

    Returns
    -------
    output : ndarray
        Returns the dot product of `a` and `b`.  If `a` and `b` are both
        scalars or both 1-D arrays then a scalar is returned; otherwise an
        array is returned.  If `out` is given, then it is returned.

    Raises
    ------
    ValueError
        If the last dimension of `a` is not the same size as the
        second-to-last dimension of `b`.

    See Also
    --------
    vdot : Complex-conjugating dot product.
    tensordot : Sum products over arbitrary axes.
    einsum : Einstein summation convention.
    matmul : '@' operator as method with out parameter.
    linalg.multi_dot : Chained dot product.

    Examples
    --------
    >>> np.dot(3, 4)
    12

    Neither argument is complex-conjugated:

    >>> np.dot([2j, 3j], [2j, 3j])
    (-13+0j)

    For 2-D arrays it is the matrix product:

    >>> a = [[1, 0], [0, 1]]
    >>> b = [[4, 1], [2, 2]]
    >>> np.dot(a, b)
    array([[4, 1],
           [2, 2]])
    """
    # Dispatcher: ``a``, ``b`` and ``out`` are all relevant for
    # __array_function__ resolution.
    return (a, b, out)
# Build the ``vdot`` array_function from the C implementation and its
# dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
def vdot(a, b):
    """
    vdot(a, b, /)

    Return the dot product of two vectors.

    The vdot(`a`, `b`) function handles complex numbers differently than
    dot(`a`, `b`).  If the first argument is complex the complex conjugate
    of the first argument is used for the calculation of the dot product.

    Note that `vdot` handles multidimensional arrays differently than
    `dot`: it does *not* perform a matrix product, but flattens input
    arguments to 1-D vectors first.  Consequently, it should only be used
    for vectors.

    Parameters
    ----------
    a : array_like
        If `a` is complex the complex conjugate is taken before
        calculation of the dot product.
    b : array_like
        Second argument to the dot product.

    Returns
    -------
    output : ndarray
        Dot product of `a` and `b`.  Can be an int, float, or complex
        depending on the types of `a` and `b`.

    See Also
    --------
    dot : Return the dot product without using the complex conjugate of
          the first argument.

    Examples
    --------
    >>> a = np.array([1+2j,3+4j])
    >>> b = np.array([5+6j,7+8j])
    >>> np.vdot(a, b)
    (70-8j)
    >>> np.vdot(b, a)
    (70+8j)

    Note that higher-dimensional arrays are flattened!

    >>> a = np.array([[1, 4], [5, 6]])
    >>> b = np.array([[4, 1], [2, 2]])
    >>> np.vdot(a, b)
    30
    """
    # Dispatcher: both operands are relevant for __array_function__
    # resolution.
    return a, b
# Build the ``bincount`` array_function from the C implementation and its
# dispatcher.  (The original text terminated the docstring early and
# returned ``x`` alone, which dropped ``weights`` from dispatch; the merged
# version below restores the correct ``(x, weights)`` return.)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
def bincount(x, weights=None, minlength=None):
    """
    bincount(x, /, weights=None, minlength=0)

    Count number of occurrences of each value in array of non-negative
    ints.

    The number of bins (of size 1) is one larger than the largest value in
    `x`.  If `minlength` is specified, there will be at least this number
    of bins in the output array (though it will be longer if necessary,
    depending on the contents of `x`).

    Each bin gives the number of occurrences of its index value in `x`.
    If `weights` is specified the input array is weighted by it, i.e. if a
    value ``n`` is found at position ``i``, ``out[n] += weight[i]``
    instead of ``out[n] += 1``.

    Parameters
    ----------
    x : array_like, 1 dimension, nonnegative ints
        Input array.
    weights : array_like, optional
        Weights, array of the same shape as `x`.
    minlength : int, optional
        A minimum number of bins for the output array.

    Returns
    -------
    out : ndarray of ints
        The result of binning the input array.
        The length of `out` is equal to ``np.amax(x)+1``.

    Raises
    ------
    ValueError
        If the input is not 1-dimensional, or contains elements with
        negative values, or if `minlength` is negative.
    TypeError
        If the type of the input is float or complex.

    See Also
    --------
    histogram, digitize, unique

    Examples
    --------
    >>> np.bincount(np.arange(5))
    array([1, 1, 1, 1, 1])
    >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
    array([1, 3, 1, 1, 0, 0, 0, 1])

    >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
    >>> np.bincount(x).size == np.amax(x)+1
    True

    The input array needs to be of integer dtype, otherwise a
    TypeError is raised:

    >>> np.bincount(np.arange(5, dtype=float))
    Traceback (most recent call last):
      ...
    TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
    according to the rule 'safe'

    A possible use of ``bincount`` is to perform sums over
    variable-size chunks of an array, using the ``weights`` keyword.

    >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
    >>> x = np.array([0, 1, 1, 2, 2, 2])
    >>> np.bincount(x,  weights=w)
    array([ 0.3,  0.7,  1.1])
    """
    # Dispatcher: both ``x`` and ``weights`` are relevant for
    # __array_function__ resolution.
    return (x, weights)
# Build the ``ravel_multi_index`` array_function from the C implementation
# and its dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
    """
    ravel_multi_index(multi_index, dims, mode='raise', order='C')

    Converts a tuple of index arrays into an array of flat
    indices, applying boundary modes to the multi-index.

    Parameters
    ----------
    multi_index : tuple of array_like
        A tuple of integer arrays, one array for each dimension.
    dims : tuple of ints
        The shape of array into which the indices from ``multi_index``
        apply.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices are handled.  Can specify
        either one mode or a tuple of modes, one mode per index.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        In 'clip' mode, a negative index which would normally
        wrap will clip to 0 instead.
    order : {'C', 'F'}, optional
        Determines whether the multi-index should be viewed as
        indexing in row-major (C-style) or column-major
        (Fortran-style) order.

    Returns
    -------
    raveled_indices : ndarray
        An array of indices into the flattened version of an array
        of dimensions ``dims``.

    See Also
    --------
    unravel_index

    Examples
    --------
    >>> arr = np.array([[3,6,6],[4,5,1]])
    >>> np.ravel_multi_index(arr, (7,6))
    array([22, 41, 37])
    >>> np.ravel_multi_index(arr, (7,6), order='F')
    array([31, 41, 13])
    >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
    1621
    """
    # Dispatcher: the tuple of index arrays itself is the sequence of
    # relevant arguments, so it is returned unchanged.
    return multi_index
# Build the ``unravel_index`` array_function from the C implementation and
# its dispatcher.  (The original text contained an early ``return indices``
# — a bare value rather than the required tuple of relevant arguments —
# followed by stranded doctest lines; both defects are fixed here.)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None):
    """
    unravel_index(indices, shape, order='C')

    Converts a flat index or array of flat indices into a tuple
    of coordinate arrays.

    Parameters
    ----------
    indices : array_like
        An integer array whose elements are indices into the flattened
        version of an array of dimensions ``shape``.
    shape : tuple of ints
        The shape of the array to use for unraveling ``indices``.

        .. versionchanged:: 1.16.0
            Renamed from ``dims`` to ``shape``.
    order : {'C', 'F'}, optional
        Determines whether the indices should be viewed as indexing in
        row-major (C-style) or column-major (Fortran-style) order.

    Returns
    -------
    unraveled_coords : tuple of ndarray
        Each array in the tuple has the same shape as the ``indices``
        array.

    See Also
    --------
    ravel_multi_index

    Examples
    --------
    >>> np.unravel_index([22, 41, 37], (7, 6))
    (array([3, 6, 6]), array([4, 5, 1]))
    >>> np.unravel_index([31, 41, 13], (7, 6), order='F')
    (array([3, 6, 6]), array([4, 5, 1]))
    >>> np.unravel_index(1621, (6, 7, 8, 9))
    (3, 1, 4, 1)
    """
    # Dispatcher: only ``indices`` is relevant for __array_function__
    # resolution; it must be wrapped in a tuple.
    return (indices,)
# Build the ``copyto`` array_function from the C implementation and its
# dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
def copyto(dst, src, casting=None, where=None):
    """
    copyto(dst, src, casting='same_kind', where=True)

    Copies values from one array to another, broadcasting as necessary.

    Raises a TypeError if the `casting` rule is violated, and if
    `where` is provided, it selects which elements to copy.

    Parameters
    ----------
    dst : ndarray
        The array into which values are copied.
    src : array_like
        The array from which values are copied.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur when copying.

        * 'no' means the data types should not be cast at all.
        * 'equiv' means only byte-order changes are allowed.
        * 'safe' means only casts which can preserve values are allowed.
        * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.
    where : array_like of bool, optional
        A boolean array which is broadcasted to match the dimensions
        of `dst`, and selects elements to copy from `src` to `dst`
        wherever it contains the value True.

    Examples
    --------
    >>> A = np.array([4, 5, 6])
    >>> B = [1, 2, 3]
    >>> np.copyto(A, B)
    >>> A
    array([1, 2, 3])
    """
    # Dispatcher: ``dst``, ``src`` and ``where`` all participate in
    # __array_function__ resolution.
    return dst, src, where
# Build the ``putmask`` array_function from the C implementation and its
# dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
def putmask(a, /, mask, values):
    """
    putmask(a, mask, values)

    Changes elements of an array based on conditional and input values.

    Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.

    If `values` is not the same size as `a` and `mask` then it will
    repeat.  This gives behavior different from ``a[mask] = values``.

    Parameters
    ----------
    a : ndarray
        Target array.
    mask : array_like
        Boolean mask array.  It has to be the same shape as `a`.
    values : array_like
        Values to put into `a` where `mask` is True.  If `values` is
        smaller than `a` it will be repeated.

    See Also
    --------
    place, put, take, copyto

    Examples
    --------
    >>> x = np.arange(6).reshape(2, 3)
    >>> np.putmask(x, x>2, x**2)
    >>> x
    array([[ 0,  1,  2],
           [ 9, 16, 25]])

    If `values` is smaller than `a` it is repeated:

    >>> x = np.arange(5)
    >>> np.putmask(x, x>1, [-33, -44])
    >>> x
    array([  0,   1, -33, -44, -33])
    """
    # Dispatcher: all three operands are relevant for __array_function__
    # resolution.
    return a, mask, values
# Build the ``packbits`` array_function from the C implementation and its
# dispatcher.  (The original text defined ``packbits`` three times, with
# the real documentation stranded in comments between the stubs; the
# definitions are merged into a single decorated function here.)
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
def packbits(a, axis=None, bitorder='big'):
    """
    packbits(a, /, axis=None, bitorder='big')

    Packs the elements of a binary-valued array into bits in a uint8
    array.

    The result is padded to full bytes by inserting zero bits at the end.

    Parameters
    ----------
    a : array_like
        An array of integers or booleans whose elements should be packed
        to bits.
    axis : int, optional
        The dimension over which bit-packing is done.
        ``None`` implies packing the flattened array.
    bitorder : {'big', 'little'}, optional
        The order of the input bits.  'big' will mimic bin(val),
        ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
        reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
        Defaults to 'big'.

    Returns
    -------
    packed : ndarray
        Array of type uint8 whose elements represent bits corresponding
        to the logical (0 or nonzero) value of the input elements.  The
        shape of `packed` has the same number of dimensions as the input
        (unless `axis` is None, in which case the output is 1-D).

    See Also
    --------
    unpackbits: Unpacks elements of a uint8 array into a binary-valued
                output array.

    Examples
    --------
    >>> a = np.array([[[1,0,1],
    ...                [0,1,0]],
    ...               [[1,1,0],
    ...                [0,0,1]]])
    >>> b = np.packbits(a, axis=-1)
    >>> b
    array([[[160],
            [ 64]],
           [[192],
            [ 32]]], dtype=uint8)

    Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
    and 32 = 0010 0000.
    """
    # Dispatcher: only ``a`` is relevant for __array_function__
    # resolution.
    return (a,)
# Build the ``unpackbits`` array_function from the C implementation and its
# dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
    """
    unpackbits(a, /, axis=None, count=None, bitorder='big')

    Unpacks elements of a uint8 array into a binary-valued output array.

    Each element of `a` represents a bit-field that should be unpacked
    into a binary-valued output array.  The shape of the output array is
    either 1-D (if `axis` is ``None``) or the same shape as the input
    array with unpacking done along the axis specified.

    Parameters
    ----------
    a : ndarray, uint8 type
        Input array.
    axis : int, optional
        The dimension over which bit-unpacking is done.
        ``None`` implies unpacking the flattened array.
    count : int or None, optional
        The number of elements to unpack along `axis`, provided as a way
        of undoing the effect of packing a size that is not a multiple of
        eight.  A non-negative number means to only unpack `count` bits.
        A negative number means to trim off that many bits from the end.
        ``None`` means to unpack the entire array (default).  Counts
        larger than the available number of bits will add zero padding to
        the output.  Negative counts must not exceed the available number
        of bits.
    bitorder : {'big', 'little'}, optional
        The order of the returned bits.  'big' will mimic bin(val),
        ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will
        reverse the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
        Defaults to 'big'.

    Returns
    -------
    unpacked : ndarray, uint8 type
        The elements are binary-valued (0 or 1).

    See Also
    --------
    packbits : Packs the elements of a binary-valued array into bits in
               a uint8 array.

    Examples
    --------
    >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
    >>> b = np.unpackbits(a, axis=1)
    >>> b
    array([[0, 0, 0, 0, 0, 0, 1, 0],
           [0, 0, 0, 0, 0, 1, 1, 1],
           [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
    >>> c = np.unpackbits(a, axis=1, count=-3)
    >>> c
    array([[0, 0, 0, 0, 0],
           [0, 0, 0, 0, 0],
           [0, 0, 0, 1, 0]], dtype=uint8)
    """
    # Dispatcher: only ``a`` is relevant for __array_function__
    # resolution.
    return a,
# Build the ``shares_memory`` array_function from the C implementation and
# its dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
def shares_memory(a, b, max_work=None):
    """
    shares_memory(a, b, /, max_work=None)

    Determine if two arrays share memory.

    .. warning::

       This function can be exponentially slow for some inputs, unless
       `max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
       If in doubt, use `numpy.may_share_memory` instead.

    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : int, optional
        Effort to spend on solving the overlap problem (maximum number
        of candidate solutions to consider).  The following special
        values are recognized:

        max_work=MAY_SHARE_EXACT  (default)
            The problem is solved exactly.  In this case, the function
            returns True only if there is an element shared between the
            arrays.  Finding the exact solution may take extremely long
            in some cases.
        max_work=MAY_SHARE_BOUNDS
            Only the memory bounds of a and b are checked.

    Raises
    ------
    numpy.exceptions.TooHardError
        Exceeded max_work.

    Returns
    -------
    out : bool

    See Also
    --------
    may_share_memory

    Examples
    --------
    >>> x = np.array([1, 2, 3, 4])
    >>> np.shares_memory(x, np.array([5, 6, 7]))
    False
    >>> np.shares_memory(x[::2], x)
    True
    >>> np.shares_memory(x[::2], x[1::2])
    False

    Checking whether two arrays share memory is NP-complete, and
    runtime may increase exponentially in the number of dimensions.
    Hence, `max_work` should generally be set to a finite number, as it
    is possible to construct examples that take extremely long to run:

    >>> from numpy.lib.stride_tricks import as_strided
    >>> x = np.zeros([192163377], dtype=np.int8)
    >>> x1 = as_strided(
    ...     x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
    >>> x2 = as_strided(
    ...     x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
    >>> np.shares_memory(x1, x2, max_work=1000)
    Traceback (most recent call last):
    ...
    numpy.exceptions.TooHardError: Exceeded max_work
    """
    # Dispatcher: both arrays are relevant for __array_function__
    # resolution.
    return a, b
# Build the ``may_share_memory`` array_function from the C implementation
# and its dispatcher.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
def may_share_memory(a, b, max_work=None):
    """
    may_share_memory(a, b, /, max_work=None)

    Determine if two arrays might share memory

    A return of True does not necessarily mean that the two arrays
    share any element.  It just means that they *might*.

    Only the memory bounds of a and b are checked by default.

    Parameters
    ----------
    a, b : ndarray
        Input arrays
    max_work : int, optional
        Effort to spend on solving the overlap problem.  See
        `shares_memory` for details.  Default for ``may_share_memory``
        is to do a bounds check.

    Returns
    -------
    out : bool

    See Also
    --------
    shares_memory

    Examples
    --------
    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
    False
    >>> x = np.zeros([3, 4])
    >>> np.may_share_memory(x[:,0], x[:,1])
    True
    """
    # Dispatcher: both arrays are relevant for __array_function__
    # resolution.
    return a, b
# Register ``is_busday`` with the array-function protocol, dispatching to the
# C implementation in ``_multiarray_umath``.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
    """
    is_busday(
        dates,
        weekmask='1111100',
        holidays=None,
        busdaycal=None,
        out=None
    )

    Calculates which of the given dates are valid days, and which are not.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    dates : array_like of datetime64[D]
        The array of dates to process.
    weekmask : str or array_like of bool, optional
        A seven-element array indicating which of Monday through Sunday are
        valid days. May be specified as a length-seven list or array, like
        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
        weekdays, optionally separated by white space. Valid abbreviations
        are: Mon Tue Wed Thu Fri Sat Sun
    holidays : array_like of datetime64[D], optional
        An array of dates to consider as invalid dates.  They may be
        specified in any order, and NaT (not-a-time) dates are ignored.
        This list is saved in a normalized form that is suited for
        fast calculations of valid days.
    busdaycal : busdaycalendar, optional
        A `busdaycalendar` object which specifies the valid days.  If this
        parameter is provided, neither weekmask nor holidays may be
        provided.
    out : array of bool, optional
        If provided, this array is filled with the result.

    Returns
    -------
    out : array of bool
        An array with the same shape as ``dates``, containing True for
        each valid day, and False for each invalid day.

    See Also
    --------
    busdaycalendar : An object that specifies a custom set of valid days.
    busday_offset : Applies an offset counted in valid days.
    busday_count : Counts how many valid days are in a half-open date range.

    Examples
    --------
    >>> # The weekdays are Friday, Saturday, and Monday
    ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
    ...              holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
    array([False, False,  True])
    """
    # Dispatcher: return every argument that may implement
    # ``__array_function__``.  The original chunk had a second, unreachable
    # ``return`` here with a tuple that dropped ``busdaycal`` — removed.
    return (dates, weekmask, holidays, busdaycal, out)
# Register ``busday_offset`` with the array-function protocol, dispatching to
# the C implementation in ``_multiarray_umath``.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
                  busdaycal=None, out=None):
    """
    busday_offset(
        dates,
        offsets,
        roll='raise',
        weekmask='1111100',
        holidays=None,
        busdaycal=None,
        out=None
    )

    First adjusts the date to fall on a valid day according to
    the ``roll`` rule, then applies offsets to the given dates
    counted in valid days.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    dates : array_like of datetime64[D]
        The array of dates to process.
    offsets : array_like of int
        The array of offsets, which is broadcast with ``dates``.
    roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', \
'modifiedfollowing', 'modifiedpreceding'}, optional
        How to treat dates that do not fall on a valid day. The default
        is 'raise'.

        * 'raise' means to raise an exception for an invalid day.
        * 'nat' means to return a NaT (not-a-time) for an invalid day.
        * 'forward' and 'following' mean to take the first valid day
          later in time.
        * 'backward' and 'preceding' mean to take the first valid day
          earlier in time.
        * 'modifiedfollowing' means to take the first valid day
          later in time unless it is across a Month boundary, in which
          case to take the first valid day earlier in time.
        * 'modifiedpreceding' means to take the first valid day
          earlier in time unless it is across a Month boundary, in which
          case to take the first valid day later in time.
    weekmask : str or array_like of bool, optional
        A seven-element array indicating which of Monday through Sunday are
        valid days. May be specified as a length-seven list or array, like
        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
        weekdays, optionally separated by white space. Valid abbreviations
        are: Mon Tue Wed Thu Fri Sat Sun
    holidays : array_like of datetime64[D], optional
        An array of dates to consider as invalid dates.  They may be
        specified in any order, and NaT (not-a-time) dates are ignored.
        This list is saved in a normalized form that is suited for
        fast calculations of valid days.
    busdaycal : busdaycalendar, optional
        A `busdaycalendar` object which specifies the valid days.  If this
        parameter is provided, neither weekmask nor holidays may be
        provided.
    out : array of datetime64[D], optional
        If provided, this array is filled with the result.

    Returns
    -------
    out : array of datetime64[D]
        An array with a shape from broadcasting ``dates`` and ``offsets``
        together, containing the dates with offsets applied.

    See Also
    --------
    busdaycalendar : An object that specifies a custom set of valid days.
    is_busday : Returns a boolean array indicating valid days.
    busday_count : Counts how many valid days are in a half-open date range.

    Examples
    --------
    >>> # First business day in October 2011 (not accounting for holidays)
    ... np.busday_offset('2011-10', 0, roll='forward')
    np.datetime64('2011-10-03')
    >>> # Last business day in February 2012 (not accounting for holidays)
    ... np.busday_offset('2012-03', -1, roll='forward')
    np.datetime64('2012-02-29')
    >>> # Third Wednesday in January 2011
    ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
    np.datetime64('2011-01-19')
    >>> # 2012 Mother's Day in Canada and the U.S.
    ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
    np.datetime64('2012-05-13')

    >>> # First business day on or after a date
    ... np.busday_offset('2011-03-20', 0, roll='forward')
    np.datetime64('2011-03-21')
    >>> np.busday_offset('2011-03-22', 0, roll='forward')
    np.datetime64('2011-03-22')
    >>> # First business day after a date
    ... np.busday_offset('2011-03-20', 1, roll='backward')
    np.datetime64('2011-03-21')
    >>> np.busday_offset('2011-03-22', 1, roll='backward')
    np.datetime64('2011-03-23')
    """
    # Dispatcher: return the array-like arguments that may implement
    # ``__array_function__``.  The original chunk used the Chinese word
    # ``返回`` instead of the ``return`` keyword (a syntax error) and split
    # the docstring into two adjacent string literals; both are fixed here.
    return (dates, offsets, weekmask, holidays, out)
# Hook ``busday_count`` into the array-function protocol; the computation
# itself lives in the C function ``_multiarray_umath.busday_count``.
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
def busday_count(begindates, enddates, weekmask=None, holidays=None,
                 busdaycal=None, out=None):
    """
    busday_count(
        begindates,
        enddates,
        weekmask='1111100',
        holidays=[],
        busdaycal=None,
        out=None
    )

    Counts the number of valid days between `begindates` and
    `enddates`, not including the day of `enddates`.

    If ``enddates`` specifies a date value that is earlier than the
    corresponding ``begindates`` date value, the count will be negative.

    .. versionadded:: 1.7.0

    Parameters
    ----------
    begindates : array_like of datetime64[D]
        The array of the first dates for counting.
    enddates : array_like of datetime64[D]
        The array of the end dates for counting, which are excluded
        from the count themselves.
    weekmask : str or array_like of bool, optional
        A seven-element array indicating which of Monday through Sunday are
        valid days. May be specified as a length-seven list or array, like
        [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
        like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
        weekdays, optionally separated by white space. Valid abbreviations
        are: Mon Tue Wed Thu Fri Sat Sun
    holidays : array_like of datetime64[D], optional
        An array of dates to consider as invalid dates.  They may be
        specified in any order, and NaT (not-a-time) dates are ignored.
        This list is saved in a normalized form that is suited for
        fast calculations of valid days.
    busdaycal : busdaycalendar, optional
        A `busdaycalendar` object which specifies the valid days.  If this
        parameter is provided, neither weekmask nor holidays may be
        provided.
    out : array of int, optional
        If provided, this array is filled with the result.

    Returns
    -------
    out : array of int
        An array with a shape from broadcasting ``begindates`` and
        ``enddates`` together, containing the number of valid days between
        the begin and end dates.

    See Also
    --------
    busdaycalendar : An object that specifies a custom set of valid days.
    is_busday : Returns a boolean array indicating valid days.
    busday_offset : Applies an offset counted in valid days.

    Examples
    --------
    >>> # Number of weekdays in January 2011
    ... np.busday_count('2011-01', '2011-02')
    21
    >>> # Number of weekdays in 2011
    >>> np.busday_count('2011', '2012')
    260
    >>> # Number of Saturdays in 2011
    ... np.busday_count('2011', '2012', weekmask='Sat')
    53
    """
    # Dispatcher body: expose the array-like arguments for
    # ``__array_function__`` protocol resolution.
    return (begindates, enddates, weekmask, holidays, out)
# 从 C 函数和调度器生成的数组函数装饰器,将 C 函数 _multiarray_umath.datetime_as_string 转换为数组函数
@array_function_from_c_func_and_dispatcher(
_multiarray_umath.datetime_as_string)
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
"""
"""
# 将数组中的日期时间转换为字符串表示形式的函数
def datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind'):
# arr: datetime64 类型的数组,需要格式化的 UTC 时间戳数组
# unit: 字符串,表示日期时间的精度单位,可以是 None, 'auto',或者参考 datetime 单位的字符串
# timezone: 字符串 {'naive', 'UTC', 'local'} 或者 tzinfo 对象,指定显示日期时间时使用的时区信息
# 'naive' 表示没有指定时区,'UTC' 表示使用 UTC 时区并以 'Z' 结尾,'local' 表示转换为本地时区并带有 +-#### 的时区偏移量
# 如果是 tzinfo 对象,则与 'local' 类似,但使用指定的时区
# casting: 字符串 {'no', 'equiv', 'safe', 'same_kind', 'unsafe'},指定在不同日期时间单位之间转换时的类型转换规则
# 返回值
# str_arr: 与 arr 具有相同形状的字符串数组
return (arr,)
这段代码是一个函数定义,用于将给定的日期时间数组(以 numpy 的 datetime64 类型表示)转换为字符串数组。它支持多种参数配置,包括日期时间精度、时区信息和类型转换规则。
.\numpy\numpy\_core\multiarray.pyi
import builtins
import os
import datetime as dt
from collections.abc import Sequence, Callable, Iterable
from typing import (
Literal as L,
Any,
overload,
TypeVar,
SupportsIndex,
final,
Final,
Protocol,
ClassVar,
)
import numpy as np
from numpy import (
busdaycalendar as busdaycalendar,
broadcast as broadcast,
dtype as dtype,
ndarray as ndarray,
nditer as nditer,
ufunc,
str_,
uint8,
intp,
int_,
float64,
timedelta64,
datetime64,
generic,
unsignedinteger,
signedinteger,
floating,
complexfloating,
_OrderKACF,
_OrderCF,
_CastingKind,
_ModeKind,
_SupportsBuffer,
_IOProtocol,
_CopyMode,
_NDIterFlagsKind,
_NDIterOpFlagsKind,
)
from numpy._typing import (
_ShapeLike,
DTypeLike,
_DTypeLike,
NDArray,
ArrayLike,
_ArrayLike,
_SupportsArrayFunc,
_NestedSequence,
_ArrayLikeBool_co,
_ArrayLikeUInt_co,
_ArrayLikeInt_co,
_ArrayLikeFloat_co,
_ArrayLikeComplex_co,
_ArrayLikeTD64_co,
_ArrayLikeDT64_co,
_ArrayLikeObject_co,
_ArrayLikeStr_co,
_ArrayLikeBytes_co,
_ScalarLike_co,
_IntLike_co,
_FloatLike_co,
_TD64Like_co,
)
_T_co = TypeVar("_T_co", covariant=True)
_T_contra = TypeVar("_T_contra", contravariant=True)
_SCT = TypeVar("_SCT", bound=generic)
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
_UnitKind = L[
"Y",
"M",
"D",
"h",
"m",
"s",
"ms",
"us", "μs",
"ns",
"ps",
"fs",
"as",
]
_RollKind = L[
"nat",
"forward",
"following",
"backward",
"preceding",
"modifiedfollowing",
"modifiedpreceding",
]
class _SupportsLenAndGetItem(Protocol[_T_contra, _T_co]):
def __len__(self) -> int: ...
def __getitem__(self, key: _T_contra, /) -> _T_co: ...
__all__: list[str]
ALLOW_THREADS: Final[int]
BUFSIZE: L[8192]
CLIP: L[0]
WRAP: L[1]
RAISE: L[2]
MAXDIMS: L[32]
MAY_SHARE_BOUNDS: L[0]
MAY_SHARE_EXACT: L[-1]
tracemalloc_domain: L[389047]
# ``empty_like`` overloads: preserve the exact array subclass, then the
# scalar type, then fall back to ``Any``.  The garbled chunk duplicated the
# ``prototype: object`` overload and dropped the ``@overload def`` header of
# the ``dtype: _DTypeLike[_SCT]`` overload; both are repaired here.
@overload
def empty_like(
    prototype: _ArrayType,
    dtype: None = ...,
    order: _OrderKACF = ...,
    subok: bool = ...,
    shape: None | _ShapeLike = ...,
    *,
    device: None | L["cpu"] = ...,
) -> _ArrayType: ...
@overload
def empty_like(
    prototype: _ArrayLike[_SCT],
    dtype: None = ...,
    order: _OrderKACF = ...,
    subok: bool = ...,
    shape: None | _ShapeLike = ...,
    *,
    device: None | L["cpu"] = ...,
) -> NDArray[_SCT]: ...
@overload
def empty_like(
    prototype: object,
    dtype: None = ...,
    order: _OrderKACF = ...,
    subok: bool = ...,
    shape: None | _ShapeLike = ...,
    *,
    device: None | L["cpu"] = ...,
) -> NDArray[Any]: ...
@overload
def empty_like(
    prototype: Any,
    dtype: _DTypeLike[_SCT],
    order: _OrderKACF = ...,
    subok: bool = ...,
    shape: None | _ShapeLike = ...,
    *,
    device: None | L["cpu"] = ...,
) -> NDArray[_SCT]: ...
@overload
def empty_like(
    prototype: Any,
    dtype: DTypeLike,
    order: _OrderKACF = ...,
    subok: bool = ...,
    shape: None | _ShapeLike = ...,
    *,
    device: None | L["cpu"] = ...,
) -> NDArray[Any]: ...
@overload
def array(
object: _ArrayType,
dtype: None = ...,
*,
copy: None | bool | _CopyMode = ...,
order: _OrderKACF = ...,
subok: L[True],
ndmin: int = ...,
like: None | _SupportsArrayFunc = ...,
) -> _ArrayType: ...
@overload
def array(
object: _ArrayLike[_SCT],
dtype: None = ...,
*,
copy: None | bool | _CopyMode = ...,
order: _OrderKACF = ...,
subok: bool = ...,
ndmin: int = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def array(
object: object,
dtype: None = ...,
*,
copy: None | bool | _CopyMode = ...,
order: _OrderKACF = ...,
subok: bool = ...,
ndmin: int = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def array(
object: Any,
dtype: _DTypeLike[_SCT],
*,
copy: None | bool | _CopyMode = ...,
order: _OrderKACF = ...,
subok: bool = ...,
ndmin: int = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def array(
object: Any,
dtype: DTypeLike,
*,
copy: None | bool | _CopyMode = ...,
order: _OrderKACF = ...,
subok: bool = ...,
ndmin: int = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def zeros(
shape: _ShapeLike,
dtype: None = ...,
order: _OrderCF = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[float64]: ...
@overload
def zeros(
shape: _ShapeLike,
dtype: _DTypeLike[_SCT],
order: _OrderCF = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def zeros(
shape: _ShapeLike,
dtype: DTypeLike,
order: _OrderCF = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def empty(
shape: _ShapeLike,
dtype: None = ...,
order: _OrderCF = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[float64]: ...
@overload
def empty(
shape: _ShapeLike,
dtype: _DTypeLike[_SCT],
order: _OrderCF = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def empty(
shape: _ShapeLike,
dtype: DTypeLike,
order: _OrderCF = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
# ``unravel_index`` overloads: a scalar index yields a tuple of scalar
# coordinates; an array of indices yields a tuple of coordinate arrays.
# The second overload was truncated in the garbled chunk (its return
# annotation was missing); it is completed here.  The stray duplicate
# ``result_type`` definition that had been spliced in is dropped — the
# canonical declaration appears later in this file.
@overload
def unravel_index(
    indices: _IntLike_co,
    shape: _ShapeLike,
    order: _OrderCF = ...,
) -> tuple[intp, ...]: ...
@overload
def unravel_index(
    indices: _ArrayLikeInt_co,
    shape: _ShapeLike,
    order: _OrderCF = ...,
) -> tuple[NDArray[intp], ...]: ...
@overload
def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ...
@overload
def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ...
@overload
def where(
condition: ArrayLike,
/,
) -> tuple[NDArray[intp], ...]: ...
@overload
def where(
condition: ArrayLike,
x: ArrayLike,
y: ArrayLike,
/,
) -> NDArray[Any]: ...
def lexsort(
keys: ArrayLike,
axis: None | SupportsIndex = ...,
) -> Any: ...
def can_cast(
from_: ArrayLike | DTypeLike,
to: DTypeLike,
casting: None | _CastingKind = ...,
) -> bool: ...
def min_scalar_type(
a: ArrayLike, /,
) -> dtype[Any]: ...
def result_type(
*arrays_and_dtypes: ArrayLike | DTypeLike,
) -> dtype[Any]: ...
@overload
def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ...
@overload
def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ...
@overload
def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ...
@overload
def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ...
@overload
def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ...
@overload
def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ...
@overload
def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ...
@overload
def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ...
def bincount(
x: ArrayLike,
/,
weights: None | ArrayLike = ...,
minlength: SupportsIndex = ...,
) -> NDArray[intp]: ...
def copyto(
dst: NDArray[Any],
src: ArrayLike,
casting: None | _CastingKind = ...,
where: None | _ArrayLikeBool_co = ...,
) -> None: ...
def putmask(
a: NDArray[Any],
/,
mask: _ArrayLikeBool_co,
values: ArrayLike,
) -> None: ...
def packbits(
a: _ArrayLikeInt_co,
/,
axis: None | SupportsIndex = ...,
bitorder: L["big", "little"] = ...,
) -> NDArray[uint8]: ...
def unpackbits(
a: _ArrayLike[uint8],
/,
axis: None | SupportsIndex = ...,
count: None | SupportsIndex = ...,
bitorder: L["big", "little"] = ...,
) -> NDArray[uint8]: ...
def shares_memory(
a: object,
b: object,
/,
max_work: None | int = ...,
) -> bool: ...
def may_share_memory(
a: object,
b: object,
/,
max_work: None | int = ...,
) -> bool: ...
@overload
def asarray(
a: _ArrayLike[_SCT],
dtype: None = ...,
order: _OrderKACF = ...,
*,
device: None | L["cpu"] = ...,
copy: None | bool = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def asarray(
a: object,
dtype: None = ...,
order: _OrderKACF = ...,
*,
device: None | L["cpu"] = ...,
copy: None | bool = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def asarray(
a: Any,
dtype: _DTypeLike[_SCT],
order: _OrderKACF = ...,
*,
device: None | L["cpu"] = ...,
copy: None | bool = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def asarray(
a: Any,
dtype: DTypeLike,
order: _OrderKACF = ...,
*,
device: None | L["cpu"] = ...,
copy: None | bool = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
# ``asanyarray`` overloads: pass subclasses through unchanged, then narrow
# by scalar type, then fall back to ``Any``.  A stray orphan
# ``like: ...`` parameter line that had leaked between two overloads in the
# garbled chunk is removed here.
@overload
def asanyarray(
    a: _ArrayType,
    dtype: None = ...,
    order: _OrderKACF = ...,
    *,
    device: None | L["cpu"] = ...,
    copy: None | bool = ...,
    like: None | _SupportsArrayFunc = ...,
) -> _ArrayType: ...
@overload
def asanyarray(
    a: _ArrayLike[_SCT],
    dtype: None = ...,
    order: _OrderKACF = ...,
    *,
    device: None | L["cpu"] = ...,
    copy: None | bool = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def asanyarray(
    a: object,
    dtype: None = ...,
    order: _OrderKACF = ...,
    *,
    device: None | L["cpu"] = ...,
    copy: None | bool = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def asanyarray(
    a: Any,
    dtype: _DTypeLike[_SCT],
    order: _OrderKACF = ...,
    *,
    device: None | L["cpu"] = ...,
    copy: None | bool = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def asanyarray(
    a: Any,
    dtype: DTypeLike,
    order: _OrderKACF = ...,
    *,
    device: None | L["cpu"] = ...,
    copy: None | bool = ...,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def ascontiguousarray(
a: _ArrayLike[_SCT],
dtype: None = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def ascontiguousarray(
a: object,
dtype: None = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def ascontiguousarray(
a: Any,
dtype: _DTypeLike[_SCT],
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def ascontiguousarray(
a: Any,
dtype: DTypeLike,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def asfortranarray(
a: _ArrayLike[_SCT],
dtype: None = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def asfortranarray(
a: object,
dtype: None = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def asfortranarray(
a: Any,
dtype: _DTypeLike[_SCT],
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def asfortranarray(
a: Any,
dtype: DTypeLike,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ...
@overload
def fromstring(
string: str | bytes,
dtype: None = ...,
count: SupportsIndex = ...,
*,
sep: str,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[float64]: ...
@overload
def fromstring(
string: str | bytes,
dtype: _DTypeLike[_SCT],
count: SupportsIndex = ...,
*,
sep: str,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def fromstring(
string: str | bytes,
dtype: DTypeLike,
count: SupportsIndex = ...,
*,
sep: str,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
def frompyfunc(
func: Callable[..., Any], /,
nin: SupportsIndex,
nout: SupportsIndex,
*,
identity: Any = ...,
) -> ufunc: ...
# ``fromfile`` overloads: default dtype is float64, a concrete scalar type
# narrows the result, any other dtype-like falls back to ``Any``.  The
# second overload was missing its closing ``) -> NDArray[_SCT]: ...`` in the
# garbled chunk; restored here.
@overload
def fromfile(
    file: str | bytes | os.PathLike[Any] | _IOProtocol,
    dtype: None = ...,
    count: SupportsIndex = ...,
    sep: str = ...,
    offset: SupportsIndex = ...,
    *,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[float64]: ...
@overload
def fromfile(
    file: str | bytes | os.PathLike[Any] | _IOProtocol,
    dtype: _DTypeLike[_SCT],
    count: SupportsIndex = ...,
    sep: str = ...,
    offset: SupportsIndex = ...,
    *,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def fromfile(
    file: str | bytes | os.PathLike[Any] | _IOProtocol,
    dtype: DTypeLike,
    count: SupportsIndex = ...,
    sep: str = ...,
    offset: SupportsIndex = ...,
    *,
    like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def fromiter(
iter: Iterable[Any],
dtype: _DTypeLike[_SCT],
count: SupportsIndex = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def fromiter(
iter: Iterable[Any],
dtype: DTypeLike,
count: SupportsIndex = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def frombuffer(
buffer: _SupportsBuffer,
dtype: None = ...,
count: SupportsIndex = ...,
offset: SupportsIndex = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[float64]: ...
@overload
def frombuffer(
buffer: _SupportsBuffer,
dtype: _DTypeLike[_SCT],
count: SupportsIndex = ...,
offset: SupportsIndex = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def frombuffer(
buffer: _SupportsBuffer,
dtype: DTypeLike,
count: SupportsIndex = ...,
offset: SupportsIndex = ...,
*,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def arange(
stop: _IntLike_co,
/, *,
dtype: None = ...,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[signedinteger[Any]]: ...
@overload
def arange(
start: _IntLike_co,
stop: _IntLike_co,
step: _IntLike_co = ...,
dtype: None = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[signedinteger[Any]]: ...
@overload
def arange(
stop: _FloatLike_co,
/, *,
dtype: None = ...,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[floating[Any]]: ...
@overload
def arange(
start: _FloatLike_co,
stop: _FloatLike_co,
step: _FloatLike_co = ...,
dtype: None = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[floating[Any]]: ...
@overload
def arange(
stop: _TD64Like_co,
/, *,
dtype: None = ...,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[timedelta64]: ...
@overload
def arange(
start: _TD64Like_co,
stop: _TD64Like_co,
step: _TD64Like_co = ...,
dtype: None = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[timedelta64]: ...
@overload
def arange(
start: datetime64,
stop: datetime64,
step: datetime64 = ...,
dtype: None = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[datetime64]: ...
@overload
def arange(
stop: Any,
/, *,
dtype: _DTypeLike[_SCT],
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def arange(
start: Any,
stop: Any,
step: Any = ...,
dtype: _DTypeLike[_SCT] = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[_SCT]: ...
@overload
def arange(
stop: Any, /,
*,
dtype: DTypeLike,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
@overload
def arange(
start: Any,
stop: Any,
step: Any = ...,
dtype: DTypeLike = ...,
*,
device: None | L["cpu"] = ...,
like: None | _SupportsArrayFunc = ...,
) -> NDArray[Any]: ...
def datetime_data(
dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /,
) -> tuple[str, int]: ...
@overload
def busday_count(
begindates: _ScalarLike_co | dt.date,
enddates: _ScalarLike_co | dt.date,
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> int_: ...
@overload
def busday_count(
begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> NDArray[int_]: ...
@overload
def busday_count(
begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: _ArrayType = ...,
) -> _ArrayType: ...
@overload
def busday_offset(
dates: datetime64 | dt.date,
offsets: _TD64Like_co | dt.timedelta,
roll: L["raise"] = ...,
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> datetime64: ...
@overload
def busday_offset(
dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
roll: L["raise"] = ...,
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> NDArray[datetime64]: ...
# ``busday_offset`` overload for an explicit ``out`` array: the result is
# the ``out`` array itself.  The closing ``) -> _ArrayType: ...`` was
# missing in the garbled chunk; restored here.
@overload
def busday_offset(
    dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
    offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
    roll: L["raise"] = ...,
    weekmask: ArrayLike = ...,
    holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
    busdaycal: None | busdaycalendar = ...,
    out: _ArrayType = ...,
) -> _ArrayType: ...
@overload
def busday_offset(
dates: _ScalarLike_co | dt.date,
offsets: _ScalarLike_co | dt.timedelta,
roll: _RollKind,
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> datetime64:
...
@overload
def busday_offset(
dates: ArrayLike | dt.date | _NestedSequence[dt.date],
offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
roll: _RollKind,
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> NDArray[datetime64]:
...
@overload
def busday_offset(
dates: ArrayLike | dt.date | _NestedSequence[dt.date],
offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
roll: _RollKind,
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: _ArrayType = ...,
) -> _ArrayType:
...
@overload
def is_busday(
dates: _ScalarLike_co | dt.date,
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> np.bool:
...
@overload
def is_busday(
dates: ArrayLike | _NestedSequence[dt.date],
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: None = ...,
) -> NDArray[np.bool]:
...
@overload
def is_busday(
dates: ArrayLike | _NestedSequence[dt.date],
weekmask: ArrayLike = ...,
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
busdaycal: None | busdaycalendar = ...,
out: _ArrayType = ...,
) -> _ArrayType:
...
@overload
def datetime_as_string(
arr: datetime64 | dt.date,
unit: None | L["auto"] | _UnitKind = ...,
timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
casting: _CastingKind = ...,
) -> str_:
...
@overload
def datetime_as_string(
arr: _ArrayLikeDT64_co | _NestedSequence[dt.date],
unit: None | L["auto"] | _UnitKind = ...,
timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
casting: _CastingKind = ...,
) -> NDArray[str_]:
...
@overload
def compare_chararrays(
a1: _ArrayLikeStr_co,
a2: _ArrayLikeStr_co,
cmp: L["<", "<=", "==", ">=", ">", "!="],
rstrip: bool,
) -> NDArray[np.bool]:
...
@overload
def compare_chararrays(
a1: _ArrayLikeBytes_co,
a2: _ArrayLikeBytes_co,
cmp: L["<", "<=", "==", ">=", ">", "!="],
rstrip: bool,
) -> NDArray[np.bool]:
...
# Attach a docstring to a C-level object; returns nothing.  The declaration
# was truncated after ``->`` in the garbled chunk; completed here.
def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ...
_SetItemKeys = [
"A", "ALIGNED",
"W", "WRITEABLE",
"X", "WRITEBACKIFCOPY",
]
@final
class flagsobj:
    # Stub for the object returned by ``ndarray.flags``.
    # Unhashable: ``__hash__`` is annotated as None.
    __hash__: ClassVar[None]
    # The three writable flags; all other flags are read-only properties.
    aligned: bool
    writeable: bool
    writebackifcopy: bool
    @property
    def behaved(self) -> bool: ...
    @property
    def c_contiguous(self) -> bool: ...
    @property
    def carray(self) -> bool: ...
    @property
    def contiguous(self) -> bool: ...
    @property
    def f_contiguous(self) -> bool: ...
    @property
    def farray(self) -> bool: ...
    @property
    def fnc(self) -> bool: ...
    @property
    def forc(self) -> bool: ...
    @property
    def fortran(self) -> bool: ...
    @property
    def num(self) -> int: ...
    @property
    def owndata(self) -> bool: ...
    # Dict-style access by flag-name literal.  NOTE(review): ``_GetItemKeys``
    # is not defined in this excerpt — presumably declared alongside
    # ``_SetItemKeys`` above; confirm against the full stub file.
    def __getitem__(self, key: _GetItemKeys) -> bool: ...
    def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ...
# Stub: create a group of ``nditer`` objects that iterate the given
# operand(s) in nested loops over the specified axis groups.
def nested_iters(
    op: ArrayLike | Sequence[ArrayLike],
    axes: Sequence[Sequence[SupportsIndex]],
    flags: None | Sequence[_NDIterFlagsKind] = ...,
    op_flags: None | Sequence[Sequence[_NDIterOpFlagsKind]] = ...,
    op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,
    order: _OrderKACF = ...,
    casting: _CastingKind = ...,
    buffersize: SupportsIndex = ...,
) -> tuple[nditer, ...]: ...