NumPy 源码解析(四十)
.\numpy\numpy\random\_generator.pyx
import operator
import warnings
from collections.abc import Sequence
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
from cpython cimport (Py_INCREF, PyFloat_AsDouble)
from cpython.mem cimport PyMem_Malloc, PyMem_Free
cimport cython
import numpy as np
cimport numpy as np
from numpy.lib.array_utils import normalize_axis_index
from .c_distributions cimport *
from libc cimport string
from libc.math cimport sqrt
from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
int32_t, int64_t, INT64_MAX, SIZE_MAX)
from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
_rand_int16, _rand_int8, _rand_uint64, _rand_uint32, _rand_uint16,
_rand_uint8, _gen_mask)
from ._pcg64 import PCG64
from numpy.random cimport bitgen_t
from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1,
CONS_BOUNDED_LT_0_1, CONS_GT_1, CONS_POSITIVE_NOT_NAN, CONS_POISSON,
double_fill, cont, kahan_sum, cont_broadcast_3, float_fill, cont_f,
check_array_constraint, check_constraint, disc, discrete_broadcast_iii,
validate_output_shape
)
cdef extern from "numpy/arrayobject.h":
    # C-level NumPy array API used by the in-place shuffle/permutation code.
    int PyArray_ResolveWritebackIfCopy(np.ndarray)
    int PyArray_FailUnlessWriteable(np.PyArrayObject *obj,
                                    const char *name) except -1
    object PyArray_FromArray(np.PyArrayObject *, np.PyArray_Descr *, int)
    enum:
        NPY_ARRAY_WRITEBACKIFCOPY
# Initialize the NumPy C API; required before any C-level array calls.
np.import_array()
cdef int64_t _safe_sum_nonneg_int64(size_t num_colors, int64_t *colors):
    """
    Return the sum of the first ``num_colors`` entries of ``colors``.

    The entries are assumed to be nonnegative.  Returns -1 if the
    running total would overflow int64.
    """
    cdef size_t idx
    cdef int64_t total
    total = 0
    for idx in range(num_colors):
        # Adding colors[idx] overflows exactly when it exceeds the
        # remaining headroom below INT64_MAX.
        if colors[idx] > INT64_MAX - total:
            return -1
        total += colors[idx]
    return total
cdef inline void _shuffle_raw_wrap(bitgen_t *bitgen, np.npy_intp n,
                                   np.npy_intp first, np.npy_intp itemsize,
                                   np.npy_intp stride,
                                   char* data, char* buf) noexcept nogil:
    # Dispatch to _shuffle_raw.  When itemsize equals sizeof(np.npy_intp),
    # pass the compile-time constant sizeof(np.npy_intp) instead of the
    # runtime value so the C compiler can specialize the memcpy calls for
    # this common case (e.g. shuffling an index array).
    if itemsize == sizeof(np.npy_intp):
        _shuffle_raw(bitgen, n, first, sizeof(np.npy_intp), stride, data, buf)
    else:
        _shuffle_raw(bitgen, n, first, itemsize, stride, data, buf)
cdef inline void _shuffle_raw(bitgen_t *bitgen, np.npy_intp n,
                              np.npy_intp first, np.npy_intp itemsize,
                              np.npy_intp stride,
                              char* data, char* buf) noexcept nogil:
    """
    Fisher-Yates shuffle over raw memory.

    Parameters
    ----------
    bitgen
        Pointer to a bitgen_t instance.
    n
        Number of elements in data.
    first
        First observation to shuffle.  Shuffles n-1, n-2, ..., first;
        when first=1 the entire array is shuffled.
    itemsize
        Size in bytes of each item.
    stride
        Stride of the array.
    data
        Location of the data.
    buf
        Location of a buffer (itemsize bytes).
    """
    cdef np.npy_intp i, j
    for i in reversed(range(first, n)):
        # Draw j uniformly from [0, i] and swap items i and j through buf.
        j = random_interval(bitgen, i)
        string.memcpy(buf, data + j * stride, itemsize)
        string.memcpy(data + j * stride, data + i * stride, itemsize)
        string.memcpy(data + i * stride, buf, itemsize)
cdef inline void _shuffle_int(bitgen_t *bitgen, np.npy_intp n,
                              np.npy_intp first, int64_t* data) noexcept nogil:
    """
    Fisher-Yates shuffle of an int64 array.

    Parameters
    ----------
    bitgen
        Pointer to a bitgen_t instance.
    n
        Number of elements in data.
    first
        First observation to shuffle.  Shuffles n-1, n-2, ..., first;
        when first=1 the entire array is shuffled.
    data
        Location of the data.
    """
    cdef np.npy_intp i, j
    cdef int64_t temp
    for i in reversed(range(first, n)):
        # Draw j uniformly from [0, i] and swap data[i] with data[j].
        j = random_bounded_uint64(bitgen, 0, i, 0, 0)
        temp = data[j]
        data[j] = data[i]
        data[i] = temp
cdef bint _check_bit_generator(object bitgen):
    """Check whether an object satisfies the BitGenerator interface."""
    if not hasattr(bitgen, "capsule"):
        return False
    # A real BitGenerator exposes its C interface via a PyCapsule named
    # "BitGenerator"; validate the capsule, not just the attribute.
    cdef const char *name = "BitGenerator"
    return PyCapsule_IsValid(bitgen.capsule, name)
cdef class Generator:
    """
    Generator(bit_generator)

    Container for the BitGenerators.

    ``Generator`` exposes a number of methods for generating random
    numbers drawn from a variety of probability distributions.  In
    addition to the distribution-specific arguments, each method takes a
    keyword argument `size` that defaults to ``None``.  If `size` is
    ``None``, then a single value is generated and returned.  If `size`
    is an integer, then a 1-D array filled with generated values is
    returned.  If `size` is a tuple, then an array with that shape is
    filled and returned.

    The function :func:`numpy.random.default_rng` will instantiate a
    `Generator` with numpy's default `BitGenerator`.

    **No Compatibility Guarantee**

    `Generator` does not provide a version compatibility guarantee.  In
    particular, as better algorithms evolve the bit stream may change.

    Parameters
    ----------
    bit_generator : BitGenerator
        BitGenerator to use as the core generator.

    Notes
    -----
    The Python stdlib module `random` contains pseudo-random number generator
    with a number of methods that are similar to the ones available in
    `Generator`. It uses Mersenne Twister, and this bit generator can
    be accessed using `MT19937`. `Generator`, besides being
    NumPy-aware, has the advantage that it provides a much larger number
    of probability distributions to choose from.

    Examples
    --------
    >>> from numpy.random import Generator, PCG64
    >>> rng = Generator(PCG64())
    >>> rng.standard_normal()
    -0.203 # random

    See Also
    --------
    default_rng : Recommended constructor for `Generator`.
    """
    # Python-visible reference that keeps the bit generator alive.
    cdef public object _bit_generator
    # C-level interface struct copied out of the bit generator's capsule.
    cdef bitgen_t _bitgen
    # Scratch state used by the binomial sampler.
    cdef binomial_t _binomial
    # Lock shared with the bit generator; serializes state access.
    cdef object lock
    _poisson_lam_max = POISSON_LAM_MAX
def __init__(self, bit_generator):
    # Keep a Python reference so the bit generator (and its state) stays
    # alive as long as this Generator does.
    self._bit_generator = bit_generator
    capsule = bit_generator.capsule
    cdef const char *name = "BitGenerator"
    if not PyCapsule_IsValid(capsule, name):
        raise ValueError("Invalid bit generator. The bit generator must "
                         "be instantiated.")
    # Copy the C interface struct out of the capsule for fast access.
    self._bitgen = (<bitgen_t *> PyCapsule_GetPointer(capsule, name))[0]
    # Share the bit generator's lock so concurrent use stays serialized.
    self.lock = bit_generator.lock
def __repr__(self):
    # Same text as the original: "<str form> at 0x<hex id>".
    return f"{self} at 0x{id(self):X}"
def __str__(self):
    # e.g. "Generator(PCG64)" -- class name wrapping the bit generator name.
    inner = self.bit_generator.__class__.__name__
    return f"{self.__class__.__name__}({inner})"
def __getstate__(self):
    # Pickling is handled entirely by __reduce__; no dict state to return.
    return None
def __setstate__(self, bit_gen):
    # Legacy-pickle support: older pickles supplied the bit generator's
    # state dict; restore it onto the current bit generator.
    if isinstance(bit_gen, dict):
        self.bit_generator.state = bit_gen
def __reduce__(self):
    # Rebuild via the pickle helper, passing the bit generator instance.
    from ._pickle import __generator_ctor
    return __generator_ctor, (self._bit_generator, ), None
@property
def bit_generator(self):
    """
    Gets the bit generator instance used by the generator

    Returns
    -------
    bit_generator : BitGenerator
        The bit generator instance used by the generator
    """
    return self._bit_generator
def spawn(self, int n_children):
    """
    spawn(n_children)

    Create new independent child generators.

    See :ref:`seedsequence-spawn` for additional notes on spawning
    children.

    .. versionadded:: 1.25.0

    Parameters
    ----------
    n_children : int
        Number of child generators to create.

    Returns
    -------
    child_generators : list of Generators
        List containing newly spawned child generator objects.

    Raises
    ------
    TypeError
        Raised when the underlying SeedSequence does not support spawning.

    See Also
    --------
    random.BitGenerator.spawn, random.SeedSequence.spawn :
        Equivalent method on the bit generator and seed sequence.
    bit_generator :
        The bit generator instance used by the generator.

    Examples
    --------
    Starting from a seeded default generator:

    >>> # High quality entropy created with: f"0x{secrets.randbits(128):x}"
    >>> entropy = 0x3034c61a9ae04ff8cb62ab8ec2c4b501
    >>> rng = np.random.default_rng(entropy)

    Create two new generators for example for parallel execution:

    >>> child_rng1, child_rng2 = rng.spawn(2)

    Drawn numbers from each are independent but derived from the initial
    seeding entropy:

    >>> rng.uniform(), child_rng1.uniform(), child_rng2.uniform()
    (0.19029263503854454, 0.9475673279178444, 0.4702687338396767)

    It is safe to spawn additional children from the original ``rng`` or
    the children:

    >>> more_child_rngs = rng.spawn(20)
    >>> nested_spawn = child_rng1.spawn(20)
    """
    # Delegate spawning to the bit generator, wrapping each child bit
    # generator in a new instance of this (sub)class.
    return [type(self)(g) for g in self._bit_generator.spawn(n_children)]
def random(self, size=None, dtype=np.float64, out=None):
    """
    random(size=None, dtype=np.float64, out=None)

    Return random floats in the half-open interval [0.0, 1.0).

    Results are from the "continuous uniform" distribution over the
    stated interval.  To sample :math:`Unif[a, b), b > a` use `uniform`
    or multiply the output of `random` by ``(b - a)`` and add ``a``::

        (b - a) * random() + a

    Parameters
    ----------
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  Default is None, in which case a
        single value is returned.
    dtype : dtype, optional
        Desired dtype of the result, only `float64` and `float32` are supported.
        Byteorder must be native. The default value is np.float64.
    out : ndarray, optional
        Alternative output array in which to place the result. If size is not None,
        it must have the same shape as the provided size and must match the type of
        the output values.

    Returns
    -------
    out : float or ndarray of floats
        Array of random floats of shape `size` (unless ``size=None``, in which
        case a single float is returned).

    See Also
    --------
    uniform : Draw samples from the parameterized uniform distribution.

    Examples
    --------
    >>> rng = np.random.default_rng()
    >>> rng.random()
    0.47108547995356098 # random
    >>> type(rng.random())
    <class 'float'>
    >>> rng.random((5,))
    array([ 0.30220482,  0.86820401,  0.1654503 ,  0.11659149,  0.54323428]) # random

    Three-by-two array of random numbers from [-5, 0):

    >>> 5 * rng.random((3, 2)) - 5
    array([[-3.99149989, -0.52338984], # random
           [-2.99091858, -0.79479508],
           [-1.23204345, -1.75224494]])
    """
    # NOTE: an unused local (`cdef double temp`) was removed; it was a
    # leftover with no effect on behavior.
    _dtype = np.dtype(dtype)
    if _dtype == np.float64:
        return double_fill(&random_standard_uniform_fill, &self._bitgen, size, self.lock, out)
    elif _dtype == np.float32:
        return float_fill(&random_standard_uniform_fill_f, &self._bitgen, size, self.lock, out)
    else:
        raise TypeError('Unsupported dtype %r for random' % _dtype)
def standard_exponential(self, size=None, dtype=np.float64, method='zig', out=None):
    """
    standard_exponential(size=None, dtype=np.float64, method='zig', out=None)

    Draw samples from the standard exponential distribution.

    `standard_exponential` is identical to the exponential distribution
    with a scale parameter of 1.

    Parameters
    ----------
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  Default is None, in which case a
        single value is returned.
    dtype : dtype, optional
        Desired dtype of the result, only `float64` and `float32` are supported.
        Byteorder must be native. The default value is np.float64.
    method : str, optional
        Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method.
        'zig' uses the much faster Ziggurat method of Marsaglia and Tsang.
    out : ndarray, optional
        Alternative output array in which to place the result. If size is not None,
        it must have the same shape as the provided size and must match the type of
        the output values.

    Returns
    -------
    out : float or ndarray
        Drawn samples.

    Examples
    --------
    Output a 3x8000 array:

    >>> rng = np.random.default_rng()
    >>> n = rng.standard_exponential((3, 8000))
    """
    _dtype = np.dtype(dtype)
    if _dtype == np.float64:
        if method == 'zig':
            return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out)
        else:
            # Any value other than 'zig' falls through to the inverse-CDF
            # filler; there is no explicit validation of `method` here.
            return double_fill(&random_standard_exponential_inv_fill, &self._bitgen, size, self.lock, out)
    elif _dtype == np.float32:
        if method == 'zig':
            return float_fill(&random_standard_exponential_fill_f, &self._bitgen, size, self.lock, out)
        else:
            return float_fill(&random_standard_exponential_inv_fill_f, &self._bitgen, size, self.lock, out)
    else:
        raise TypeError('Unsupported dtype %r for standard_exponential'
                        % _dtype)
def bytes(self, np.npy_intp length):
    """
    bytes(length)

    Return random bytes.

    Parameters
    ----------
    length : int
        Number of random bytes.

    Returns
    -------
    out : bytes
        String of length `length`.

    Notes
    -----
    This function generates random bytes from a discrete uniform
    distribution. The generated bytes are independent from the CPU's
    native endianness.

    Examples
    --------
    >>> rng = np.random.default_rng()
    >>> rng.bytes(10)
    b'\\xfeC\\x9b\\x86\\x17\\xf2\\xa1\\xafcp' # random
    """
    # Draw ceil(length / 4) uint32 words, serialize them little-endian
    # (so output doesn't depend on host endianness), then trim to length.
    cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
    return self.integers(0, 4294967296, size=n_uint32,
                         dtype=np.uint32).astype('<u4').tobytes()[:length]
@cython.wraparound(True)
def standard_normal(self, size=None, dtype=np.float64, out=None):
    """
    standard_normal(size=None, dtype=np.float64, out=None)

    Draw samples from a standard Normal distribution (mean=0, stdev=1).

    Parameters
    ----------
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  Default is None, in which case a
        single value is returned.
    dtype : dtype, optional
        Desired dtype of the result, only `float64` and `float32` are supported.
        Byteorder must be native. The default value is np.float64.
    out : ndarray, optional
        Alternative output array in which to place the result. If size is not None,
        it must have the same shape as the provided size and must match the type of
        the output values.

    Returns
    -------
    out : float or ndarray
        A floating-point array of shape ``size`` of drawn samples, or a
        single sample if ``size`` was not specified.

    See Also
    --------
    normal :
        Equivalent function with additional ``loc`` and ``scale`` arguments
        for setting the mean and standard deviation.

    Notes
    -----
    For random samples from the normal distribution with mean ``mu`` and
    standard deviation ``sigma``, use one of::

        mu + sigma * rng.standard_normal(size=...)
        rng.normal(mu, sigma, size=...)

    Examples
    --------
    >>> rng = np.random.default_rng()
    >>> rng.standard_normal()
    2.1923875335537315 # random

    >>> s = rng.standard_normal(8000)
    >>> s
    array([ 0.6888893 ,  0.78096262, -0.89086505, ...,  0.49876311, # random
           -0.38672696, -0.4685006 ])                                # random
    >>> s.shape
    (8000,)
    >>> s = rng.standard_normal(size=(3, 4, 2))
    >>> s.shape
    (3, 4, 2)

    Two-by-four array of samples from the normal distribution with
    mean 3 and standard deviation 2.5:

    >>> 3 + 2.5 * rng.standard_normal(size=(2, 4))
    array([[-4.49401501,  4.00950034, -1.81814867,  7.29718677],  # random
           [ 0.39924804,  4.68456316,  4.99394529,  4.84057254]]) # random
    """
    _dtype = np.dtype(dtype)
    if _dtype == np.float64:
        return double_fill(&random_standard_normal_fill, &self._bitgen, size, self.lock, out)
    elif _dtype == np.float32:
        return float_fill(&random_standard_normal_fill_f, &self._bitgen, size, self.lock, out)
    else:
        raise TypeError('Unsupported dtype %r for standard_normal' % _dtype)
def chisquare(self, df, size=None):
    """
    chisquare(df, size=None)

    Draw samples from a chi-square distribution.

    When `df` independent random variables, each with standard normal
    distributions (mean 0, variance 1), are squared and summed, the
    resulting distribution is chi-square (see Notes).  This distribution
    is often used in hypothesis testing.

    Parameters
    ----------
    df : float or array_like of floats
         Number of degrees of freedom, must be > 0.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned if ``df`` is a scalar.  Otherwise,
        ``np.array(df).size`` samples are drawn.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized chi-square distribution.

    Raises
    ------
    ValueError
        When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
        is given.

    Notes
    -----
    The variable obtained by summing the squares of `df` independent,
    standard normally distributed random variables:

    .. math:: Q = \\sum_{i=0}^{\\mathtt{df}} X^2_i

    is chi-square distributed, denoted

    .. math:: Q \\sim \\chi^2_k.

    The probability density function of the chi-squared distribution is

    .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}
                     x^{k/2 - 1} e^{-x/2},

    where :math:`\\Gamma` is the gamma function,

    .. math:: \\Gamma(x) = \\int_0^{\\infty} t^{x - 1} e^{-t} dt.

    References
    ----------
    .. [1] NIST "Engineering Statistics Handbook"
           https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm

    Examples
    --------
    >>> rng = np.random.default_rng()
    >>> rng.chisquare(2,4)
    array([ 1.89920014,  9.00867716,  3.13710533,  5.62318272]) # random

    The distribution of a chi-square random variable
    with 20 degrees of freedom looks as follows:

    >>> import matplotlib.pyplot as plt
    >>> import scipy.stats as stats
    >>> s = rng.chisquare(20, 10000)
    >>> count, bins, _ = plt.hist(s, 30, density=True)
    >>> x = np.linspace(0, 60, 1000)
    >>> plt.plot(x, stats.chi2.pdf(x, df=20))
    >>> plt.xlim([0, 60])
    >>> plt.show()
    """
    # cont() validates df (> 0, not NaN) and broadcasts it against size.
    return cont(&random_chisquare, &self._bitgen, size, self.lock, 1,
                df, 'df', CONS_POSITIVE,
                0.0, '', CONS_NONE,
                0.0, '', CONS_NONE, None)
def standard_cauchy(self, size=None):
    """
    standard_cauchy(size=None)

    Draw samples from a standard Cauchy distribution with mode = 0.

    Also known as the Lorentz distribution.

    Parameters
    ----------
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  Default is None, in which case a
        single value is returned.

    Returns
    -------
    samples : ndarray or scalar
        The drawn samples.

    Notes
    -----
    The probability density function for the full Cauchy distribution is

    .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+
              (\\frac{x-x_0}{\\gamma})^2 \\bigr] }

    and the Standard Cauchy distribution just sets :math:`x_0=0` and
    :math:`\\gamma=1`

    The Cauchy distribution arises in the solution to the driven harmonic
    oscillator problem, and also describes spectral line broadening. It
    also describes the distribution of values at which a line tilted at
    a random angle will cut the x axis.

    When studying hypothesis tests that assume normality, seeing how the
    tests perform on data from a Cauchy distribution is a good indicator of
    their sensitivity to a heavy-tailed distribution, since the Cauchy looks
    very much like a Gaussian distribution, but with heavier tails.

    References
    ----------
    .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy
          Distribution",
          https://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
    .. [2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A
          Wolfram Web Resource.
          https://mathworld.wolfram.com/CauchyDistribution.html
    .. [3] Wikipedia, "Cauchy distribution"
          https://en.wikipedia.org/wiki/Cauchy_distribution

    Examples
    --------
    Draw samples and plot the distribution:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> s = rng.standard_cauchy(1000000)
    >>> s = s[(s>-25) & (s<25)]  # truncate distribution so it plots well
    >>> plt.hist(s, bins=100)
    >>> plt.show()
    """
    # No parameters to validate: cont() is called with zero constraints.
    return cont(&random_standard_cauchy, &self._bitgen, size, self.lock, 0,
                0.0, '', CONS_NONE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, None)
def pareto(self, a, size=None):
    """
    pareto(a, size=None)

    Draw samples from a Pareto II (AKA Lomax) distribution with
    specified shape.

    Parameters
    ----------
    a : float or array_like of floats
        Shape of the distribution. Must be positive.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned if ``a`` is a scalar.  Otherwise,
        ``np.array(a).size`` samples are drawn.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the Pareto II distribution.

    See Also
    --------
    scipy.stats.pareto : Pareto I distribution
    scipy.stats.lomax : Lomax (Pareto II) distribution
    scipy.stats.genpareto : Generalized Pareto distribution

    Notes
    -----
    The probability density for the Pareto II distribution is

    .. math:: p(x) = \\frac{a}{{x+1}^{a+1}} , x \\ge 0

    where :math:`a > 0` is the shape.

    The Pareto II distribution is a shifted and scaled version of the
    Pareto I distribution, which can be found in `scipy.stats.pareto`.

    References
    ----------
    .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
           Sourceforge projects.
    .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.
    .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
           Values, Birkhauser Verlag, Basel, pp 23-30.
    .. [4] Wikipedia, "Pareto distribution",
           https://en.wikipedia.org/wiki/Pareto_distribution

    Examples
    --------
    Draw samples from the distribution:

    >>> a = 3.
    >>> rng = np.random.default_rng()
    >>> s = rng.pareto(a, 10000)

    Display the histogram of the samples, along with the probability
    density function:

    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(0, 3, 50)
    >>> pdf = a / (x+1)**(a+1)
    >>> plt.hist(s, bins=x, density=True, label='histogram')
    >>> plt.plot(x, pdf, linewidth=2, color='r', label='pdf')
    >>> plt.xlim(x.min(), x.max())
    >>> plt.legend()
    >>> plt.show()
    """
    # cont() validates a > 0 and broadcasts it against size.
    return cont(&random_pareto, &self._bitgen, size, self.lock, 1,
                a, 'a', CONS_POSITIVE,
                0.0, '', CONS_NONE,
                0.0, '', CONS_NONE, None)
def rayleigh(self, scale=1.0, size=None):
    """
    rayleigh(scale=1.0, size=None)

    Draw samples from a Rayleigh distribution.

    The :math:`\\chi` and Weibull distributions are generalizations of
    the Rayleigh.

    Parameters
    ----------
    scale : float or array_like of floats, optional
        Scale, also equal to the mode.  Must be non-negative.  Default is 1.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned if ``scale`` is a scalar.  Otherwise,
        ``np.array(scale).size`` samples are drawn.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized Rayleigh distribution.

    Notes
    -----
    The probability density function for the Rayleigh distribution is

    .. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}

    The Rayleigh distribution would arise, for example, if the East
    and North components of the wind velocity had identical zero-mean
    Gaussian distributions.

    References
    ----------
    .. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
           https://web.archive.org/web/20090514091424/http://brighton-webs.co.uk:80/distributions/rayleigh.asp
    .. [2] Wikipedia, "Rayleigh distribution"
           https://en.wikipedia.org/wiki/Rayleigh_distribution

    Examples
    --------
    Draw values from the distribution and plot the histogram:

    >>> from matplotlib.pyplot import hist
    >>> rng = np.random.default_rng()
    >>> values = hist(rng.rayleigh(3, 100000), bins=200, density=True)

    Wave heights tend to follow a Rayleigh distribution.  If the mean
    wave height is one meter, what fraction of waves are likely to be
    larger than three meters?

    >>> meanvalue = 1
    >>> modevalue = np.sqrt(2 / np.pi) * meanvalue
    >>> s = rng.rayleigh(modevalue, 1000000)

    The percentage of waves larger than 3 meters is:

    >>> 100.*sum(s>3)/1000000.
    0.087300000000000003 # random
    """
    # cont() validates scale >= 0 and broadcasts it against size.
    return cont(&random_rayleigh, &self._bitgen, size, self.lock, 1,
                scale, 'scale', CONS_NON_NEGATIVE,
                0.0, '', CONS_NONE,
                0.0, '', CONS_NONE, None)
def wald(self, mean, scale, size=None):
    """
    wald(mean, scale, size=None)

    Draw samples from a Wald, or inverse Gaussian, distribution.

    As the scale approaches infinity, the distribution becomes more like a
    Gaussian. Some references claim that the Wald is an inverse Gaussian
    with mean equal to 1, but this is by no means universal.

    The inverse Gaussian distribution was first studied in relationship to
    Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian
    because there is an inverse relationship between the time to cover a
    unit distance and distance covered in unit time.

    Parameters
    ----------
    mean : float or array_like of floats
        Distribution mean, must be > 0.
    scale : float or array_like of floats
        Scale parameter, must be > 0.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned if ``mean`` and ``scale`` are both scalars.
        Otherwise, ``np.broadcast(mean, scale).size`` samples are drawn.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized Wald distribution.

    Notes
    -----
    The probability density function for the Wald distribution is

    .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^
                                \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}

    As noted above the inverse Gaussian distribution first arise
    from attempts to model Brownian motion. It is also a
    competitor to the Weibull for use in reliability modeling and
    modeling stock returns and interest rate processes.

    References
    ----------
    .. [1] Brighton Webs Ltd., Wald Distribution,
           https://web.archive.org/web/20090423014010/http://www.brighton-webs.co.uk:80/distributions/wald.asp
    .. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
           Distribution: Theory : Methodology, and Applications", CRC Press,
           1988.
    .. [3] Wikipedia, "Inverse Gaussian distribution"
           https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution

    Examples
    --------
    Draw values from the distribution and plot the histogram:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> h = plt.hist(rng.wald(3, 2, 100000), bins=200, density=True)
    >>> plt.show()
    """
    # cont() validates both mean > 0 and scale > 0, broadcasting together.
    return cont(&random_wald, &self._bitgen, size, self.lock, 2,
                mean, 'mean', CONS_POSITIVE,
                scale, 'scale', CONS_POSITIVE,
                0.0, '', CONS_NONE, None)
def poisson(self, lam=1.0, size=None):
    """
    poisson(lam=1.0, size=None)

    Draw samples from a Poisson distribution.

    The Poisson distribution is the limit of the binomial distribution
    for large N.

    Parameters
    ----------
    lam : float or array_like of floats
        Expected number of events occurring in a fixed-time interval,
        must be >= 0. A sequence must be broadcastable over the requested
        size.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned if ``lam`` is a scalar. Otherwise,
        ``np.array(lam).size`` samples are drawn.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized Poisson distribution.

    Notes
    -----
    The Poisson distribution

    .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!}

    For events with an expected separation :math:`\\lambda` the Poisson
    distribution :math:`f(k; \\lambda)` describes the probability of
    :math:`k` events occurring within the observed
    interval :math:`\\lambda`.

    Because the output is limited to the range of the C int64 type, a
    ValueError is raised when `lam` is within 10 sigma of the maximum
    representable value.

    References
    ----------
    .. [1] Weisstein, Eric W. "Poisson Distribution."
           From MathWorld--A Wolfram Web Resource.
           https://mathworld.wolfram.com/PoissonDistribution.html
    .. [2] Wikipedia, "Poisson distribution",
           https://en.wikipedia.org/wiki/Poisson_distribution

    Examples
    --------
    Draw samples from the distribution:

    >>> rng = np.random.default_rng()
    >>> lam, size = 5, 10000
    >>> s = rng.poisson(lam=lam, size=size)

    Verify the mean and variance, which should be approximately ``lam``:

    >>> s.mean(), s.var()
    (4.9917 5.1088311) # may vary

    Display the histogram and probability mass function:

    >>> import matplotlib.pyplot as plt
    >>> from scipy import stats
    >>> x = np.arange(0, 21)
    >>> pmf = stats.poisson.pmf(x, mu=lam)
    >>> plt.hist(s, bins=x, density=True, width=0.5)
    >>> plt.stem(x, pmf, 'C1-')
    >>> plt.show()

    Draw each 100 values for lambda 100 and 500:

    >>> s = rng.poisson(lam=(100., 500.), size=(100, 2))
    """
    # Call the underlying random_poisson sampler with the validated
    # parameters and return the drawn samples.  (The original line here
    # was a bare, non-comment sentence — a syntax error — now fixed.)
    return disc(&random_poisson, &self._bitgen, size, self.lock, 1, 0,
                lam, 'lam', CONS_POISSON,
                0.0, '', CONS_NONE,
                0.0, '', CONS_NONE)
def geometric(self, p, size=None):
    """
    geometric(p, size=None)

    Draw samples from the geometric distribution.

    Bernoulli trials are experiments with one of two outcomes:
    success or failure (an example of such an experiment is flipping
    a coin).  The geometric distribution models the number of trials
    that must be run in order to achieve success.  It is therefore
    supported on the positive integers, ``k = 1, 2, ...``.

    The probability mass function of the geometric distribution is

    .. math:: f(k) = (1 - p)^{k - 1} p

    where `p` is the probability of success of an individual trial.

    Parameters
    ----------
    p : float or array_like of floats
        The probability of success of an individual trial.
    size : int or tuple of ints, optional
        Output shape.  If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn.  If size is ``None`` (default),
        a single value is returned if ``p`` is a scalar.  Otherwise,
        ``np.array(p).size`` samples are drawn.

    Returns
    -------
    out : ndarray or scalar
        Drawn samples from the parameterized geometric distribution.

    References
    ----------
    .. [1] Wikipedia, "Geometric distribution",
           https://en.wikipedia.org/wiki/Geometric_distribution

    Examples
    --------
    Draw 10,000 values from the geometric distribution, with the
    probability of an individual success equal to ``p = 0.35``:

    >>> p, size = 0.35, 10000
    >>> rng = np.random.default_rng()
    >>> sample = rng.geometric(p=p, size=size)

    What proportion of trials succeeded after a single run?

    >>> (sample == 1).sum()/size
    0.34889999999999999 # may vary

    The geometric distribution with ``p=0.35`` looks as follows:

    >>> import matplotlib.pyplot as plt
    >>> count, bins, _ = plt.hist(sample, bins=30, density=True)
    >>> plt.plot(bins, (1-p)**(bins-1)*p)
    >>> plt.xlim([0, 25])
    >>> plt.show()
    """
    # CONS_BOUNDED_GT_0_1 rejects p outside the half-open interval (0, 1].
    return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0,
                p, 'p', CONS_BOUNDED_GT_0_1,
                0.0, '', CONS_NONE,
                0.0, '', CONS_NONE)
def permutation(self, object x, axis=0):
    """
    permutation(x, axis=0)

    Randomly permute a sequence, or return a permuted range.

    Parameters
    ----------
    x : int or array_like
        If `x` is an integer, randomly permute ``np.arange(x)``.
        If `x` is an array, make a copy and shuffle the elements
        randomly.
    axis : int, optional
        The axis which `x` is shuffled along. Default is 0.

    Returns
    -------
    out : ndarray
        Permuted sequence or array range.

    Examples
    --------
    >>> rng = np.random.default_rng()
    >>> rng.permutation(10)
    array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) # random

    >>> rng.permutation([1, 4, 9, 12, 15])
    array([15,  1,  9,  4, 12]) # random

    >>> arr = np.arange(9).reshape((3, 3))
    >>> rng.permutation(arr)
    array([[6, 7, 8], # random
           [0, 1, 2],
           [3, 4, 5]])

    >>> rng.permutation("abc")
    Traceback (most recent call last):
        ...
    numpy.exceptions.AxisError: axis 0 is out of bounds for array of dimension 0

    >>> arr = np.arange(9).reshape((3, 3))
    >>> rng.permutation(arr, axis=1)
    array([[0, 2, 1], # random
           [3, 5, 4],
           [6, 8, 7]])
    """
    if isinstance(x, (int, np.integer)):
        # Integer input: build np.arange(x), shuffle it in place, return it.
        arr = np.arange(x)
        self.shuffle(arr)
        return arr
    arr = np.asarray(x)
    axis = normalize_axis_index(axis, arr.ndim)
    if arr.ndim == 1:
        # Never shuffle the caller's array: asarray can return the input
        # itself (or a view of it), so copy when memory is shared.
        if np.may_share_memory(arr, x):
            arr = np.array(arr)
        self.shuffle(arr)
        return arr
    # For ndim > 1, shuffle index positions along `axis` and apply them
    # with fancy indexing, which produces a new (copied) array.
    idx = np.arange(arr.shape[axis], dtype=np.intp)
    self.shuffle(idx)
    slices = [slice(None)] * arr.ndim
    slices[axis] = idx
    return arr[tuple(slices)]
@cython.embedsignature(True)
def default_rng(seed=None):
    """Construct a new Generator with the default BitGenerator (PCG64).

    Parameters
    ----------
    seed : {None, int, array_like[ints], SeedSequence, BitGenerator, Generator}, optional
        A seed to initialize the `BitGenerator`. If None, then fresh,
        unpredictable entropy will be pulled from the OS. If an ``int`` or
        ``array_like[ints]`` is passed, then all values must be non-negative and will be
        passed to `SeedSequence` to derive the initial `BitGenerator` state. One may also
        pass in a `SeedSequence` instance.
        Additionally, when passed a `BitGenerator`, it will be wrapped by
        `Generator`. If passed a `Generator`, it will be returned unaltered.

    Returns
    -------
    Generator
        The initialized generator object.

    Notes
    -----
    If ``seed`` is not a `BitGenerator` or a `Generator`, a new `BitGenerator`
    is instantiated. This function does not manage a default global instance.

    See :ref:`seeding_and_entropy` for more information about seeding.

    Examples
    --------
    `default_rng` is the recommended constructor for the random number class
    `Generator`. Here are several ways we can construct a random
    number generator using `default_rng` and the `Generator` class.

    Here we use `default_rng` to generate a random float:

    >>> import numpy as np
    >>> rng = np.random.default_rng(12345)
    >>> print(rng)
    Generator(PCG64)
    >>> rfloat = rng.random()
    >>> rfloat
    0.22733602246716966
    >>> type(rfloat)
    <class 'float'>

    Here we use `default_rng` to generate 3 random integers between 0
    (inclusive) and 10 (exclusive):

    >>> import numpy as np
    >>> rng = np.random.default_rng(12345)
    >>> rints = rng.integers(low=0, high=10, size=3)
    >>> rints
    array([6, 2, 7])
    >>> type(rints[0])
    <class 'numpy.int64'>

    Here we specify a seed so that we have reproducible results:

    >>> import numpy as np
    >>> rng = np.random.default_rng(seed=42)
    >>> print(rng)
    Generator(PCG64)
    >>> arr1 = rng.random((3, 3))
    >>> arr1
    array([[0.77395605, 0.43887844, 0.85859792],
           [0.69736803, 0.09417735, 0.97562235],
           [0.7611397 , 0.78606431, 0.12811363]])

    If we exit and restart our Python interpreter, we'll see that we
    generate the same random numbers again:

    >>> import numpy as np
    >>> rng = np.random.default_rng(seed=42)
    >>> arr2 = rng.random((3, 3))
    >>> arr2
    array([[0.77395605, 0.43887844, 0.85859792],
           [0.69736803, 0.09417735, 0.97562235],
           [0.7611397 , 0.78606431, 0.12811363]])
    """
    if _check_bit_generator(seed):
        # seed is already an instantiated BitGenerator: wrap it directly.
        return Generator(seed)
    elif isinstance(seed, Generator):
        # Already a Generator: return it unaltered.
        return seed
    # Otherwise seed is None, int(s), or a SeedSequence; use it to seed a
    # fresh PCG64.  (A stray backtick line here in the original was a
    # syntax error and has been removed.)
    return Generator(PCG64(seed))
.\numpy\numpy\random\_mt19937.pyi
from typing import TypedDict
from numpy import uint32
from numpy.typing import NDArray
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _MT19937Internal(TypedDict):
    # 624-element uint32 Mersenne Twister state vector ("key").
    key: NDArray[uint32]
    # Index of the current position within the state vector.
    pos: int
class _MT19937State(TypedDict):
    # Class name of the bit generator (e.g. 'MT19937').
    bit_generator: str
    # Inner key/pos state mapping.
    state: _MT19937Internal
class MT19937(BitGenerator):
    """Type stub for the Mersenne Twister (MT19937) bit generator."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None:
        ...
    def _legacy_seeding(self, seed: _ArrayLikeInt_co) -> None:
        ...
    def jumped(self, jumps: int = ...) -> MT19937:
        ...
    @property
    def state(self) -> _MT19937State:
        ...
    @state.setter
    def state(self, value: _MT19937State) -> None:
        ...
.\numpy\numpy\random\_mt19937.pyx
import operator
import numpy as np
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
from numpy.random cimport BitGenerator, SeedSequence
__all__ = ['MT19937']
np.import_array()
cdef extern from "src/mt19937/mt19937.h":
    # C-level MT19937 state: 624-word key array plus current position.
    struct s_mt19937_state:
        uint32_t key[624]
        int pos
    ctypedef s_mt19937_state mt19937_state

    uint64_t mt19937_next64(mt19937_state *state) nogil
    uint32_t mt19937_next32(mt19937_state *state) nogil
    double mt19937_next_double(mt19937_state *state) nogil
    void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key, int key_length)
    void mt19937_seed(mt19937_state *state, uint32_t seed)
    void mt19937_jump(mt19937_state *state)
    enum:
        RK_STATE_LEN
# bitgen_t function-pointer hooks: each casts the opaque state pointer back
# to mt19937_state and forwards to the corresponding C routine.
cdef uint64_t mt19937_uint64(void *st) noexcept nogil:
    return mt19937_next64(<mt19937_state *> st)

cdef uint32_t mt19937_uint32(void *st) noexcept nogil:
    return mt19937_next32(<mt19937_state *> st)

cdef double mt19937_double(void *st) noexcept nogil:
    # next_double hook (presumably uniform in [0, 1) — confirm in mt19937.h).
    return mt19937_next_double(<mt19937_state *> st)

cdef uint64_t mt19937_raw(void *st) noexcept nogil:
    # next_raw: MT19937 natively produces 32 bits, widened to uint64.
    return <uint64_t>mt19937_next32(<mt19937_state *> st)
cdef class MT19937(BitGenerator):
    """
    MT19937(seed=None)

    Container for the Mersenne Twister pseudo-random number generator.

    Parameters
    ----------
    seed : {None, int, array_like[ints], SeedSequence}, optional
        A seed to initialize the `BitGenerator`. If None, then fresh,
        unpredictable entropy will be pulled from the OS. If an ``int`` or
        ``array_like[ints]`` is passed, then it will be passed to
        `SeedSequence` to derive the initial `BitGenerator` state. One may
        also pass in a `SeedSequence` instance.

    Attributes
    ----------
    lock : threading.Lock
        Lock instance that is shared so that the same bit generator can be
        used in multiple Generators without corrupting the state. Code that
        generates values from a bit generator should hold the bit
        generator's lock.

    Notes
    -----
    ``MT19937`` provides a capsule containing function pointers that
    produce doubles, and unsigned 32 and 64-bit integers. These are not
    directly consumable in Python and must be consumed by a ``Generator``
    or similar object that supports low-level access. The Python standard
    library module "random" also contains a Mersenne Twister
    pseudo-random number generator.

    **State and Seeding**

    The ``MT19937`` state vector consists of a 624-element array of 32-bit
    unsigned integers plus a single integer value between 0 and 624 that
    indexes the current position within the main array. The input seed is
    processed by `SeedSequence` to fill the whole state; the first element
    is reset so that only its most significant bit is set.

    **Parallel Features**

    The preferred way to use a BitGenerator in parallel applications is to
    use the `SeedSequence.spawn` method to obtain entropy values and use
    them to create new, independent bit generators.
    """
    # C-level Mersenne Twister state owned by this instance.
    # NOTE: this must be a bare cdef declaration — wrapping it in a string
    # literal (as the garbled original did) leaves rng_state undeclared.
    cdef mt19937_state rng_state

    def __init__(self, seed=None):
        BitGenerator.__init__(self, seed)
        # Fill the 624-word key from the seed sequence; key[0] keeps only
        # its most significant bit, guaranteeing a non-zero initial array.
        val = self._seed_seq.generate_state(RK_STATE_LEN, np.uint32)
        self.rng_state.key[0] = 0x80000000UL
        for i in range(1, RK_STATE_LEN):
            self.rng_state.key[i] = val[i]
        # i == RK_STATE_LEN - 1 after the loop: start at the end of the
        # buffer so the first draw triggers a full state update.
        self.rng_state.pos = i

        self._bitgen.state = &self.rng_state
        self._bitgen.next_uint64 = &mt19937_uint64
        self._bitgen.next_uint32 = &mt19937_uint32
        self._bitgen.next_double = &mt19937_double
        self._bitgen.next_raw = &mt19937_raw

    def _legacy_seeding(self, seed):
        """
        _legacy_seeding(seed)

        Seed the generator in a backward compatible way. For modern
        applications, creating a new instance is preferable. Calling this
        overrides self._seed_seq.

        Parameters
        ----------
        seed : {None, int, array_like}
            Random seed initializing the pseudo-random number generator.
            Can be an integer in [0, 2**32-1], array of integers in
            [0, 2**32-1], a `SeedSequence`, or ``None``. If `seed`
            is ``None``, then fresh, unpredictable entropy will be pulled
            from the OS.

        Raises
        ------
        ValueError
            If seed values are out of range for the PRNG.
        """
        cdef np.ndarray obj
        with self.lock:
            try:
                if seed is None:
                    seed = SeedSequence()
                    val = seed.generate_state(RK_STATE_LEN)
                    # MSB-only first word, as in __init__.
                    self.rng_state.key[0] = 0x80000000UL
                    for i in range(1, RK_STATE_LEN):
                        self.rng_state.key[i] = val[i]
                else:
                    if hasattr(seed, 'squeeze'):
                        seed = seed.squeeze()
                    idx = operator.index(seed)
                    if idx > int(2**32 - 1) or idx < 0:
                        raise ValueError("Seed must be between 0 and 2**32 - 1")
                    mt19937_seed(&self.rng_state, seed)
            except TypeError:
                # Non-integer seed: treat it as a 1-d array of uint32.
                obj = np.asarray(seed)
                if obj.size == 0:
                    raise ValueError("Seed must be non-empty")
                obj = obj.astype(np.int64, casting='safe')
                if obj.ndim != 1:
                    raise ValueError("Seed array must be 1-d")
                if ((obj > int(2**32 - 1)) | (obj < 0)).any():
                    raise ValueError("Seed must be between 0 and 2**32 - 1")
                obj = obj.astype(np.uint32, casting='unsafe', order='C')
                mt19937_init_by_array(&self.rng_state,
                                      <uint32_t*> obj.data,
                                      np.PyArray_DIM(obj, 0))
        # Legacy seeding invalidates the SeedSequence-derived state.
        self._seed_seq = None

    cdef jump_inplace(self, iter):
        """
        Jump state in-place

        Not part of public API

        Parameters
        ----------
        iter : integer, positive
            Number of times to jump the state of the rng.
        """
        cdef np.npy_intp i
        for i in range(iter):
            mt19937_jump(&self.rng_state)

    def jumped(self, np.npy_intp jumps=1):
        """
        jumped(jumps=1)

        Returns a new bit generator with the state jumped

        The state of the returned bit generator is jumped as-if
        2**(128 * jumps) random numbers have been generated.

        Parameters
        ----------
        jumps : integer, positive
            Number of times to jump the state of the bit generator returned

        Returns
        -------
        bit_generator : MT19937
            New instance of generator jumped iter times

        Notes
        -----
        The jump step is computed using a modified version of Matsumoto's
        implementation of Horner's method. The step polynomial is
        precomputed to perform 2**128 steps. The jumped state has been
        verified to match the state produced using Matsumoto's original
        code.

        References
        ----------
        .. [1] Matsumoto, M, Generating multiple disjoint streams of
           pseudorandom number sequences. Accessed on: May 6, 2020.
           http://www.math.sci.hiroshima-u.ac.jp/m-mat/MT/JUMP/
        .. [2] Hiroshi Haramoto, Makoto Matsumoto, Takuji Nishimura, François
           Panneton, Pierre L\'Ecuyer, "Efficient Jump Ahead for F2-Linear
           Random Number Generators", INFORMS JOURNAL ON COMPUTING, Vol. 20,
           No. 3, Summer 2008, pp. 385-390.
        """
        cdef MT19937 bit_generator

        bit_generator = self.__class__()
        bit_generator.state = self.state
        bit_generator.jump_inplace(jumps)
        return bit_generator

    @property
    def state(self):
        """
        Get or set the PRNG state

        Returns
        -------
        state : dict
            Dictionary containing the information required to describe the
            state of the PRNG
        """
        key = np.zeros(624, dtype=np.uint32)
        for i in range(624):
            key[i] = self.rng_state.key[i]
        return {'bit_generator': self.__class__.__name__,
                'state': {'key': key, 'pos': self.rng_state.pos}}

    @state.setter
    def state(self, value):
        # Accept the legacy RandomState tuple form and normalize it to the
        # dict layout used by bit generators.
        if isinstance(value, tuple):
            if value[0] != 'MT19937' or len(value) not in (3, 5):
                raise ValueError('state is not a legacy MT19937 state')
            value = {'bit_generator': 'MT19937',
                     'state': {'key': value[1], 'pos': value[2]}}
        if not isinstance(value, dict):
            raise TypeError('state must be a dict')
        bitgen = value.get('bit_generator', '')
        if bitgen != self.__class__.__name__:
            raise ValueError('state must be for a {0} '
                             'PRNG'.format(self.__class__.__name__))
        key = value['state']['key']
        for i in range(624):
            self.rng_state.key[i] = key[i]
        self.rng_state.pos = value['state']['pos']
.\numpy\numpy\random\_pcg64.pyi
from typing import TypedDict
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _PCG64Internal(TypedDict):
    # 128-bit LCG state, represented as a Python int.
    state: int
    # 128-bit odd increment of the LCG, represented as a Python int.
    inc: int
class _PCG64State(TypedDict):
    # Class name of the bit generator (e.g. 'PCG64' or 'PCG64DXSM').
    bit_generator: str
    # Inner state/inc mapping.
    state: _PCG64Internal
    # Nonzero when a buffered 32-bit value is pending.
    has_uint32: int
    # The buffered 32-bit value (meaningful only when has_uint32 is set).
    uinteger: int
class PCG64(BitGenerator):
    """Type stub for the PCG-64 (XSL RR 128/64) bit generator."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    def jumped(self, jumps: int = ...) -> PCG64: ...
    @property
    def state(
        self,
    ) -> _PCG64State: ...
    @state.setter
    def state(
        self,
        value: _PCG64State,
    ) -> None: ...
    def advance(self, delta: int) -> PCG64: ...
class PCG64DXSM(BitGenerator):
    """Type stub for the PCG-64 DXSM bit generator."""
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    def jumped(self, jumps: int = ...) -> PCG64DXSM: ...
    @property
    def state(
        self,
    ) -> _PCG64State: ...
    @state.setter
    def state(
        self,
        value: _PCG64State,
    ) -> None: ...
    def advance(self, delta: int) -> PCG64DXSM: ...
.\numpy\numpy\random\_pcg64.pyx
import numpy as np
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
from ._common cimport uint64_to_double, wrap_int
from numpy.random cimport BitGenerator
__all__ = ['PCG64']
cdef extern from "src/pcg64/pcg64.h":
    # Opaque C struct holding the raw 128-bit PCG state + increment.
    ctypedef int pcg64_random_t

    # Wrapper state: raw PCG state plus a one-slot cache for the unused
    # half of a 64-bit draw when 32-bit output is requested.
    struct s_pcg64_state:
        pcg64_random_t *pcg_state
        int has_uint32
        uint32_t uinteger

    ctypedef s_pcg64_state pcg64_state

    uint64_t pcg64_next64(pcg64_state *state) nogil
    uint32_t pcg64_next32(pcg64_state *state) nogil
    void pcg64_jump(pcg64_state *state)
    void pcg64_advance(pcg64_state *state, uint64_t *step)
    void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc)
    void pcg64_get_state(pcg64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
    void pcg64_set_state(pcg64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
    # "cm" variants implement the DXSM output function / cheap multiplier.
    uint64_t pcg64_cm_next64(pcg64_state *state) noexcept nogil
    uint32_t pcg64_cm_next32(pcg64_state *state) noexcept nogil
    void pcg64_cm_advance(pcg64_state *state, uint64_t *step)
# bitgen_t function-pointer hooks for the XSL RR variant.
cdef uint64_t pcg64_uint64(void* st) noexcept nogil:
    return pcg64_next64(<pcg64_state *>st)

cdef uint32_t pcg64_uint32(void *st) noexcept nogil:
    return pcg64_next32(<pcg64_state *> st)

cdef double pcg64_double(void* st) noexcept nogil:
    # Map a full 64-bit draw onto a double via the shared helper.
    return uint64_to_double(pcg64_next64(<pcg64_state *>st))

# bitgen_t function-pointer hooks for the DXSM ("cheap multiplier") variant.
cdef uint64_t pcg64_cm_uint64(void* st) noexcept nogil:
    return pcg64_cm_next64(<pcg64_state *>st)

cdef uint32_t pcg64_cm_uint32(void *st) noexcept nogil:
    return pcg64_cm_next32(<pcg64_state *> st)

cdef double pcg64_cm_double(void* st) noexcept nogil:
    return uint64_to_double(pcg64_cm_next64(<pcg64_state *>st))
cdef class PCG64(BitGenerator):
    """
    PCG64(seed=None)

    BitGenerator for the PCG-64 pseudo-random number generator.

    Parameters
    ----------
    seed : {None, int, array_like[ints], SeedSequence}, optional
        A seed to initialize the `BitGenerator`. If None, then fresh,
        unpredictable entropy will be pulled from the OS. If an ``int`` or
        ``array_like[ints]`` is passed, then it will be passed to
        `SeedSequence` to derive the initial `BitGenerator` state. One may
        also pass in a `SeedSequence` instance.

    Notes
    -----
    PCG-64 is a 128-bit implementation of O'Neill's permutation
    congruential generator ([1]_, [2]_). PCG-64 has a period of
    :math:`2^{128}` and supports advancing an arbitrary number of steps as
    well as :math:`2^{127}` streams. The specific member of the PCG family
    that we use is PCG XSL RR 128/64 as described in the paper ([2]_).

    `PCG64` provides a capsule containing function pointers that produce
    doubles, and unsigned 32 and 64-bit integers. These are not directly
    consumable in Python and must be consumed by a `Generator` or similar
    object that supports low-level access.

    References
    ----------
    .. [1] `"PCG, A Family of Better Random Number Generators"
           <http://www.pcg-random.org/>`_
    .. [2] O'Neill, Melissa E. `"PCG: A Family of Simple Fast Space-Efficient
           Statistically Good Algorithms for Random Number Generation"
           <https://www.cs.hmc.edu/tr/hmc-cs-2014-0905.pdf>`_
    """
    # Wrapper state (cached 32-bit half) and the raw PCG state it points to.
    cdef pcg64_state rng_state
    cdef pcg64_random_t pcg64_random_state

    def __init__(self, seed=None):
        BitGenerator.__init__(self, seed)
        self.rng_state.pcg_state = &self.pcg64_random_state

        self._bitgen.state = <void *>&self.rng_state
        self._bitgen.next_uint64 = &pcg64_uint64
        self._bitgen.next_uint32 = &pcg64_uint32
        self._bitgen.next_double = &pcg64_double
        self._bitgen.next_raw = &pcg64_uint64
        # 256 bits of seed material: 128-bit state then 128-bit increment.
        val = self._seed_seq.generate_state(4, np.uint64)
        pcg64_set_seed(&self.rng_state,
                       <uint64_t *>np.PyArray_DATA(val),
                       (<uint64_t *>np.PyArray_DATA(val) + 2))
        self._reset_state_variables()

    cdef _reset_state_variables(self):
        # Discard any buffered 32-bit half-draw so output after seeding or
        # advancing is exactly reproducible (matches PCG64DXSM; the
        # original omitted clearing ``uinteger``).
        self.rng_state.has_uint32 = 0
        self.rng_state.uinteger = 0

    cdef jump_inplace(self, jumps):
        """
        Jump state in-place

        Not part of public API

        Parameters
        ----------
        jumps : integer, positive
            Number of times to jump the state of the rng.

        Notes
        -----
        The step size is phi-1 when multiplied by 2**128 where phi is the
        golden ratio.
        """
        step = 0x9e3779b97f4a7c15f39cc0605cedc835
        self.advance(step * int(jumps))

    def jumped(self, jumps=1):
        """
        jumped(jumps=1)

        Returns a new bit generator with the state jumped.

        Jumps the state as-if jumps * 210306068529402873165736369884012333109
        random numbers have been generated.

        Parameters
        ----------
        jumps : integer, positive
            Number of times to jump the state of the bit generator returned

        Returns
        -------
        bit_generator : PCG64
            New instance of generator jumped iter times

        Notes
        -----
        The step size is phi-1 when multiplied by 2**128 where phi is the
        golden ratio.
        """
        cdef PCG64 bit_generator

        bit_generator = self.__class__()
        bit_generator.state = self.state
        bit_generator.jump_inplace(jumps)
        return bit_generator

    @property
    def state(self):
        """
        Get or set the PRNG state

        Returns
        -------
        state : dict
            Dictionary containing the information required to describe the
            state of the PRNG
        """
        cdef np.ndarray state_vec
        cdef int has_uint32
        cdef uint32_t uinteger

        # state_vec is state[0:2], inc[2:4], each 128-bit value split into
        # two uint64 words (high word first).
        state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
        pcg64_get_state(&self.rng_state,
                        <uint64_t *>np.PyArray_DATA(state_vec),
                        &has_uint32, &uinteger)
        state = int(state_vec[0]) * 2**64 + int(state_vec[1])
        inc = int(state_vec[2]) * 2**64 + int(state_vec[3])
        return {'bit_generator': self.__class__.__name__,
                'state': {'state': state, 'inc': inc},
                'has_uint32': has_uint32,
                'uinteger': uinteger}

    @state.setter
    def state(self, value):
        cdef np.ndarray state_vec
        cdef int has_uint32
        cdef uint32_t uinteger
        if not isinstance(value, dict):
            raise TypeError('state must be a dict')
        bitgen = value.get('bit_generator', '')
        if bitgen != self.__class__.__name__:
            raise ValueError('state must be for a {0} '
                             'RNG'.format(self.__class__.__name__))
        # Split each 128-bit Python int back into high/low uint64 words.
        state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
        state_vec[0] = value['state']['state'] // 2 ** 64
        state_vec[1] = value['state']['state'] % 2 ** 64
        state_vec[2] = value['state']['inc'] // 2 ** 64
        state_vec[3] = value['state']['inc'] % 2 ** 64
        has_uint32 = value['has_uint32']
        uinteger = value['uinteger']
        pcg64_set_state(&self.rng_state,
                        <uint64_t *>np.PyArray_DATA(state_vec),
                        has_uint32, uinteger)

    def advance(self, delta):
        """
        advance(delta)

        Advance the underlying RNG as-if delta draws have occurred.

        Parameters
        ----------
        delta : integer, positive
            Number of draws to advance the RNG. Must be less than the
            size state variable in the underlying RNG.

        Returns
        -------
        self : PCG64
            RNG advanced delta steps

        Notes
        -----
        Advancing a RNG updates the underlying RNG state as-if a given
        number of calls to the underlying RNG have been made. In general
        there is not a one-to-one relationship between the number output
        random values from a particular distribution and the number of
        draws from the core RNG. This occurs for two reasons:

        * The random values are simulated using a rejection-based method
          and so, on average, more than one value from the underlying
          RNG is required to generate an single draw.
        * The number of bits required to generate a simulated value
          differs from the number of bits generated by the underlying
          RNG. For example, two 16-bit integer values can be simulated
          from a single draw of a 32-bit RNG.

        Advancing the RNG state resets any pre-computed random numbers.
        This is required to ensure exact reproducibility.
        """
        # Reduce delta modulo the 128-bit period, then split it into two
        # uint64 words for the C advance routine.
        delta = wrap_int(delta, 128)

        cdef np.ndarray d = np.empty(2, dtype=np.uint64)
        d[0] = delta // 2**64
        d[1] = delta % 2**64
        pcg64_advance(&self.rng_state, <uint64_t *>np.PyArray_DATA(d))
        self._reset_state_variables()
        return self
cdef class PCG64DXSM(BitGenerator):
    """
    PCG64DXSM(seed=None)

    BitGenerator for the PCG-64 DXSM pseudo-random number generator.

    Parameters
    ----------
    seed : {None, int, array_like[ints], SeedSequence}, optional
        A seed to initialize the `BitGenerator`. If None, then fresh,
        unpredictable entropy will be pulled from the OS. If an ``int`` or
        ``array_like[ints]`` is passed, then it will be passed to
        `SeedSequence` to derive the initial `BitGenerator` state. One may
        also pass in a `SeedSequence` instance.

    Notes
    -----
    PCG-64 DXSM is a 128-bit implementation of O'Neill's permutation
    congruential generator ([1]_, [2]_). PCG-64 DXSM has a period of
    :math:`2^{128}` and supports advancing an arbitrary number of steps as
    well as :math:`2^{127}` streams.

    The specific member of the PCG family that we use is PCG CM DXSM
    128/64. It differs from `PCG64` in that it uses the stronger DXSM
    output function, a 64-bit "cheap multiplier" in the LCG, and outputs
    from the state before advancing it rather than advance-then-output.

    `PCG64DXSM` provides a capsule containing function pointers that
    produce doubles, and unsigned 32 and 64-bit integers. These are not
    directly consumable in Python and must be consumed by a `Generator`
    or similar object that supports low-level access.

    Supports the method :meth:`advance` to advance the RNG an arbitrary
    number of steps. The state of the PCG-64 DXSM RNG is represented by
    2 128-bit unsigned integers.

    **State and Seeding**

    The `PCG64DXSM` state vector consists of 2 unsigned 128-bit values,
    which are represented externally as Python ints. One is the state of
    the PRNG, which is advanced by a linear congruential generator (LCG).
    The second is a fixed odd increment used in the LCG.

    The input seed is processed by `SeedSequence` to generate both values.
    The increment is not independently settable.

    **Parallel Features**

    The preferred way to use a BitGenerator in parallel applications is to
    use the `SeedSequence.spawn` method to obtain entropy values, and to
    use these to generate new BitGenerators:

    >>> from numpy.random import Generator, PCG64DXSM, SeedSequence
    >>> sg = SeedSequence(1234)
    >>> rg = [Generator(PCG64DXSM(s)) for s in sg.spawn(10)]

    **Compatibility Guarantee**

    `PCG64DXSM` makes a guarantee that a fixed seed will always produce
    the same random integer stream.

    References
    ----------
    .. [1] `"PCG, A Family of Better Random Number Generators"
           <http://www.pcg-random.org/>`_
    .. [2] O'Neill, Melissa E. `"PCG: A Family of Simple Fast Space-Efficient
           Statistically Good Algorithms for Random Number Generation"
           <https://www.cs.hmc.edu/tr/hmc-cs-2014-0905.pdf>`_
    """
    # Wrapper state (cached 32-bit half) and the raw PCG state it points to.
    cdef pcg64_state rng_state
    cdef pcg64_random_t pcg64_random_state

    def __init__(self, seed=None):
        BitGenerator.__init__(self, seed)
        self.rng_state.pcg_state = &self.pcg64_random_state

        # Use the "cm" (cheap multiplier / DXSM) variants of the hooks.
        self._bitgen.state = <void *>&self.rng_state
        self._bitgen.next_uint64 = &pcg64_cm_uint64
        self._bitgen.next_uint32 = &pcg64_cm_uint32
        self._bitgen.next_double = &pcg64_cm_double
        self._bitgen.next_raw = &pcg64_cm_uint64
        # 256 bits of seed material: 128-bit state then 128-bit increment.
        val = self._seed_seq.generate_state(4, np.uint64)
        pcg64_set_seed(&self.rng_state,
                       <uint64_t *>np.PyArray_DATA(val),
                       (<uint64_t *>np.PyArray_DATA(val) + 2))
        self._reset_state_variables()

    cdef _reset_state_variables(self):
        # Discard any buffered 32-bit half-draw for exact reproducibility.
        self.rng_state.has_uint32 = 0
        self.rng_state.uinteger = 0

    cdef jump_inplace(self, jumps):
        """
        Jump state in-place

        Not part of public API

        Parameters
        ----------
        jumps : integer, positive
            Number of times to jump the state of the rng.

        Notes
        -----
        The step size is phi-1 when multiplied by 2**128 where phi is the
        golden ratio.
        """
        step = 0x9e3779b97f4a7c15f39cc0605cedc835
        self.advance(step * int(jumps))

    def jumped(self, jumps=1):
        """
        jumped(jumps=1)

        Returns a new bit generator with the state jumped.

        Jumps the state as-if jumps * 210306068529402873165736369884012333109
        random numbers have been generated.

        Parameters
        ----------
        jumps : integer, positive
            Number of times to jump the state of the bit generator returned

        Returns
        -------
        bit_generator : PCG64DXSM
            New instance of generator jumped iter times

        Notes
        -----
        The step size is phi-1 when multiplied by 2**128 where phi is the
        golden ratio.
        """
        cdef PCG64DXSM bit_generator

        bit_generator = self.__class__()
        bit_generator.state = self.state
        bit_generator.jump_inplace(jumps)
        return bit_generator

    # BUG FIX: the original omitted @property here, so the later
    # ``@state.setter`` decorator would fail with AttributeError.
    @property
    def state(self):
        """
        Get or set the PRNG state

        Returns
        -------
        state : dict
            Dictionary containing the information required to describe the
            state of the PRNG
        """
        cdef np.ndarray state_vec
        cdef int has_uint32
        cdef uint32_t uinteger

        # state_vec is state[0:2], inc[2:4], each 128-bit value split into
        # two uint64 words (high word first).
        state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
        pcg64_get_state(&self.rng_state,
                        <uint64_t *>np.PyArray_DATA(state_vec),
                        &has_uint32, &uinteger)
        state = int(state_vec[0]) * 2**64 + int(state_vec[1])
        inc = int(state_vec[2]) * 2**64 + int(state_vec[3])
        return {'bit_generator': self.__class__.__name__,
                'state': {'state': state, 'inc': inc},
                'has_uint32': has_uint32,
                'uinteger': uinteger}

    @state.setter
    def state(self, value):
        cdef np.ndarray state_vec
        cdef int has_uint32
        cdef uint32_t uinteger
        if not isinstance(value, dict):
            raise TypeError('state must be a dict')
        bitgen = value.get('bit_generator', '')
        if bitgen != self.__class__.__name__:
            raise ValueError('state must be for a {0} '
                             'RNG'.format(self.__class__.__name__))
        # Split each 128-bit Python int back into high/low uint64 words.
        state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
        state_vec[0] = value['state']['state'] // 2 ** 64
        state_vec[1] = value['state']['state'] % 2 ** 64
        state_vec[2] = value['state']['inc'] // 2 ** 64
        state_vec[3] = value['state']['inc'] % 2 ** 64
        has_uint32 = value['has_uint32']
        uinteger = value['uinteger']
        pcg64_set_state(&self.rng_state,
                        <uint64_t *>np.PyArray_DATA(state_vec),
                        has_uint32, uinteger)

    def advance(self, delta):
        """
        advance(delta)

        Advance the underlying RNG as-if delta draws have occurred.

        Parameters
        ----------
        delta : integer, positive
            Number of draws to advance the RNG. Must be less than the
            size state variable in the underlying RNG.

        Returns
        -------
        self : PCG64DXSM
            RNG advanced delta steps

        Notes
        -----
        Advancing a RNG updates the underlying RNG state as-if a given
        number of calls to the underlying RNG have been made. In general
        there is not a one-to-one relationship between the number output
        random values from a particular distribution and the number of
        draws from the core RNG. This occurs for two reasons:

        * The random values are simulated using a rejection-based method
          and so, on average, more than one value from the underlying
          RNG is required to generate an single draw.
        * The number of bits required to generate a simulated value
          differs from the number of bits generated by the underlying
          RNG. For example, two 16-bit integer values can be simulated
          from a single draw of a 32-bit RNG.

        Advancing the RNG state resets any pre-computed random numbers.
        This is required to ensure exact reproducibility.
        """
        # Reduce delta modulo the 128-bit period, then split it into two
        # uint64 words for the C advance routine.
        delta = wrap_int(delta, 128)

        cdef np.ndarray d = np.empty(2, dtype=np.uint64)
        d[0] = delta // 2**64
        d[1] = delta % 2**64
        pcg64_cm_advance(&self.rng_state, <uint64_t *>np.PyArray_DATA(d))
        self._reset_state_variables()
        return self
.\numpy\numpy\random\_philox.pyi
from typing import TypedDict
from numpy import uint64
from numpy.typing import NDArray
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import _ArrayLikeInt_co
class _PhiloxInternal(TypedDict):
    # 256-bit counter as a 4-element uint64 array.
    counter: NDArray[uint64]
    # 128-bit key as a 2-element uint64 array.
    key: NDArray[uint64]
class _PhiloxState(TypedDict):
    # Class name of the bit generator (e.g. 'Philox').
    bit_generator: str
    # Inner counter/key mapping.
    state: _PhiloxInternal
    # Block of pre-generated uint64 outputs not yet consumed.
    buffer: NDArray[uint64]
    # Index of the next unread slot in ``buffer``.
    buffer_pos: int
    # Nonzero when a buffered 32-bit value is pending.
    has_uint32: int
    # The buffered 32-bit value (meaningful only when has_uint32 is set).
    uinteger: int
class Philox(BitGenerator):
    """Type stub for the Philox (4x64) counter-based bit generator."""
    def __init__(
        self,
        seed: None | _ArrayLikeInt_co | SeedSequence = ...,
        counter: None | _ArrayLikeInt_co = ...,
        key: None | _ArrayLikeInt_co = ...,
    ) -> None: ...
    @property
    def state(
        self,
    ) -> _PhiloxState: ...
    @state.setter
    def state(
        self,
        value: _PhiloxState,
    ) -> None: ...
    def jumped(self, jumps: int = ...) -> Philox: ...
    def advance(self, delta: int) -> Philox: ...
.\numpy\numpy\random\_philox.pyx
from cpython.pycapsule cimport PyCapsule_New
import numpy as np
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
from ._common cimport uint64_to_double, int_to_array, wrap_int
from numpy.random cimport BitGenerator
__all__ = ['Philox']
np.import_array()
# Number of uint64 outputs produced per Philox 4x64 round.
cdef int PHILOX_BUFFER_SIZE=4

cdef extern from 'src/philox/philox.h':
    struct s_r123array2x64:
        uint64_t v[2]
    struct s_r123array4x64:
        uint64_t v[4]
    # 256-bit counter and 128-bit key types of the 4x64 variant.
    ctypedef s_r123array4x64 philox4x64_ctr_t
    ctypedef s_r123array2x64 philox4x64_key_t

    # State wrapper: counter/key pointers, a buffer of pre-generated
    # outputs with its read position, and the cached 32-bit half-draw.
    struct s_philox_state:
        philox4x64_ctr_t *ctr
        philox4x64_key_t *key
        int buffer_pos
        uint64_t *buffer
        int has_uint32
        uint32_t uinteger

    ctypedef s_philox_state philox_state
    uint64_t philox_next64(philox_state *state) noexcept nogil
    uint32_t philox_next32(philox_state *state) noexcept nogil
    void philox_jump(philox_state *state)
    void philox_advance(uint64_t *step, philox_state *state)
# bitgen_t function-pointer hooks: cast the opaque state pointer back to
# philox_state and forward to the matching C routine.
cdef uint64_t philox_uint64(void* st) noexcept nogil:
    return philox_next64(<philox_state *> st)

cdef uint32_t philox_uint32(void *st) noexcept nogil:
    return philox_next32(<philox_state *> st)

cdef double philox_double(void* st) noexcept nogil:
    # Map a full 64-bit draw onto a double via the shared helper.
    return uint64_to_double(philox_next64(<philox_state *> st))
cdef class Philox(BitGenerator):
    """
    Philox(seed=None, counter=None, key=None)

    Container for the Philox (4x64) pseudo-random number generator.

    Parameters
    ----------
    seed : {None, int, array_like[ints], SeedSequence}, optional
        A seed to initialize the `BitGenerator`. If None, then fresh,
        unpredictable entropy will be pulled from the OS. If an ``int`` or
        ``array_like[ints]`` is passed, then it will be passed to
        `SeedSequence` to derive the initial `BitGenerator` state. One may
        also pass in a `SeedSequence` instance.
    counter : {None, int, array_like}, optional
        Counter to use in the Philox state. Can be either
        a Python int in [0, 2**256) or a 4-element uint64 array.
        If not provided, the RNG is initialized at 0.
    key : {None, int, array_like}, optional
        Key to use in the Philox state. Unlike ``seed``, the value in key is
        directly set. Can be either a Python int in [0, 2**128) or a
        2-element uint64 array. `key` and ``seed`` cannot both be used.

    Attributes
    ----------
    lock : threading.Lock
        Lock instance that is shared so that the same bit generator can be
        used in multiple Generators without corrupting the state. Code that
        generates values from a bit generator should hold the bit
        generator's lock.

    Notes
    -----
    Philox is a 64-bit PRNG that uses a counter-based design based on
    weaker (and faster) versions of cryptographic functions [1]_. The
    state consists of a 256-bit counter (4 x uint64) and a 128-bit key
    (2 x uint64). The counter is incremented by 1 for every 4 64-bit
    randoms produced, and distinct keys produce independent sequences.

    The input seed is processed by `SeedSequence` to generate the key; the
    counter is set to 0. Alternately, one can omit the seed parameter and
    set the key and counter directly.

    `Philox.jumped()` advances the state as-if :math:`2^{128}` random
    numbers have been generated; `Philox.advance(step)` advances the
    counter by an arbitrary ``step`` in [0, 2**256).

    **Compatibility Guarantee**

    `Philox` makes a guarantee that a fixed seed will always produce the
    same random integer stream.

    References
    ----------
    .. [1] John K. Salmon, Mark A. Moraes, Ron O. Dror, and David E. Shaw,
           "Parallel Random Numbers: As Easy as 1, 2, 3," Proceedings of
           the International Conference for High Performance Computing,
           Networking, Storage and Analysis (SC11), New York, NY: ACM, 2011.
    """
    # C-level state wrapper plus the key/counter storage it points into.
    cdef philox_state rng_state
    cdef philox4x64_key_t philox_key
    cdef philox4x64_ctr_t philox_ctr

    def __init__(self, seed=None, counter=None, key=None):
        if seed is not None and key is not None:
            raise ValueError('seed and key cannot be both used')
        BitGenerator.__init__(self, seed)
        # Point the C state at the storage owned by this instance.
        self.rng_state.ctr = &self.philox_ctr
        self.rng_state.key = &self.philox_key
        if key is not None:
            # An explicit key bypasses SeedSequence entirely.
            key = int_to_array(key, 'key', 128, 64)
            for i in range(2):
                self.rng_state.key.v[i] = key[i]
            self._seed_seq = None
        else:
            key = self._seed_seq.generate_state(2, np.uint64)
            for i in range(2):
                self.rng_state.key.v[i] = key[i]
        counter = 0 if counter is None else counter
        counter = int_to_array(counter, 'counter', 256, 64)
        for i in range(4):
            self.rng_state.ctr.v[i] = counter[i]

        self._reset_state_variables()

        self._bitgen.state = <void *>&self.rng_state
        self._bitgen.next_uint64 = &philox_uint64
        self._bitgen.next_uint32 = &philox_uint32
        self._bitgen.next_double = &philox_double
        self._bitgen.next_raw = &philox_uint64

    cdef _reset_state_variables(self):
        # Clear cached output so the next draw regenerates the buffer;
        # buffer_pos at PHILOX_BUFFER_SIZE marks the buffer as exhausted.
        cdef philox_state *rng_state = &self.rng_state

        rng_state[0].has_uint32 = 0
        rng_state[0].uinteger = 0
        rng_state[0].buffer_pos = PHILOX_BUFFER_SIZE
        for i in range(PHILOX_BUFFER_SIZE):
            rng_state[0].buffer[i] = 0

    @property
    def state(self):
        """
        Get or set the PRNG state

        Returns
        -------
        state : dict
            Dictionary containing the information required to describe the
            state of the PRNG
        """
        ctr = np.empty(4, dtype=np.uint64)
        key = np.empty(2, dtype=np.uint64)
        buffer = np.empty(PHILOX_BUFFER_SIZE, dtype=np.uint64)
        for i in range(4):
            ctr[i] = self.rng_state.ctr.v[i]
            if i < 2:
                key[i] = self.rng_state.key.v[i]
        for i in range(PHILOX_BUFFER_SIZE):
            buffer[i] = self.rng_state.buffer[i]

        state = {'counter': ctr, 'key': key}
        return {'bit_generator': self.__class__.__name__,
                'state': state,
                'buffer': buffer,
                'buffer_pos': self.rng_state.buffer_pos,
                'has_uint32': self.rng_state.has_uint32,
                'uinteger': self.rng_state.uinteger}

    @state.setter
    def state(self, value):
        if not isinstance(value, dict):
            raise TypeError('state must be a dict')
        bitgen = value.get('bit_generator', '')
        if bitgen != self.__class__.__name__:
            raise ValueError('state must be for a {0} PRNG'.format(self.__class__.__name__))
        for i in range(4):
            self.rng_state.ctr.v[i] = <uint64_t> value['state']['counter'][i]
            if i < 2:
                self.rng_state.key.v[i] = <uint64_t> value['state']['key'][i]
        for i in range(PHILOX_BUFFER_SIZE):
            self.rng_state.buffer[i] = <uint64_t> value['buffer'][i]

        self.rng_state.has_uint32 = value['has_uint32']
        self.rng_state.uinteger = value['uinteger']
        self.rng_state.buffer_pos = value['buffer_pos']

    cdef jump_inplace(self, iter):
        """
        Jump state in-place

        Not part of public API

        Parameters
        ----------
        iter : integer, positive
            Number of times to jump the state of the rng.
        """
        # Each jump advances the counter by 2**128.
        self.advance(iter * int(2 ** 128))

    def jumped(self, jumps=1):
        """
        jumped(jumps=1)

        Returns a new bit generator with the state jumped

        The state of the returned bit generator is jumped as-if
        (2**128) * jumps random numbers have been generated.

        Parameters
        ----------
        jumps : integer, positive
            Number of times to jump the state of the bit generator returned

        Returns
        -------
        bit_generator : Philox
            New instance of generator jumped iter times
        """
        cdef Philox bit_generator

        bit_generator = self.__class__()
        bit_generator.state = self.state
        bit_generator.jump_inplace(jumps)
        return bit_generator

    def advance(self, delta):
        """
        advance(delta)

        Advance the underlying RNG as-if delta draws have occurred.

        Parameters
        ----------
        delta : integer, positive
            Number of draws to advance the RNG. Must be less than the
            size state variable in the underlying RNG.

        Returns
        -------
        self : Philox
            RNG advanced delta steps

        Notes
        -----
        Advancing a RNG updates the underlying RNG state as-if a given
        number of calls to the underlying RNG have been made. In general
        there is not a one-to-one relationship between the number output
        random values from a particular distribution and the number of
        draws from the core RNG. This occurs for two reasons:

        * The random values are simulated using a rejection-based method
          and so, on average, more than one value from the underlying
          RNG is required to generate an single draw.
        * The number of bits required to generate a simulated value
          differs from the number of bits generated by the underlying
          RNG. For example, two 16-bit integer values can be simulated
          from a single draw of a 32-bit RNG.

        Advancing the RNG state resets any pre-computed random numbers.
        This is required to ensure exact reproducibility.
        """
        # Reduce delta modulo the 256-bit counter period and hand the
        # 4-word little-endian representation to the C advance routine.
        delta = wrap_int(delta, 256)

        cdef np.ndarray delta_a
        delta_a = int_to_array(delta, 'step', 256, 64)
        philox_advance(<uint64_t *> delta_a.data, &self.rng_state)
        self._reset_state_variables()
        return self
.\numpy\numpy\random\_pickle.py
from .bit_generator import BitGenerator
from .mtrand import RandomState
from ._mt19937 import MT19937  # restored: 'MT19937' is used below but was never imported
from ._philox import Philox
from ._pcg64 import PCG64, PCG64DXSM
from ._sfc64 import SFC64

# Registry mapping BitGenerator names to their classes; used by the
# unpickling helpers below to reconstruct a generator from its name.
BitGenerators = {'MT19937': MT19937,
                 'PCG64': PCG64,
                 'PCG64DXSM': PCG64DXSM,
                 'Philox': Philox,
                 'SFC64': SFC64,
                 }
def __bit_generator_ctor(bit_generator: str | type[BitGenerator] = 'MT19937'):
    """
    Unpickling helper: return a freshly constructed bit generator.

    Parameters
    ----------
    bit_generator : type[BitGenerator] or str
        A BitGenerator subclass, or the name of a registered BitGenerator.

    Returns
    -------
    BitGenerator
        A new instance of the requested bit generator.
    """
    # A class was passed directly: instantiate it as-is.
    if isinstance(bit_generator, type):
        return bit_generator()
    # Otherwise it must be the name of a registered generator.
    if bit_generator not in BitGenerators:
        raise ValueError(
            str(bit_generator) + ' is not a known BitGenerator module.'
        )
    return BitGenerators[bit_generator]()
def __generator_ctor(bit_generator_name="MT19937",
                     bit_generator_ctor=__bit_generator_ctor):
    """
    Unpickling helper: build a Generator around the requested bit generator.

    Parameters
    ----------
    bit_generator_name : str or BitGenerator
        Name of the core BitGenerator, or an already-constructed instance.
    bit_generator_ctor : callable, optional
        Callable that takes bit_generator_name as its only argument and
        returns an initialized bit generator.

    Returns
    -------
    rg : Generator
        Generator using the named core BitGenerator
    """
    if isinstance(bit_generator_name, BitGenerator):
        # Already an instance: wrap it directly.
        core = bit_generator_name
    else:
        core = bit_generator_ctor(bit_generator_name)
    return Generator(core)
def __randomstate_ctor(bit_generator_name="MT19937",
                       bit_generator_ctor=__bit_generator_ctor):
    """
    Unpickling helper: build a legacy RandomState-like object.

    Parameters
    ----------
    bit_generator_name : str
        Name of the core BitGenerator, or an already-constructed instance.
    bit_generator_ctor : callable, optional
        Callable that takes bit_generator_name as its only argument and
        returns an initialized bit generator.

    Returns
    -------
    rs : RandomState
        Legacy RandomState using the named core BitGenerator
    """
    if isinstance(bit_generator_name, BitGenerator):
        # Already an instance: wrap it directly.
        core = bit_generator_name
    else:
        core = bit_generator_ctor(bit_generator_name)
    return RandomState(core)
.\numpy\numpy\random\_sfc64.pyi
from typing import TypedDict
from numpy import uint64
from numpy.random.bit_generator import BitGenerator, SeedSequence
from numpy._typing import NDArray, _ArrayLikeInt_co
class _SFC64Internal(TypedDict):
    # Raw core state: the four uint64 words of the SFC64 state vector.
    state: NDArray[uint64]
class _SFC64State(TypedDict):
    # Shape of the dict returned/accepted by ``SFC64.state``.
    bit_generator: str
    state: _SFC64Internal
    has_uint32: int
    uinteger: int
class SFC64(BitGenerator):
    # Type stub for the SFC64 bit generator implemented in _sfc64.pyx.
    def __init__(self, seed: None | _ArrayLikeInt_co | SeedSequence = ...) -> None: ...
    @property
    def state(
        self,
    ) -> _SFC64State: ...
    @state.setter
    def state(
        self,
        value: _SFC64State,
    ) -> None: ...
.\numpy\numpy\random\_sfc64.pyx
import numpy as np
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
from ._common cimport uint64_to_double
from numpy.random cimport BitGenerator
__all__ = ['SFC64']
cdef extern from "src/sfc64/sfc64.h":
    # C-level SFC64 state: four 64-bit words plus a cached 32-bit value
    # (has_uint32/uinteger appear to hold the unused half of a 64-bit
    # draw when 32-bit output is requested -- see _reset_state_variables).
    struct s_sfc64_state:
        uint64_t s[4]
        int has_uint32
        uint32_t uinteger
    ctypedef s_sfc64_state sfc64_state
    uint64_t sfc64_next64(sfc64_state *state) nogil
    uint32_t sfc64_next32(sfc64_state *state) nogil
    void sfc64_set_seed(sfc64_state *state, uint64_t *seed)
    void sfc64_get_state(sfc64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
    void sfc64_set_state(sfc64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
cdef uint64_t sfc64_uint64(void* st) noexcept nogil:
    # bitgen_t callback: produce the next 64-bit draw from an SFC64 state.
    return sfc64_next64(<sfc64_state *>st)
cdef uint32_t sfc64_uint32(void *st) noexcept nogil:
    # bitgen_t callback: produce the next 32-bit draw from an SFC64 state.
    return sfc64_next32(<sfc64_state *> st)
cdef double sfc64_double(void* st) noexcept nogil:
    # bitgen_t callback: convert a 64-bit draw into a double in [0, 1).
    return uint64_to_double(sfc64_next64(<sfc64_state *>st))
cdef class SFC64(BitGenerator):
    """
    SFC64(seed=None)

    BitGenerator for Chris Doty-Humphrey's Small Fast Chaotic PRNG.

    Parameters
    ----------
    seed : {None, int, array_like[ints], SeedSequence}, optional
        A seed to initialize the `BitGenerator`. If None, then fresh,
        unpredictable entropy will be pulled from the OS. If an ``int`` or
        ``array_like[ints]`` is passed, then it will be passed to
        `SeedSequence` to derive the initial `BitGenerator` state. One may also
        pass in a `SeedSequence` instance.

    Notes
    -----
    `SFC64` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast
    Chaotic PRNG ([1]_). `SFC64` has a few different cycles that one might be
    on, depending on the seed; the expected period will be about
    :math:`2^{255}` ([2]_). `SFC64` incorporates a 64-bit counter which means
    that the absolute minimum cycle length is :math:`2^{64}` and that distinct
    seeds will not run into each other for at least :math:`2^{64}` iterations.

    `SFC64` provides a capsule containing function pointers that produce
    doubles, and unsigned 32 and 64- bit integers. These are not
    directly consumable in Python and must be consumed by a `Generator`
    or similar object that supports low-level access.

    **State and Seeding**

    The `SFC64` state vector consists of 4 unsigned 64-bit values. The last
    is a 64-bit counter that increments by 1 each iteration.

    The input seed is processed by `SeedSequence` to generate the first
    3 values, then the `SFC64` algorithm is iterated a small number of times
    to mix.

    **Compatibility Guarantee**

    `SFC64` makes a guarantee that a fixed seed will always produce the same
    random integer stream.

    References
    ----------
    .. [1] `"PractRand"
           <https://pracrand.sourceforge.net/RNG_engines.txt>`_
    .. [2] `"Random Invertible Mapping Statistics"
           <https://www.pcg-random.org/posts/random-invertible-mapping-statistics.html>`_
    """
    # Fix: the extracted source closed this docstring prematurely and left a
    # stray ``pass`` before reference [2], which was a syntax error.

    # C-level SFC64 state owned by this object.
    cdef sfc64_state rng_state

    def __init__(self, seed=None):
        BitGenerator.__init__(self, seed)
        # Point the bitgen_t interface at this object's state and the
        # C-level callbacks defined above.
        self._bitgen.state = <void *>&self.rng_state
        self._bitgen.next_uint64 = &sfc64_uint64
        self._bitgen.next_uint32 = &sfc64_uint32
        self._bitgen.next_double = &sfc64_double
        self._bitgen.next_raw = &sfc64_uint64
        # SeedSequence provides the first three 64-bit words; the C seeding
        # routine mixes them into a full state.
        val = self._seed_seq.generate_state(3, np.uint64)
        sfc64_set_seed(&self.rng_state, <uint64_t*>np.PyArray_DATA(val))
        self._reset_state_variables()

    cdef _reset_state_variables(self):
        # Invalidate the cached 32-bit value so re-seeded streams start clean.
        self.rng_state.has_uint32 = 0
        self.rng_state.uinteger = 0

    @property
    def state(self):
        """
        Get or set the PRNG state.

        Returns
        -------
        state : dict
            Dictionary containing the information required to describe the
            state of the PRNG.
        """
        cdef np.ndarray state_vec
        cdef int has_uint32
        cdef uint32_t uinteger
        # Copy the four state words plus the 32-bit cache out of C.
        state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
        sfc64_get_state(&self.rng_state,
                        <uint64_t *>np.PyArray_DATA(state_vec),
                        &has_uint32, &uinteger)
        return {'bit_generator': self.__class__.__name__,
                'state': {'state': state_vec},
                'has_uint32': has_uint32,
                'uinteger': uinteger}

    @state.setter
    def state(self, value):
        cdef np.ndarray state_vec
        cdef int has_uint32
        cdef uint32_t uinteger
        if not isinstance(value, dict):
            raise TypeError('state must be a dict')
        bitgen = value.get('bit_generator', '')
        if bitgen != self.__class__.__name__:
            raise ValueError('state must be for a {0} '
                             'RNG'.format(self.__class__.__name__))
        # Copy through a fresh contiguous uint64 array so that arbitrary
        # array-likes supplied in the dict are accepted.
        state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
        state_vec[:] = value['state']['state']
        has_uint32 = value['has_uint32']
        uinteger = value['uinteger']
        sfc64_set_state(&self.rng_state,
                        <uint64_t *>np.PyArray_DATA(state_vec),
                        has_uint32, uinteger)
.\numpy\numpy\random\__init__.pxd
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
cdef extern from "numpy/random/bitgen.h":
    # C interface shared by all bit generators: an opaque state pointer
    # plus function pointers producing raw 64/32-bit integers and doubles.
    struct bitgen:
        void *state
        uint64_t (*next_uint64)(void *st) nogil
        uint32_t (*next_uint32)(void *st) nogil
        double (*next_double)(void *st) nogil
        uint64_t (*next_raw)(void *st) nogil
    ctypedef bitgen bitgen_t
from numpy.random.bit_generator cimport BitGenerator, SeedSequence
.\numpy\numpy\random\__init__.py
"""
========================
Random Number Generation
========================
Use ``default_rng()`` to create a `Generator` and call its methods.
=============== =========================================================
Generator
--------------- ---------------------------------------------------------
Generator 实现所有随机数分布的类
default_rng ``Generator`` 的默认构造函数
=============== =========================================================
============================================= ===
BitGenerator Streams that work with Generator
--------------------------------------------- ---
MT19937
PCG64
PCG64DXSM
Philox
SFC64
============================================= ===
============================================= ===
Getting entropy to initialize a BitGenerator
--------------------------------------------- ---
SeedSequence
============================================= ===
Legacy
------
For backwards compatibility with previous versions of numpy before 1.17, the
various aliases to the global `RandomState` methods are left alone and do not
use the new `Generator` API.
==================== =========================================================
Utility functions
-------------------- ---------------------------------------------------------
random ``[0, 1)`` 区间上均匀分布的浮点数
bytes 均匀分布的随机字节
permutation 随机排列序列 / 生成随机序列
shuffle 原地随机排列序列
choice 从一维数组中随机抽样
==================== =========================================================
==================== =========================================================
Compatibility
functions - removed
in the new API
-------------------- ---------------------------------------------------------
rand 均匀分布的随机值
randn 正态分布的随机值
ranf 均匀分布的浮点数
random_integers 给定范围内均匀分布的整数(已弃用,请使用 ``integers(..., closed=True)``)
random_sample `random_sample` 的别名
randint 给定范围内均匀分布的整数
seed 种子以初始化旧版本的随机数生成器
==================== =========================================================
==================== =========================================================
Univariate
distributions
-------------------- ---------------------------------------------------------
beta ``[0, 1]`` 区间上的 Beta 分布
binomial 二项分布
chisquare :math:`\\chi^2` 分布
exponential 指数分布
f F 分布(Fisher-Snedecor 分布)
gamma Gamma 分布
# geometric 几何分布。
# gumbel 甘贝尔分布。
# hypergeometric 超几何分布。
# laplace 拉普拉斯分布。
# logistic 逻辑斯蒂分布。
# lognormal 对数正态分布。
# logseries 对数级数分布。
# negative_binomial 负二项分布。
# noncentral_chisquare 非中心卡方分布。
# noncentral_f 非中心 F 分布。
# normal 正态分布。
# pareto 帕累托分布。
# poisson 泊松分布。
# power 幂分布。
# rayleigh 瑞利分布。
# triangular 三角分布。
# uniform 均匀分布。
# vonmises 冯·米塞斯分布(圆形分布)。
# wald 瓦尔德分布(反高斯分布)。
# weibull 威布尔分布。
# zipf 费希尔分布(用于排名数据的分布)。
==================== =========================================================
==================== ==========================================================
Multivariate
distributions
-------------------- ----------------------------------------------------------
# dirichlet 贝塔分布的多元化泛化。
# multinomial 二项分布的多元化泛化。
# multivariate_normal 正态分布的多元化泛化。
==================== ==========================================================
==================== =========================================================
Standard
distributions
-------------------- ---------------------------------------------------------
# standard_cauchy 标准柯西-洛伦兹分布。
# standard_exponential 标准指数分布。
# standard_gamma 标准伽马分布。
# standard_normal 标准正态分布。
# standard_t 标准学生 t 分布。
==================== =========================================================
==================== =========================================================
Internal functions
-------------------- ---------------------------------------------------------
# get_state 获取生成器内部状态的元组表示。
# set_state 设置生成器的状态。
==================== =========================================================
"""
# NOTE(review): the docstring terminator and the opening of ``__all__`` were
# lost during extraction; the entries preceding 'randn' are restored from the
# upstream numpy.random module.
__all__ = [
    'beta', 'binomial', 'bytes', 'chisquare', 'choice', 'dirichlet',
    'exponential', 'f', 'gamma', 'geometric', 'get_state', 'gumbel',
    'hypergeometric', 'laplace', 'logistic', 'lognormal', 'logseries',
    'multinomial', 'multivariate_normal', 'negative_binomial',
    'noncentral_chisquare', 'noncentral_f', 'normal', 'pareto',
    'permutation', 'poisson', 'power', 'rand', 'randint',
    'randn',                 # random samples from the standard normal
    'random',                # random floats in [0, 1)
    'random_integers',       # random integers in a given range (deprecated)
    'random_sample',         # random floats in a given range
    'ranf',                  # random floats in [0, 1)
    'rayleigh',              # Rayleigh distribution
    'sample',                # random sample from a sequence
    'seed',                  # seed the legacy generator
    'set_state',             # set the legacy generator state
    'shuffle',               # shuffle a sequence in place
    'standard_cauchy',       # standard Cauchy distribution
    'standard_exponential',  # standard exponential distribution
    'standard_gamma',        # standard gamma distribution
    'standard_normal',       # standard normal distribution
    'standard_t',            # standard Student's t distribution
    'triangular',            # triangular distribution
    'uniform',               # uniform distribution
    'vonmises',              # von Mises (circular) distribution
    'wald',                  # Wald (inverse Gaussian) distribution
    'weibull',               # Weibull distribution
    'zipf',                  # Zipf distribution
]
# The following submodules are imported so that module-freezing tools
# (e.g. PyInstaller) pick them up.
from . import _pickle
from . import _common
from . import _bounded_integers
# 导入 Generator 类和 default_rng 函数
from ._generator import Generator, default_rng
# 导入 SeedSequence 类和 BitGenerator 类
from .bit_generator import SeedSequence, BitGenerator
# 导入 MT19937 类
from ._mt19937 import MT19937
# 导入 PCG64 和 PCG64DXSM 类
from ._pcg64 import PCG64, PCG64DXSM
# 导入 Philox 类
from ._philox import Philox
# 导入 SFC64 类
from ._sfc64 import SFC64
# 导入 mtrand 中的所有内容
from .mtrand import *
# Re-export the Generator/BitGenerator names alongside the legacy
# RandomState API pulled in by ``from .mtrand import *`` above.
__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
            'Philox', 'PCG64', 'PCG64DXSM', 'SFC64', 'default_rng',
            'BitGenerator']
def __RandomState_ctor():
    """Return a RandomState instance.

    This function exists solely to assist (un)pickling.

    Note that the state of the RandomState returned here is irrelevant, as this
    function's entire purpose is to return a newly allocated RandomState whose
    state pickle can set. Consequently the RandomState returned by this function
    is a freshly allocated copy with a seed=0.

    See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
    """
    # The concrete seed does not matter: unpickling immediately overwrites
    # the state, so any cheap deterministic value works.
    fresh = RandomState(seed=0)
    return fresh
# Expose a ``test`` entry point (``np.random.test()``) for running the
# package's unit tests.
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
# Remove the class itself so only the bound ``test`` callable stays public.
del PytestTester
.\numpy\numpy\random\__init__.pyi
from numpy._pytesttester import PytestTester
from numpy.random._generator import Generator as Generator
from numpy.random._generator import default_rng as default_rng
from numpy.random._mt19937 import MT19937 as MT19937
from numpy.random._pcg64 import (
PCG64 as PCG64,
PCG64DXSM as PCG64DXSM,
)
from numpy.random._philox import Philox as Philox
from numpy.random._sfc64 import SFC64 as SFC64
from numpy.random.bit_generator import BitGenerator as BitGenerator
from numpy.random.bit_generator import SeedSequence as SeedSequence
from numpy.random.mtrand import (
RandomState as RandomState,
beta as beta,
binomial as binomial,
bytes as bytes,
chisquare as chisquare,
choice as choice,
dirichlet as dirichlet,
exponential as exponential,
f as f,
gamma as gamma,
geometric as geometric,
get_bit_generator as get_bit_generator,
get_state as get_state,
gumbel as gumbel,
hypergeometric as hypergeometric,
laplace as laplace,
logistic as logistic,
lognormal as lognormal,
logseries as logseries,
multinomial as multinomial,
multivariate_normal as multivariate_normal,
negative_binomial as negative_binomial,
noncentral_chisquare as noncentral_chisquare,
noncentral_f as noncentral_f,
normal as normal,
pareto as pareto,
permutation as permutation,
poisson as poisson,
power as power,
rand as rand,
randint as randint,
randn as randn,
random as random,
random_integers as random_integers,
random_sample as random_sample,
ranf as ranf,
rayleigh as rayleigh,
sample as sample,
seed as seed,
set_bit_generator as set_bit_generator,
set_state as set_state,
shuffle as shuffle,
standard_cauchy as standard_cauchy,
standard_exponential as standard_exponential,
standard_gamma as standard_gamma,
standard_normal as standard_normal,
standard_t as standard_t,
triangular as triangular,
uniform as uniform,
vonmises as vonmises,
wald as wald,
weibull as weibull,
zipf as zipf,
)
__all__: list[str]
test: PytestTester
.\numpy\numpy\rec\__init__.py
from numpy._core.records import __all__, __doc__
from numpy._core.records import *
.\numpy\numpy\rec\__init__.pyi
from numpy._core.records import (
record as record,
recarray as recarray,
format_parser as format_parser,
fromarrays as fromarrays,
fromrecords as fromrecords,
fromstring as fromstring,
fromfile as fromfile,
array as array
)
__all__: list[str]
__path__: list[str]
.\numpy\numpy\strings\__init__.py
from numpy._core.strings import __all__, __doc__
from numpy._core.strings import *
.\numpy\numpy\strings\__init__.pyi
from numpy._core.strings import (
equal as equal,
not_equal as not_equal,
greater_equal as greater_equal,
less_equal as less_equal,
greater as greater,
less as less,
add as add,
multiply as multiply,
mod as mod,
isalpha as isalpha,
isalnum as isalnum,
isdigit as isdigit,
isspace as isspace,
isnumeric as isnumeric,
isdecimal as isdecimal,
islower as islower,
isupper as isupper,
istitle as istitle,
str_len as str_len,
find as find,
rfind as rfind,
index as index,
rindex as rindex,
count as count,
startswith as startswith,
endswith as endswith,
decode as decode,
encode as encode,
expandtabs as expandtabs,
center as center,
ljust as ljust,
rjust as rjust,
lstrip as lstrip,
rstrip as rstrip,
strip as strip,
zfill as zfill,
upper as upper,
lower as lower,
swapcase as swapcase,
capitalize as capitalize,
title as title,
replace as replace,
join as join,
split as split,
rsplit as rsplit,
splitlines as splitlines,
partition as partition,
rpartition as rpartition,
translate as translate
)
__all__: list[str]
.\numpy\numpy\testing\overrides.py
from numpy._core.overrides import ARRAY_FUNCTIONS as _array_functions
from numpy import ufunc as _ufunc
import numpy._core.umath as _umath
def get_overridable_numpy_ufuncs():
"""列出所有可以通过 `__array_ufunc__` 被重写的 numpy ufunc 函数
Parameters
----------
None
Returns
-------
set
包含所有可以在公共 numpy API 中通过 `__array_ufunc__` 被重写的 ufunc 集合。
"""
ufuncs = {obj for obj in _umath.__dict__.values()
if isinstance(obj, _ufunc)}
return ufuncs
def allows_array_ufunc_override(func):
    """Determine if a function can be overridden via `__array_ufunc__`.

    Parameters
    ----------
    func : callable
        Function that may be overridable via `__array_ufunc__`

    Returns
    -------
    bool
        `True` if `func` is overridable via `__array_ufunc__` and
        `False` otherwise.

    Notes
    -----
    This function is equivalent to ``isinstance(func, np.ufunc)`` and
    will work correctly for ufuncs defined outside of Numpy.
    """
    # Bug fix: this module imports ``ufunc`` as ``_ufunc`` and never binds
    # ``np``, so the previous ``np.ufunc`` reference raised NameError.
    return isinstance(func, _ufunc)
def get_overridable_numpy_array_functions():
"""列出所有可以通过 `__array_function__` 被重写的 numpy 函数
Parameters
----------
None
Returns
-------
set
包含所有可以在公共 numpy API 中通过 `__array_function__` 被重写的函数集合。
"""
from numpy.lib import recfunctions
return _array_functions.copy()
def allows_array_function_override(func):
"""确定一个 Numpy 函数是否可以通过 `__array_function__` 被重写
Parameters
----------
func : callable
可能可以通过 `__array_function__` 被重写的函数
Returns
-------
bool
如果 `func` 是可以在 Numpy API 中通过 `__array_function__` 被重写的函数则返回 `True`,否则返回 `False`。
"""
return func in _array_functions
.\numpy\numpy\testing\print_coercion_tables.py
"""Prints type-coercion tables for the built-in NumPy types
"""
import numpy as np
from numpy._core.numerictypes import obj2sctype
from collections import namedtuple
class GenericObject:
    """Minimal object type used to probe coercion against dtype('O')."""

    # Arrays of GenericObject report the generic object dtype.
    dtype = np.dtype('O')

    def __init__(self, v):
        self.v = v

    def __add__(self, other):
        # Addition is absorbing: the left GenericObject always wins.
        return self

    def __radd__(self, other):
        # Reflected addition likewise yields this object.
        return self
def print_cancast_table(ntypes):
    """Print a table of ``np.can_cast`` results for every pair in *ntypes*.

    '#' marks an equivalent cast, '=' a safe one, '~' same-kind, '.' unsafe,
    and ' ' no permitted cast at all.
    """
    # Ordered from strictest to loosest; the first matching rule wins,
    # exactly like the original if/elif chain.
    rules = (("equiv", "#"), ("safe", "="), ("same_kind", "~"), ("unsafe", "."))
    print('X', end=' ')
    for col_char in ntypes:
        print(col_char, end=' ')
    print()
    for row_char in ntypes:
        print(row_char, end=' ')
        for col_char in ntypes:
            symbol = " "
            for casting, mark in rules:
                if np.can_cast(row_char, col_char, casting):
                    symbol = mark
                    break
            print(symbol, end=' ')
        print()
def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
print('+', end=' ')
for char in ntypes:
print(char, end=' ')
print()
for row in ntypes:
if row == 'O':
rowtype = GenericObject
else:
rowtype = obj2sctype(row)
print(row, end=' ')
for col in ntypes:
if col == 'O':
coltype = GenericObject
else:
coltype = obj2sctype(col)
try:
if firstarray:
rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
else:
rowvalue = rowtype(inputfirstvalue)
colvalue = coltype(inputsecondvalue)
if use_promote_types:
char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
else:
value = np.add(rowvalue, colvalue)
if isinstance(value, np.ndarray):
char = value.dtype.char
else:
char = np.dtype(type(value)).char
except ValueError:
char = '!'
except OverflowError:
char = '@'
except TypeError:
char = '#'
print(char, end=' ')
print()
def print_new_cast_table(*, can_cast=True, legacy=False, flags=False):
    """Prints new casts, the values given are default "can-cast" values, not
    actual ones.
    """
    from numpy._core._multiarray_tests import get_all_cast_information
    # Map casting "safety" levels to one-character table symbols.
    cast_table = {
        -1: " ",
        0: "#",
        1: "#",
        2: "=",
        3: "~",
        4: ".",
    }
    # Three flag bits (1=requires PyAPI, 2=supports unaligned,
    # 4=no floating-point errors) rendered as one block glyph per combination.
    flags_table = {
        0 : "▗", 7: "█",
        1: "▚", 2: "▐", 4: "▄",
        3: "▜", 5: "▙",
        6: "▟",
    }
    cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"])
    no_cast_info = cast_info(" ", " ", " ")
    casts = get_all_cast_information()
    # Build table[from_dtype][to_dtype] -> cast_info and collect all dtypes.
    table = {}
    dtypes = set()
    for cast in casts:
        dtypes.add(cast["from"])
        dtypes.add(cast["to"])
        if cast["from"] not in table:
            table[cast["from"]] = {}
        to_dict = table[cast["from"]]
        can_cast = cast_table[cast["casting"]]
        legacy = "L" if cast["legacy"] else "."
        flags = 0
        if cast["requires_pyapi"]:
            flags |= 1
        if cast["supports_unaligned"]:
            flags |= 2
        if cast["no_floatingpoint_errors"]:
            flags |= 4
        flags = flags_table[flags]
        to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags)
    # Order dtypes by their position in np.typecodes["All"]; characters not
    # found there sort last (np.inf index), tie-broken by char.
    types = np.typecodes["All"]
    def sorter(x):
        dtype = np.dtype(x.type)
        try:
            indx = types.index(dtype.char)
        except ValueError:
            indx = np.inf
        return (indx, dtype.char)
    dtypes = sorted(dtypes, key=sorter)
    def print_table(field="can_cast"):
        # Render one matrix showing the requested cast_info field per pair.
        print('X', end=' ')
        for dt in dtypes:
            print(np.dtype(dt.type).char, end=' ')
        print()
        for from_dt in dtypes:
            print(np.dtype(from_dt.type).char, end=' ')
            row = table.get(from_dt, {})
            for to_dt in dtypes:
                print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
            print()
    if can_cast:
        print()
        print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
        print()
        print_table("can_cast")
    if legacy:
        print()
        print("L denotes a legacy cast . a non-legacy one.")
        print()
        print_table("legacy")
    if flags:
        print()
        print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, "
              f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors")
        print()
        print_table("flags")
if __name__ == '__main__':
    # Exercise every table printer over the full set of NumPy type codes.
    print("can cast")
    print_cancast_table(np.typecodes['All'])
    print()
    print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
    print()
    print("scalar + scalar")
    print_coercion_table(np.typecodes['All'], 0, 0, False)
    print()
    print("scalar + neg scalar")
    print_coercion_table(np.typecodes['All'], 0, -1, False)
    print()
    print("array + scalar")
    print_coercion_table(np.typecodes['All'], 0, 0, True)
    print()
    print("array + neg scalar")
    print_coercion_table(np.typecodes['All'], 0, -1, True)
    print()
    print("promote_types")
    print_coercion_table(np.typecodes['All'], 0, 0, False, True)
    print("New casting type promotion:")
    print_new_cast_table(can_cast=True, legacy=True, flags=True)
.\numpy\numpy\testing\tests\test_utils.py
import warnings
import sys
import os
import itertools
import pytest
import weakref
import re
import numpy as np
import numpy._core._multiarray_umath as ncu
from numpy.testing import (
assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_array_less, build_err_msg,
assert_raises, assert_warns, assert_no_warnings, assert_allclose,
assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
class _GenericTest:
    """Shared checks; subclasses bind ``self._assert_func`` in setup."""

    def _test_equal(self, a, b):
        # The bound comparison function takes (actual, desired).
        self._assert_func(a, b)

    def _test_not_equal(self, a, b):
        # Inequality is expressed as the comparison raising AssertionError.
        with assert_raises(AssertionError):
            self._assert_func(a, b)

    def test_array_rank1_eq(self):
        """Test two equal array of rank 1 are found equal."""
        left = np.array([1, 2])
        right = np.array([1, 2])
        self._test_equal(left, right)

    def test_array_rank1_noteq(self):
        """Test two different array of rank 1 are found not equal."""
        left = np.array([1, 2])
        right = np.array([2, 2])
        self._test_not_equal(left, right)

    def test_array_rank2_eq(self):
        """Test two equal array of rank 2 are found equal."""
        left = np.array([[1, 2], [3, 4]])
        right = np.array([[1, 2], [3, 4]])
        self._test_equal(left, right)

    def test_array_diffshape(self):
        """Test two arrays with different shapes are found not equal."""
        vec = np.array([1, 2])
        mat = np.array([[1, 2], [1, 2]])
        self._test_not_equal(vec, mat)

    def test_objarray(self):
        """Test object arrays."""
        obj_arr = np.array([1, 1], dtype=object)
        self._test_equal(obj_arr, 1)

    def test_array_likes(self):
        self._test_equal([1, 2, 3], (1, 2, 3))
class TestArrayEqual(_GenericTest):
    """Tests for assert_array_equal (bound as ``_assert_func``)."""

    def setup_method(self):
        self._assert_func = assert_array_equal

    def test_generic_rank1(self):
        """Test rank 1 array for all dtypes."""
        def foo(t):
            a = np.empty(2, t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            self._test_not_equal(c, b)

        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)
        for t in ['S1', 'U1']:
            foo(t)

    def test_0_ndim_array(self):
        x = np.array(473963742225900817127911193656584771)
        y = np.array(18535119325151578301457182298393896)
        with pytest.raises(AssertionError) as exc_info:
            self._assert_func(x, y)
        msg = str(exc_info.value)
        assert_('Mismatched elements: 1 / 1 (100%)\n'
                in msg)
        y = x
        self._assert_func(x, y)

        x = np.array(4395065348745.5643764887869876)
        y = np.array(0)
        expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
                        'Max absolute difference among violations: '
                        '4.39506535e+12\n'
                        'Max relative difference among violations: inf\n')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y)
        x = y
        self._assert_func(x, y)

    def test_generic_rank3(self):
        """Test rank 3 array for all dtypes."""
        def foo(t):
            a = np.empty((4, 2, 3), t)
            a.fill(1)
            b = a.copy()
            c = a.copy()
            c.fill(0)
            self._test_equal(a, b)
            # Restored: the extracted source lost this inequality check.
            self._test_not_equal(c, b)

        for t in '?bhilqpBHILQPfdgFDG':
            foo(t)
        for t in ['S1', 'U1']:
            foo(t)

    def test_nan_array(self):
        """Test arrays with nan values in them."""
        a = np.array([1, 2, np.nan])
        b = np.array([1, 2, np.nan])
        self._test_equal(a, b)
        c = np.array([1, 2, 3])
        self._test_not_equal(c, b)

    def test_string_arrays(self):
        """Test two arrays with different shapes are found not equal."""
        a = np.array(['floupi', 'floupa'])
        b = np.array(['floupi', 'floupa'])
        c = np.array(['floupipi', 'floupa'])
        # Restored: the extracted source lost the assertions of this test.
        self._test_equal(a, b)
        self._test_not_equal(c, b)

    def test_recarrays(self):
        """Test record arrays."""
        a = np.empty(2, [('floupi', float), ('floupa', float)])
        a['floupi'] = [1, 2]
        a['floupa'] = [1, 2]
        b = a.copy()
        # Restored: equality check lost in extraction.
        self._test_equal(a, b)
        c = np.empty(2, [('floupipi', float),
                         ('floupi', float), ('floupa', float)])
        c['floupipi'] = a['floupi'].copy()
        c['floupa'] = a['floupa'].copy()
        with pytest.raises(TypeError):
            # Restored body: the original dangling `with` was a syntax
            # error; mismatched field layouts cannot be compared.
            self._test_not_equal(c, b)

    def test_masked_nan_inf(self):
        # Masked positions compare equal even against nan/inf values.
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
        b = np.array([3., np.nan, 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
        b = np.array([np.inf, 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)

    def test_subclass_that_overrides_eq(self):
        # While we cannot guarantee testing functions will always work for
        # subclasses, the tests should ideally rely only on subclasses having
        # comparison operators, not on them being able to store booleans
        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return bool(np.equal(self, other).all())

            def __ne__(self, other):
                return not self == other

        a = np.array([1., 2.]).view(MyArray)
        b = np.array([2., 3.]).view(MyArray)
        assert_(type(a == a), bool)
        assert_(a == a)
        assert_(a != b)
        self._test_equal(a, a)
        self._test_not_equal(a, b)
        self._test_not_equal(b, a)
        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
                        'Max absolute difference among violations: 1.\n'
                        'Max relative difference among violations: 0.5')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._test_equal(a, b)
        c = np.array([0., 2.9]).view(MyArray)
        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
                        'Max absolute difference among violations: 2.\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._test_equal(b, c)

    def test_subclass_that_does_not_implement_npall(self):
        class MyArray(np.ndarray):
            def __array_function__(self, *args, **kwargs):
                return NotImplemented

        a = np.array([1., 2.]).view(MyArray)
        b = np.array([2., 3.]).view(MyArray)
        with assert_raises(TypeError):
            np.all(a)
        self._test_equal(a, a)
        self._test_not_equal(a, b)
        self._test_not_equal(b, a)

    def test_suppress_overflow_warnings(self):
        # Comparison must not emit FP warnings even under errstate raise.
        with pytest.raises(AssertionError):
            with np.errstate(all="raise"):
                np.testing.assert_array_equal(
                    np.array([1, 2, 3], np.float32),
                    np.array([1, 1e-40, 3], np.float32))

    def test_array_vs_scalar_is_equal(self):
        """Test comparing an array with a scalar when all values are equal."""
        a = np.array([1., 1., 1.])
        b = 1.
        self._test_equal(a, b)

    def test_array_vs_array_not_equal(self):
        """Test comparing an array with a scalar when not all values equal."""
        a = np.array([34986, 545676, 439655, 563766])
        b = np.array([34986, 545676, 439655, 0])
        expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
                        'Max absolute difference among violations: 563766\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(a, b)
        a = np.array([34986, 545676, 439655.2, 563766])
        expected_msg = ('Mismatched elements: 2 / 4 (50%)\n'
                        'Max absolute difference among violations: '
                        '563766.\n'
                        'Max relative difference among violations: '
                        '4.54902139e-07')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(a, b)

    def test_array_vs_scalar_strict(self):
        """Test comparing an array with a scalar with strict option."""
        a = np.array([1., 1., 1.])
        b = 1.
        with pytest.raises(AssertionError):
            self._assert_func(a, b, strict=True)

    def test_array_vs_array_strict(self):
        """Test comparing two arrays with strict option."""
        a = np.array([1., 1., 1.])
        b = np.array([1., 1., 1.])
        self._assert_func(a, b, strict=True)

    def test_array_vs_float_array_strict(self):
        """Test comparing two arrays with strict option."""
        a = np.array([1, 1, 1])
        b = np.array([1., 1., 1.])
        with pytest.raises(AssertionError):
            self._assert_func(a, b, strict=True)
class TestBuildErrorMessage:
    """Checks for the exact formatting produced by ``build_err_msg``."""

    def test_build_err_msg_defaults(self):
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])
        message = 'There is a mismatch'
        built = build_err_msg([actual, desired], message)
        expected = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
                    '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
                    '2.00003, 3.00004])')
        assert_equal(built, expected)

    def test_build_err_msg_no_verbose(self):
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])
        message = 'There is a mismatch'
        # With verbose off, only the header line survives.
        built = build_err_msg([actual, desired], message, verbose=False)
        expected = '\nItems are not equal: There is a mismatch'
        assert_equal(built, expected)

    def test_build_err_msg_custom_names(self):
        actual = np.array([1.00001, 2.00002, 3.00003])
        desired = np.array([1.00002, 2.00003, 3.00004])
        message = 'There is a mismatch'
        built = build_err_msg([actual, desired], message, names=('FOO', 'BAR'))
        expected = ('\nItems are not equal: There is a mismatch\n FOO: array(['
                    '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
                    '3.00004])')
        assert_equal(built, expected)

    def test_build_err_msg_custom_precision(self):
        actual = np.array([1.000000001, 2.00002, 3.00003])
        desired = np.array([1.000000002, 2.00003, 3.00004])
        message = 'There is a mismatch'
        built = build_err_msg([actual, desired], message, precision=10)
        expected = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
                    '1.000000001, 2.00002    , 3.00003    ])\n DESIRED: array(['
                    '1.000000002, 2.00003    , 3.00004    ])')
        assert_equal(built, expected)
class TestEqual(TestArrayEqual):
    # assert_equal reuses the array machinery of assert_array_equal but also
    # handles scalars, Python objects, datetimes and NaT values.

    def setup_method(self):
        self._assert_func = assert_equal

    def test_nan_items(self):
        # NaN equals NaN for assert_equal, but a scalar NaN is not equal to
        # a sequence containing NaN, nor to a number.
        self._assert_func(np.nan, np.nan)
        self._assert_func([np.nan], [np.nan])
        self._test_not_equal(np.nan, [np.nan])
        self._test_not_equal(np.nan, 1)

    def test_inf_items(self):
        self._assert_func(np.inf, np.inf)
        self._assert_func([np.inf], [np.inf])
        self._test_not_equal(np.inf, [np.inf])

    def test_datetime(self):
        # Equal instants compare equal across units ("s" vs "m");
        # different instants are unequal regardless of unit.
        self._test_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-01", "s")
        )
        self._test_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-01", "m")
        )
        self._test_not_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-02", "s")
        )
        self._test_not_equal(
            np.datetime64("2017-01-01", "s"),
            np.datetime64("2017-01-02", "m")
        )

    def test_nat_items(self):
        # Datetime NaTs in various units.
        nadt_no_unit = np.datetime64("NaT")
        nadt_s = np.datetime64("NaT", "s")
        nadt_d = np.datetime64("NaT", "ns")
        # Timedelta NaTs in various units.
        natd_no_unit = np.timedelta64("NaT")
        natd_s = np.timedelta64("NaT", "s")
        natd_d = np.timedelta64("NaT", "ns")
        dts = [nadt_no_unit, nadt_s, nadt_d]
        tds = [natd_no_unit, natd_s, natd_d]
        # Datetime NaTs are mutually equal irrespective of unit ...
        for a, b in itertools.product(dts, dts):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)
        # ... and so are timedelta NaTs ...
        for a, b in itertools.product(tds, tds):
            self._assert_func(a, b)
            self._assert_func([a], [b])
            self._test_not_equal([a], b)
        # ... but datetime and timedelta NaTs never equal each other, nor
        # any concrete datetime/timedelta value.
        for a, b in itertools.product(tds, dts):
            self._test_not_equal(a, b)
            self._test_not_equal(a, [b])
            self._test_not_equal([a], [b])
            self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
            self._test_not_equal([a], np.timedelta64(123, "s"))
            self._test_not_equal([b], np.timedelta64(123, "s"))

    def test_non_numeric(self):
        self._assert_func('ab', 'ab')
        self._test_not_equal('ab', 'abb')

    def test_complex_item(self):
        # NaN-containing complex scalars compare like their components.
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_negative_zero(self):
        # assert_equal distinguishes +0.0 from -0.0.
        self._test_not_equal(ncu.PZERO, ncu.NZERO)

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)

    def test_object(self):
        # Object arrays are compared element-wise.
        import datetime
        a = np.array([datetime.datetime(2000, 1, 1),
                      datetime.datetime(2000, 1, 2)])
        self._test_not_equal(a, a[::-1])
class TestArrayAlmostEqual(_GenericTest):
    # assert_array_almost_equal: element-wise comparison up to ``decimal``
    # places; failure messages report the mismatch count and the maximal
    # absolute/relative differences among the violating elements.

    def setup_method(self):
        self._assert_func = assert_array_almost_equal

    def test_closeness(self):
        # Note: the comparison is not symmetric — the relative difference
        # is computed against the second (desired) argument, hence the
        # different messages for (a, b) vs (b, a) below.
        expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
                        'Max absolute difference among violations: 1.5\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(1.5, 0.0, decimal=0)
        self._assert_func([1.499999], [0.0], decimal=0)
        expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
                        'Max absolute difference among violations: 1.5\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func([1.5], [0.0], decimal=0)
        a = [1.4999999, 0.00003]
        b = [1.49999991, 0]
        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
                        'Max absolute difference among violations: 3.e-05\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(a, b, decimal=7)
        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
                        'Max absolute difference among violations: 3.e-05\n'
                        'Max relative difference among violations: 1.')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(b, a, decimal=7)

    def test_simple(self):
        x = np.array([1234.2222])
        y = np.array([1234.2223])
        # Equal to 3 and 4 decimal places, unequal at 5.
        self._assert_func(x, y, decimal=3)
        self._assert_func(x, y, decimal=4)
        expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
                        'Max absolute difference among violations: '
                        '1.e-04\n'
                        'Max relative difference among violations: '
                        '8.10226812e-08')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y, decimal=5)

    def test_array_vs_scalar(self):
        # The scalar operand is broadcast against the array; the message
        # again depends on which operand is "desired".
        a = [5498.42354, 849.54345, 0.00]
        b = 5498.42354
        expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n'
                        'Max absolute difference among violations: '
                        '5498.42354\n'
                        'Max relative difference among violations: 1.')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(a, b, decimal=9)
        expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n'
                        'Max absolute difference among violations: '
                        '5498.42354\n'
                        'Max relative difference among violations: 5.4722099')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(b, a, decimal=9)
        a = [5498.42354, 0.00]
        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
                        'Max absolute difference among violations: '
                        '5498.42354\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(b, a, decimal=7)
        b = 0
        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
                        'Max absolute difference among violations: '
                        '5498.42354\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(a, b, decimal=7)
def test_nan(self):
anan = np.array([np.nan])
aone = np.array([1])
self._assert_func(anan, anan)
assert_raises(AssertionError,
lambda: self._assert_func(anan, aone))
assert_raises(AssertionError,
lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError,
lambda: self._assert_func(ainf, anan))
    def test_inf(self):
        # inf in one operand (matching finite value in the other) fails,
        # as does inf vs -inf.
        a = np.array([[1., 2.], [3., 4.]])
        b = a.copy()
        a[0, 0] = np.inf
        assert_raises(AssertionError,
                      lambda: self._assert_func(a, b))
        b[0, 0] = -np.inf
        assert_raises(AssertionError,
                      lambda: self._assert_func(a, b))

    def test_subclass(self):
        # Masked arrays: masked entries are ignored in the comparison.
        a = np.array([[1., 2.], [3., 4.]])
        b = np.ma.masked_array([[1., 2.], [0., 4.]],
                               [[False, False], [True, False]])
        self._assert_func(a, b)
        self._assert_func(b, a)
        self._assert_func(b, b)
        # Fully masked operands compare equal to anything.
        a = np.ma.MaskedArray(3.5, mask=True)
        b = np.array([3., 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.masked
        b = np.array([3., 4., 6.5])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
        b = np.array([1., 2., 3.])
        self._test_equal(a, b)
        self._test_equal(b, a)
        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
        b = np.array(1.)
        self._test_equal(a, b)
        self._test_equal(b, a)

    def test_subclass_2(self):
        # An ndarray subclass whose comparisons return plain ndarrays and
        # whose all() delegates to builtin all(); the assertion machinery
        # must still work with it.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return super().__eq__(other).view(np.ndarray)

            def __lt__(self, other):
                return super().__lt__(other).view(np.ndarray)

            def all(self, *args, **kwargs):
                return all(self)

        a = np.array([1., 2.]).view(MyArray)
        self._assert_func(a, a)
        z = np.array([True, True]).view(MyArray)
        all(z)
        b = np.array([1., 202]).view(MyArray)
        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
                        'Max absolute difference among violations: 200.\n'
                        'Max relative difference among violations: 0.99009')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(a, b)

    def test_subclass_that_cannot_be_bool(self):
        # The subclass's all() raises, so the machinery must never try to
        # truth-test the comparison result directly.
        class MyArray(np.ndarray):
            def __eq__(self, other):
                return super().__eq__(other).view(np.ndarray)

            def __lt__(self, other):
                return super().__lt__(other).view(np.ndarray)

            def all(self, *args, **kwargs):
                raise NotImplementedError

        a = np.array([1., 2.]).view(MyArray)
        self._assert_func(a, a)
class TestAlmostEqual(_GenericTest):
    # assert_almost_equal: scalar-or-array comparison to ``decimal`` places
    # (default decimal=7).

    def setup_method(self):
        self._assert_func = assert_almost_equal

    def test_closeness(self):
        # |1.499999 - 0.0| rounds to 1 at decimal=0 and passes; 1.5 fails.
        self._assert_func(1.499999, 0.0, decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func(1.5, 0.0, decimal=0))
        self._assert_func([1.499999], [0.0], decimal=0)
        assert_raises(AssertionError,
                      lambda: self._assert_func([1.5], [0.0], decimal=0))

    def test_nan_item(self):
        self._assert_func(np.nan, np.nan)
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.nan, 1))
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.nan, np.inf))
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.inf, np.nan))

    def test_inf_item(self):
        # Same-signed infinities are equal; inf vs finite or -inf fails.
        self._assert_func(np.inf, np.inf)
        self._assert_func(-np.inf, -np.inf)
        assert_raises(AssertionError,
                      lambda: self._assert_func(np.inf, 1))
        assert_raises(AssertionError,
                      lambda: self._assert_func(-np.inf, np.inf))

    def test_simple_item(self):
        self._test_not_equal(1, 2)

    def test_complex_item(self):
        self._assert_func(complex(1, 2), complex(1, 2))
        self._assert_func(complex(1, np.nan), complex(1, np.nan))
        self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
        self._test_not_equal(complex(1, np.nan), complex(1, 2))
        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))

    def test_complex(self):
        x = np.array([complex(1, 2), complex(1, np.nan)])
        z = np.array([complex(1, 2), complex(np.nan, 1)])
        y = np.array([complex(1, 2), complex(1, 2)])
        self._assert_func(x, x)
        self._test_not_equal(x, y)
        self._test_not_equal(x, z)

    def test_error_message(self):
        """Check the message is formatted correctly for the decimal value.
        Also check the message when input includes inf or nan (gh12200)"""
        x = np.array([1.00000000001, 2.00000000002, 3.00003])
        y = np.array([1.00000000002, 2.00000000003, 3.00004])
        # Test with a different amount of decimal digits.
        expected_msg = ('Mismatched elements: 3 / 3 (100%)\n'
                        'Max absolute difference among violations: 1.e-05\n'
                        'Max relative difference among violations: '
                        '3.33328889e-06\n'
                        ' ACTUAL: array([1.00000000001, '
                        '2.00000000002, '
                        '3.00003 ])\n'
                        ' DESIRED: array([1.00000000002, 2.00000000003, '
                        '3.00004 ])')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y, decimal=12)
        # With the default value of decimal digits, only the third element
        # violates; note the reduced print precision in the reprs.
        expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
                        'Max absolute difference among violations: 1.e-05\n'
                        'Max relative difference among violations: '
                        '3.33328889e-06\n'
                        ' ACTUAL: array([1. , 2. , 3.00003])\n'
                        ' DESIRED: array([1. , 2. , 3.00004])')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y)
        # Check the error message when input includes inf.
        x = np.array([np.inf, 0])
        y = np.array([np.inf, 1])
        expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
                        'Max absolute difference among violations: 1.\n'
                        'Max relative difference among violations: 1.\n'
                        ' ACTUAL: array([inf, 0.])\n'
                        ' DESIRED: array([inf, 1.])')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y)
        # Check the error message when dividing by zero (relative diff inf).
        x = np.array([1, 2])
        y = np.array([0, 0])
        expected_msg = ('Mismatched elements: 2 / 2 (100%)\n'
                        'Max absolute difference among violations: 2\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y)
def test_error_message_2(self):
"""检查消息格式是否正确"""
"""当 x 或 y 中至少有一个是标量时。"""
x = 2
y = np.ones(20)
expected_msg = ('Mismatched elements: 20 / 20 (100%)\n'
'Max absolute difference among violations: 1.\n'
'Max relative difference among violations: 1.')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(x, y)
y = 2
x = np.ones(20)
expected_msg = ('Mismatched elements: 20 / 20 (100%)\n'
'Max absolute difference among violations: 1.\n'
'Max relative difference among violations: 0.5')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(x, y)
def test_subclass_that_cannot_be_bool(self):
class MyArray(np.ndarray):
def __eq__(self, other):
return super().__eq__(other).view(np.ndarray)
def __lt__(self, other):
return super().__lt__(other).view(np.ndarray)
def all(self, *args, **kwargs):
raise NotImplementedError
a = np.array([1., 2.]).view(MyArray)
self._assert_func(a, a)
class TestApproxEqual:
    # assert_approx_equal compares values to a number of significant digits.

    def setup_method(self):
        self._assert_func = assert_approx_equal

    def test_simple_0d_arrays(self):
        x = np.array(1234.22)
        y = np.array(1234.23)
        # Equal to 5 and 6 significant digits, unequal at 7.
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        assert_raises(AssertionError,
                      lambda: self._assert_func(x, y, significant=7))

    def test_simple_items(self):
        x = 1234.22
        y = 1234.23
        self._assert_func(x, y, significant=4)
        self._assert_func(x, y, significant=5)
        self._assert_func(x, y, significant=6)
        assert_raises(AssertionError,
                      lambda: self._assert_func(x, y, significant=7))

    def test_nan_array(self):
        # NaN equals NaN; NaN vs finite or inf raises.
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))

    def test_nan_items(self):
        # NOTE(review): this body is identical to test_nan_array above —
        # presumably it was meant to use scalar items; confirm upstream.
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
class TestArrayAssertLess:
    # assert_array_less: strict element-wise ordering actual < desired.
    # Failure messages report counts and max abs/relative differences
    # among the violating elements (relative to the desired operand).

    def setup_method(self):
        self._assert_func = assert_array_less

    def test_simple_arrays(self):
        x = np.array([1.1, 2.2])
        y = np.array([1.2, 2.3])
        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))
        y = np.array([1.0, 2.3])
        # One element violates in either direction.
        assert_raises(AssertionError, lambda: self._assert_func(x, y))
        assert_raises(AssertionError, lambda: self._assert_func(y, x))
        a = np.array([1, 3, 6, 20])
        b = np.array([2, 4, 6, 8])
        # 6 < 6 and 20 < 8 both fail; 20-8=12, 12/8=1.5.
        expected_msg = ('Mismatched elements: 2 / 4 (50%)\n'
                        'Max absolute difference among violations: 12\n'
                        'Max relative difference among violations: 1.5')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(a, b)

    def test_rank2(self):
        x = np.array([[1.1, 2.2], [3.3, 4.4]])
        y = np.array([[1.2, 2.3], [3.4, 4.5]])
        self._assert_func(x, y)
        expected_msg = ('Mismatched elements: 4 / 4 (100%)\n'
                        'Max absolute difference among violations: 0.1\n'
                        'Max relative difference among violations: 0.09090909')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(y, x)
        y = np.array([[1.0, 2.3], [3.4, 4.5]])
        assert_raises(AssertionError, lambda: self._assert_func(x, y))
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

    def test_rank3(self):
        x = np.ones(shape=(2, 2, 2))
        y = np.ones(shape=(2, 2, 2)) + 1
        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))
        y[0, 0, 0] = 0
        # Only the zeroed element violates 1 < y; 1/0 gives inf.
        expected_msg = ('Mismatched elements: 1 / 8 (12.5%)\n'
                        'Max absolute difference among violations: 1.\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))

    def test_simple_items(self):
        x = 1.1
        y = 2.2
        self._assert_func(x, y)
        expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
                        'Max absolute difference among violations: 1.1\n'
                        'Max relative difference among violations: 1.')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(y, x)
        y = np.array([2.2, 3.3])
        self._assert_func(x, y)
        assert_raises(AssertionError, lambda: self._assert_func(y, x))
        y = np.array([1.0, 3.3])
        assert_raises(AssertionError, lambda: self._assert_func(x, y))

    def test_simple_items_and_array(self):
        # Scalar vs array, both orders.
        x = np.array([[621.345454, 390.5436, 43.54657, 626.4535],
                      [54.54, 627.3399, 13., 405.5435],
                      [543.545, 8.34, 91.543, 333.3]])
        y = 627.34
        self._assert_func(x, y)
        y = 8.339999
        self._assert_func(y, x)
        x = np.array([[3.4536, 2390.5436, 435.54657, 324525.4535],
                      [5449.54, 999090.54, 130303.54, 405.5435],
                      [543.545, 8.34, 91.543, 999090.53999]])
        y = 999090.54
        # The element equal to y violates strict less-than with zero diff.
        expected_msg = ('Mismatched elements: 1 / 12 (8.33%)\n'
                        'Max absolute difference among violations: 0.\n'
                        'Max relative difference among violations: 0.')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y)
        expected_msg = ('Mismatched elements: 12 / 12 (100%)\n'
                        'Max absolute difference among violations: '
                        '999087.0864\n'
                        'Max relative difference among violations: '
                        '289288.5934676')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(y, x)

    def test_zeroes(self):
        x = np.array([546456., 0, 15.455])
        y = np.array(87654.)
        expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
                        'Max absolute difference among violations: 458802.\n'
                        'Max relative difference among violations: 5.23423917')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y)
        expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n'
                        'Max absolute difference among violations: 87654.\n'
                        'Max relative difference among violations: '
                        '5670.5626011')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(y, x)
        y = 0
        # Division by a zero desired value yields an inf relative diff.
        expected_msg = ('Mismatched elements: 3 / 3 (100%)\n'
                        'Max absolute difference among violations: 546456.\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(x, y)
        expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
                        'Max absolute difference among violations: 0.\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            self._assert_func(y, x)

    def test_nan_noncompare(self):
        # NaN never satisfies (nor fails) an ordering except against NaN.
        anan = np.array(np.nan)
        aone = np.array(1)
        ainf = np.array(np.inf)
        self._assert_func(anan, anan)
        assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))

    def test_nan_noncompare_array(self):
        x = np.array([1.1, 2.2, 3.3])
        anan = np.array(np.nan)
        assert_raises(AssertionError, lambda: self._assert_func(x, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, x))
        x = np.array([1.1, 2.2, np.nan])
        assert_raises(AssertionError, lambda: self._assert_func(x, anan))
        assert_raises(AssertionError, lambda: self._assert_func(anan, x))
        # Matching NaN positions are allowed when the rest is ordered.
        y = np.array([1.0, 2.0, np.nan])
        self._assert_func(y, x)
        assert_raises(AssertionError, lambda: self._assert_func(x, y))

    def test_inf_compare(self):
        aone = np.array(1)
        ainf = np.array(np.inf)
        # Anything finite is < inf, -inf is < anything (including inf);
        # equal infinities are not strictly less.
        self._assert_func(aone, ainf)
        self._assert_func(-ainf, aone)
        self._assert_func(-ainf, ainf)
        assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
        assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))

    def test_inf_compare_array(self):
        x = np.array([1.1, 2.2, np.inf])
        ainf = np.array(np.inf)
        assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
        assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
        assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
        self._assert_func(-ainf, x)

    def test_strict(self):
        """Test the behavior of the `strict` option."""
        x = np.zeros(3)
        y = np.ones(())
        self._assert_func(x, y)
        # strict=True rejects shape-mismatch broadcasting ...
        with pytest.raises(AssertionError):
            self._assert_func(x, y, strict=True)
        y = np.broadcast_to(y, x.shape)
        self._assert_func(x, y)
        # ... and dtype mismatches.
        with pytest.raises(AssertionError):
            self._assert_func(x, y.astype(np.float32), strict=True)
class TestWarns:
    # Tests for assert_warns / assert_no_warnings, including that they
    # restore the global warnings filter state.

    def test_warn(self):
        def f():
            warnings.warn("yo")
            return 3

        # Snapshot the filters so we can verify they are restored.
        before_filters = sys.modules['warnings'].filters[:]
        assert_equal(assert_warns(UserWarning, f), 3)
        after_filters = sys.modules['warnings'].filters
        assert_raises(AssertionError, assert_no_warnings, f)
        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
        # assert_warns must not leak changes to the warnings state.
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserve warnings state")

    def test_context_manager(self):
        # Same checks using the context-manager form.
        before_filters = sys.modules['warnings'].filters[:]
        with assert_warns(UserWarning):
            warnings.warn("yo")
        after_filters = sys.modules['warnings'].filters

        def no_warnings():
            with assert_no_warnings():
                warnings.warn("yo")

        assert_raises(AssertionError, no_warnings)
        assert_equal(before_filters, after_filters,
                     "assert_warns does not preserve warnings state")

    def test_args(self):
        # Positional/keyword args are forwarded; pytest-style kwargs such
        # as ``match`` are rejected with a pointer to pytest.warns.
        def f(a=0, b=1):
            warnings.warn("yo")
            return a + b

        assert assert_warns(UserWarning, f, b=20) == 20
        with pytest.raises(RuntimeError) as exc:
            # assert_warns cannot do regexp matching, use pytest.warns
            with assert_warns(UserWarning, match="A"):
                warnings.warn("B", UserWarning)
        assert "assert_warns" in str(exc)
        assert "pytest.warns" in str(exc)
        with pytest.raises(RuntimeError) as exc:
            # unknown keyword argument
            with assert_warns(UserWarning, wrong="A"):
                warnings.warn("B", UserWarning)
        assert "assert_warns" in str(exc)
        assert "pytest.warns" not in str(exc)

    def test_warn_wrong_warning(self):
        def f():
            warnings.warn("yo", DeprecationWarning)

        failed = False
        with warnings.catch_warnings():
            warnings.simplefilter("error", DeprecationWarning)
            try:
                # Should raise a DeprecationWarning (escalated to an error
                # by the filter above), not swallow it as a UserWarning.
                assert_warns(UserWarning, f)
                failed = True
            except DeprecationWarning:
                pass

        if failed:
            raise AssertionError("wrong warning caught by assert_warn")
    def test_simple(self):
        # assert_allclose basics: atol/rtol handling and the content of the
        # failure message; relative diff is computed against the second
        # (desired) argument, hence the asymmetric messages below.
        x = 1e-3
        y = 1e-9
        assert_allclose(x, y, atol=1)
        assert_raises(AssertionError, assert_allclose, x, y)
        expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
                        'Max absolute difference among violations: 0.001\n'
                        'Max relative difference among violations: 999999.')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            assert_allclose(x, y)
        z = 0
        # Desired value of zero makes the relative difference infinite.
        expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
                        'Max absolute difference among violations: 1.e-09\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            assert_allclose(y, z)
        expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
                        'Max absolute difference among violations: 1.e-09\n'
                        'Max relative difference among violations: 1.')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            assert_allclose(z, y)
        a = np.array([x, y, x, y])
        b = np.array([x, y, x, x])
        assert_allclose(a, b, atol=1)
        assert_raises(AssertionError, assert_allclose, a, b)
        # A perturbation within the default rtol passes, a tighter rtol fails.
        b[-1] = y * (1 + 1e-8)
        assert_allclose(a, b)
        assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
        assert_allclose(6, 10, rtol=0.5)
        assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
        b = np.array([x, y, x, x])
        c = np.array([x, y, x, z])
        expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
                        'Max absolute difference among violations: 0.001\n'
                        'Max relative difference among violations: inf')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            assert_allclose(b, c)
        expected_msg = ('Mismatched elements: 1 / 4 (25%)\n'
                        'Max absolute difference among violations: 0.001\n'
                        'Max relative difference among violations: 1.')
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            assert_allclose(c, b)
def test_equal_nan(self):
a = np.array([np.nan])
b = np.array([np.nan])
assert_allclose(a, b, equal_nan=True)
def test_not_equal_nan(self):
a = np.array([np.nan])
b = np.array([np.nan])
assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
def test_equal_nan_default(self):
a = np.array([np.nan])
b = np.array([np.nan])
assert_array_equal(a, b)
assert_array_almost_equal(a, b)
assert_array_less(a, b)
assert_allclose(a, b)
def test_report_max_relative_error(self):
a = np.array([0, 1])
b = np.array([0, 2])
expected_msg = 'Max relative difference among violations: 0.5'
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
assert_allclose(a, b)
def test_timedelta(self):
a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
assert_allclose(a, a)
    def test_error_message_unsigned(self):
        """Check the message is formatted correctly when overflow can occur
        (gh21768)."""
        # Ensure to test for potential overflow in the unsigned difference
        # for both x - y and y - x (uint8 would otherwise wrap around).
        x = np.asarray([0, 1, 8], dtype='uint8')
        y = np.asarray([4, 4, 4], dtype='uint8')
        expected_msg = 'Max absolute difference among violations: 4'
        with pytest.raises(AssertionError, match=re.escape(expected_msg)):
            assert_allclose(x, y, atol=3)
    def test_strict(self):
        """Test the behavior of the `strict` option."""
        x = np.ones(3)
        y = np.ones(())
        assert_allclose(x, y)
        # strict=True rejects shape-mismatch broadcasting ...
        with pytest.raises(AssertionError):
            assert_allclose(x, y, strict=True)
        assert_allclose(x, x)
        # ... and dtype mismatches.
        with pytest.raises(AssertionError):
            assert_allclose(x, x.astype(np.float32), strict=True)
class TestArrayAlmostEqualNulp:
    # assert_array_almost_equal_nulp: comparison measured in units of last
    # place (ULP); perturbing by eps*nulp/2 must pass, by eps*nulp*2 must
    # fail, symmetrically on the epsneg (downward) side.

    def test_float64_pass(self):
        nulp = 5
        # Samples spanning many orders of magnitude, both signs.
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        # Upward perturbation within the ULP budget.
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
        # Downward perturbation within the budget.
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

    def test_float64_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        # Perturbations of 2*nulp exceed the budget in both directions.
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

    def test_float64_ignore_nan(self):
        # Two NaNs with different bit patterns (sign/payload flipped via
        # XOR) must still be treated as equal (0 ulp apart).
        offset = np.uint64(0xffffffff)
        nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
        nan2_i64 = nan1_i64 ^ offset  # nan payload on opposite side
        nan1_f64 = nan1_i64.view(np.float64)
        nan2_f64 = nan2_i64.view(np.float64)
        assert_array_max_ulp(nan1_f64, nan2_f64, 0)

    def test_float32_pass(self):
        # Same scheme as test_float64_pass at single precision.
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

    def test_float32_fail(self):
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

    def test_float32_ignore_nan(self):
        offset = np.uint32(0xffff)
        nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
        nan2_i32 = nan1_i32 ^ offset  # nan payload on opposite side
        nan1_f32 = nan1_i32.view(np.float32)
        nan2_f32 = nan2_i32.view(np.float32)
        assert_array_max_ulp(nan1_f32, nan2_f32, 0)

    def test_float16_pass(self):
        # Half precision: smaller exponent range, hence the -4..4 span.
        nulp = 5
        x = np.linspace(-4, 4, 10, dtype=np.float16)
        x = 10**x
        x = np.r_[-x, x]
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(x, y, nulp)

    def test_float16_fail(self):
        nulp = 5
        x = np.linspace(-4, 4, 10, dtype=np.float16)
        x = 10**x
        x = np.r_[-x, x]
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      x, y, nulp)

    def test_float16_ignore_nan(self):
        offset = np.uint16(0xff)
        nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
        nan2_i16 = nan1_i16 ^ offset  # nan payload on opposite side
        nan1_f16 = nan1_i16.view(np.float16)
        nan2_f16 = nan2_i16.view(np.float16)
        assert_array_max_ulp(nan1_f16, nan2_f16, 0)
def test_complex128_pass(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float64)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x + x*eps*nulp/4.
assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp/2.
assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
y = x - x*epsneg*nulp/4.
assert_array_almost_equal_nulp(xi, y
    def test_complex128_fail(self):
        # Perturbations of 2*nulp on a single component, or nulp on both
        # components at once, must exceed the budget and raise.
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float64)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        # The budget is shared between components, so nulp on both fails.
        y = x + x*eps*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp*2.
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, x + y*1j, nulp)
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp
        assert_raises(AssertionError, assert_array_almost_equal_nulp,
                      xi, y + y*1j, nulp)
    def test_complex64_pass(self):
        # Single-precision variant of test_complex128_pass.
        nulp = 5
        x = np.linspace(-20, 20, 50, dtype=np.float32)
        x = 10**x
        x = np.r_[-x, x]
        xi = x + x*1j
        eps = np.finfo(x.dtype).eps
        y = x + x*eps*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        # Perturbing both components shares the budget (nulp/4 each).
        y = x + x*eps*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
        epsneg = np.finfo(x.dtype).epsneg
        y = x - x*epsneg*nulp/2.
        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
        y = x - x*epsneg*nulp/4.
        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
def test_complex64_fail(self):
nulp = 5
x = np.linspace(-20, 20, 50, dtype=np.float32)
x = 10**x
x = np.r_[-x, x]
xi = x + x*1j
eps = np.finfo(x.dtype).eps
y = x + x*eps*nulp*2.
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, x + y*1j, nulp)
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + x*1j, nulp)
y = x + x*eps*nulp
assert_raises(AssertionError, assert_array_almost_equal_nulp,
xi, y + y*1j, nulp)
epsneg = np.finfo(x.dtype).epsneg
y = x - x*epsneg*nulp*2.
class TestULP:
    # assert_array_max_ulp: bounds the spacing between two arrays measured
    # in units of last place.

    def test_equal(self):
        # An array is 0 ulp away from itself.
        x = np.random.randn(10)
        assert_array_max_ulp(x, x, maxulp=0)

    def test_single(self):
        # Values near 1: adding a double-precision eps to float32 data
        # stays within a small ulp bound.
        x = np.ones(10).astype(np.float32)
        x += 0.01 * np.random.randn(10).astype(np.float32)
        eps = np.finfo(np.float32).eps
        assert_array_max_ulp(x, x+eps, maxulp=20)

    def test_double(self):
        x = np.ones(10).astype(np.float64)
        x += 0.01 * np.random.randn(10).astype(np.float64)
        eps = np.finfo(np.float64).eps
        assert_array_max_ulp(x, x+eps, maxulp=200)

    def test_inf(self):
        # inf is adjacent (in ulp terms) to the largest finite value.
        for dt in [np.float32, np.float64]:
            inf = np.array([np.inf]).astype(dt)
            big = np.array([np.finfo(dt).max])
            assert_array_max_ulp(inf, big, maxulp=200)

    def test_nan(self):
        # NaN must be 'far' from inf, max, tiny and both zeros even with
        # very generous ulp budgets.
        for dt in [np.float32, np.float64]:
            if dt == np.float32:
                maxulp = 1e6
            else:
                maxulp = 1e12
            inf = np.array([np.inf]).astype(dt)
            nan = np.array([np.nan]).astype(dt)
            big = np.array([np.finfo(dt).max])
            tiny = np.array([np.finfo(dt).tiny])
            zero = np.array([0.0]).astype(dt)
            nzero = np.array([-0.0]).astype(dt)
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, inf,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, big,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, tiny,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, zero,
                                                       maxulp=maxulp))
            assert_raises(AssertionError,
                          lambda: assert_array_max_ulp(nan, nzero,
                                                       maxulp=maxulp))
class TestStringEqual:
    def test_simple(self):
        assert_string_equal("hello", "hello")
        assert_string_equal("hello\nmultiline", "hello\nmultiline")
        # A differing line is reported in unified-diff style.
        with pytest.raises(AssertionError) as exc_info:
            assert_string_equal("foo\nbar", "hello\nbar")
        msg = str(exc_info.value)
        assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
        assert_raises(AssertionError,
                      lambda: assert_string_equal("foo", "hello"))

    def test_regex(self):
        # Strings containing regex metacharacters are compared literally,
        # not interpreted as patterns.
        assert_string_equal("a+*b", "a+*b")
        assert_raises(AssertionError,
                      lambda: assert_string_equal("aaa", "a+b"))
try:
mod_warns = mod.__warningregistry__
except AttributeError:
mod_warns = {}
num_warns = len(mod_warns)
if 'version' in mod_warns:
num_warns -= 1
assert_equal(num_warns, n_in_context)
def test_warn_len_equal_call_scenarios():
    """Exercise assert_warn_len_equal with and without a warning registry.

    assert_warn_len_equal takes a module-like object and an expected count;
    both the "no ``__warningregistry__`` at all" and the pre-populated
    cases must be handled.
    """

    class mod:
        # Stand-in for a module that has never registered a warning.
        pass

    assert_warn_len_equal(mod=mod(), n_in_context=0)

    class mod:
        # Stand-in for a module whose registry already holds two entries.
        def __init__(self):
            self.__warningregistry__ = {'warning1': 1,
                                        'warning2': 2}

    assert_warn_len_equal(mod=mod(), n_in_context=2)
def _get_fresh_mod():
    """Return this test module with its ``__warningregistry__`` emptied."""
    my_mod = sys.modules[__name__]
    try:
        registry = my_mod.__warningregistry__
    except AttributeError:
        # A freshly imported module has no registry yet — nothing to clear.
        pass
    else:
        registry.clear()
    return my_mod
def test_clear_and_catch_warnings():
    # Start from a module with an empty warning registry.
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
    # With the module passed explicitly, registry entries made inside the
    # context do not leak out.
    with clear_and_catch_warnings(modules=[my_mod]):
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_equal(my_mod.__warningregistry__, {})
    # Without an explicit module list the registry is still effectively
    # empty afterwards (assert_warn_len_equal ignores the version entry).
    with clear_and_catch_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
    # A pre-populated registry is restored when the module is listed.
    my_mod.__warningregistry__ = {'warning1': 1,
                                  'warning2': 2}
    with clear_and_catch_warnings(modules=[my_mod]):
        warnings.simplefilter('ignore')
        warnings.warn('Another warning')
    assert_warn_len_equal(my_mod, 2)
    # NOTE(review): without modules=..., the count drops back to 0 here —
    # presumably because the pre-seeded entries don't survive; confirm
    # against assert_warn_len_equal's 'version' handling.
    with clear_and_catch_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Another warning')
    assert_warn_len_equal(my_mod, 0)
def test_suppress_warnings_module():
    """Module-based filtering with suppress_warnings; registries stay clean."""
    # Start from this module with an empty warning registry.
    my_mod = _get_fresh_mod()
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})

    def warn_other_module():
        # stacklevel=2 makes the warning appear to originate from the
        # module implementing apply_along_axis rather than this one.
        def warn(arr):
            warnings.warn("Some warning 2", stacklevel=2)
            return arr
        np.apply_along_axis(warn, 0, [0])

    assert_warn_len_equal(my_mod, 0)

    # Record UserWarnings, but suppress those coming from the module
    # that implements apply_along_axis.
    with suppress_warnings() as sup:
        sup.record(UserWarning)
        sup.filter(module=np.lib._shape_base_impl)
        warnings.warn("Some warning")
        warn_other_module()
    # Only the locally raised warning was recorded, and this module's
    # registry is untouched.
    assert_equal(len(sup.log), 1)
    assert_equal(sup.log[0].message.args[0], "Some warning")
    assert_warn_len_equal(my_mod, 0)

    # Filtering on this module suppresses the local warning as well.
    sup = suppress_warnings()
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # Adding the same filter a second time and re-entering also works.
    sup.filter(module=my_mod)
    with sup:
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)

    # Even with no filters installed, the context leaves the module
    # registry untouched.
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 0)
def test_suppress_warnings_type():
    """Category-based filtering with suppress_warnings; registry stays clean."""
    module = _get_fresh_mod()
    assert_equal(getattr(module, '__warningregistry__', {}), {})

    # Filter by warning category, installing the filter inside the context.
    with suppress_warnings() as suppressor:
        suppressor.filter(UserWarning)
        warnings.warn('Some warning')
    assert_warn_len_equal(module, 0)

    # Same thing, with the filter installed before entering the context.
    suppressor = suppress_warnings()
    suppressor.filter(UserWarning)
    with suppressor:
        warnings.warn('Some warning')
    assert_warn_len_equal(module, 0)

    # Re-entering after adding a module filter on top also works.
    suppressor.filter(module=module)
    with suppressor:
        warnings.warn('Some warning')
    assert_warn_len_equal(module, 0)

    # With no filters at all, nothing reaches the module registry either.
    with suppress_warnings():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    assert_warn_len_equal(module, 0)
def test_suppress_warnings_decorate_no_record():
    """A suppress_warnings instance used as a decorator filters calls."""
    suppressor = suppress_warnings()
    suppressor.filter(UserWarning)

    @suppressor
    def emit(category):
        warnings.warn('Some warning', category)

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        emit(UserWarning)      # suppressed by the decorator's filter
        emit(RuntimeWarning)   # passes through
        assert_equal(len(caught), 1)
def test_suppress_warnings_record():
    """record() collects matching warnings into per-filter logs."""
    sw = suppress_warnings()
    all_log = sw.record()

    with sw:
        msg_log = sw.record(message='Some other warning 2')
        sw.filter(message='Some warning')
        warnings.warn('Some warning')
        warnings.warn('Some other warning')
        warnings.warn('Some other warning 2')

        # Two distinct warnings recorded in total; one matched each of
        # the message-specific recorders.
        assert_equal(len(sw.log), 2)
        assert_equal(len(all_log), 1)
        assert_equal(len(msg_log), 1)
        assert_equal(msg_log[0].message.args[0], 'Some other warning 2')

    # Entering the same instance again starts from empty logs: the
    # counts are identical the second time around.
    with sw:
        msg_log = sw.record(message='Some other warning 2')
        sw.filter(message='Some warning')
        warnings.warn('Some warning')
        warnings.warn('Some other warning')
        warnings.warn('Some other warning 2')

        assert_equal(len(sw.log), 2)
        assert_equal(len(all_log), 1)
        assert_equal(len(msg_log), 1)
        assert_equal(msg_log[0].message.args[0], 'Some other warning 2')

    # Nested contexts: the inner matching recorder takes the warning it
    # matches; the outer one records the rest.
    with suppress_warnings() as outer:
        outer.record()
        with suppress_warnings() as inner:
            inner.record(message='Some warning')
            warnings.warn('Some warning')
            warnings.warn('Some other warning')
            assert_equal(len(inner.log), 1)
        assert_equal(len(outer.log), 1)
def test_suppress_warnings_forwarding():
    """Check the "always"/"location"/"module"/"once" forwarding modes."""
    def warn_other_module():
        # stacklevel=2 makes the warning appear to originate from the
        # module implementing apply_along_axis, not from this one.
        def emit(arr):
            warnings.warn("Some warning", stacklevel=2)
            return arr
        np.apply_along_axis(emit, 0, [0])

    # "always": every single warning is forwarded to the outer recorder.
    with suppress_warnings() as recorder:
        recorder.record()
        with suppress_warnings("always"):
            for _ in range(2):
                warnings.warn("Some warning")
        assert_equal(len(recorder.log), 2)

    # "location": one forward per distinct code location (two warn
    # statements, each hit twice -> two forwards).
    with suppress_warnings() as recorder:
        recorder.record()
        with suppress_warnings("location"):
            for _ in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some warning")
        assert_equal(len(recorder.log), 2)

    # "module": one forward per originating module (this module plus the
    # apply_along_axis module -> two forwards).
    with suppress_warnings() as recorder:
        recorder.record()
        with suppress_warnings("module"):
            for _ in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some warning")
                warn_other_module()
        assert_equal(len(recorder.log), 2)

    # "once": one forward per unique warning message, even across
    # modules (two distinct messages -> two forwards).
    with suppress_warnings() as recorder:
        recorder.record()
        with suppress_warnings("once"):
            for _ in range(2):
                warnings.warn("Some warning")
                warnings.warn("Some other warning")
                warn_other_module()
        assert_equal(len(recorder.log), 2)
def test_tempdir():
    """tempdir() yields a writable directory and removes it on exit."""
    with tempdir() as created:
        # The directory must be usable for creating files.
        target = os.path.join(created, 'tmp')
        open(target, 'w').close()
    assert_(not os.path.isdir(created))

    # Cleanup must also happen when the with-body raises.
    escaped = False
    try:
        with tempdir() as created:
            raise ValueError()
    except ValueError:
        escaped = True
    assert_(escaped)
    assert_(not os.path.isdir(created))
def test_temppath():
    """temppath() yields a temporary file path and removes it on exit."""
    with temppath() as created:
        # The path must be writable while inside the context.
        open(created, 'w').close()
    assert_(not os.path.isfile(created))

    # Cleanup must also happen when the with-body raises.
    escaped = False
    try:
        with temppath() as created:
            raise ValueError()
    except ValueError:
        escaped = True
    assert_(escaped)
    assert_(not os.path.isfile(created))
class my_cacw(clear_and_catch_warnings):
    """clear_and_catch_warnings subclass that always covers this module."""

    # class_modules is the hook subclasses use to declare which modules'
    # warning registries the context handles (exercised by
    # test_clear_and_catch_warnings_inherit below).
    class_modules = (sys.modules[__name__],)
def test_clear_and_catch_warnings_inherit():
    """Subclasses can supply their modules via class_modules."""
    module = _get_fresh_mod()
    with my_cacw():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    # The subclass cleared/restored this module's registry on exit.
    assert_equal(module.__warningregistry__, {})
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
class TestAssertNoGcCycles:
    """ Test assert_no_gc_cycles """

    def test_passes(self):
        # A function that allocates but creates no reference cycle must
        # be accepted both as a context manager and as a callable.
        def no_cycle():
            b = []
            b.append([])
            return b

        with assert_no_gc_cycles():
            no_cycle()

        assert_no_gc_cycles(no_cycle)

    def test_asserts(self):
        # A self-referencing list is a reference cycle; both usage forms
        # must raise AssertionError.
        def make_cycle():
            a = []
            a.append(a)
            a.append(a)
            return a

        with assert_raises(AssertionError):
            with assert_no_gc_cycles():
                make_cycle()

        with assert_raises(AssertionError):
            assert_no_gc_cycles(make_cycle)

    @pytest.mark.slow
    def test_fails(self):
        """
        Test that in cases where the garbage cannot be collected, we raise an
        error, instead of hanging forever trying to clear it.
        """
        class ReferenceCycleInDel:
            """
            An object that not only contains a reference cycle, but creates new
            cycles whenever it's garbage-collected and its __del__ runs
            """
            make_cycle = True

            def __init__(self):
                self.cycle = self

            def __del__(self):
                self.cycle = None
                if ReferenceCycleInDel.make_cycle:
                    # Spawn a fresh cycle while being collected, so the
                    # garbage can never be fully cleared.
                    ReferenceCycleInDel()

        try:
            w = weakref.ref(ReferenceCycleInDel())
            try:
                with assert_raises(RuntimeError):
                    # assert_no_gc_cycles must give up with an error rather
                    # than loop forever trying to obtain a clean baseline.
                    assert_no_gc_cycles(lambda: None)
            except AssertionError:
                # If the GC never ran __del__ on the cyclic object, the
                # RuntimeError legitimately cannot occur; skip in that case.
                if w() is not None:
                    pytest.skip("GC does not call __del__ on cyclic objects")
                    raise
        finally:
            # Stop manufacturing new cycles so later cleanup can succeed.
            ReferenceCycleInDel.make_cycle = False
@pytest.mark.parametrize('assert_func', [assert_array_equal,
                                         assert_array_almost_equal])
def test_xy_rename(assert_func):
    """The legacy x/y keywords behave as deprecated aliases of actual/desired."""
    # Positional and new-style keyword calls succeed silently.
    assert_func(1, 1)
    assert_func(actual=1, desired=1)

    mismatch_pattern = "Arrays are not..."
    with pytest.raises(AssertionError, match=mismatch_pattern):
        assert_func(1, 2)
    with pytest.raises(AssertionError, match=mismatch_pattern):
        assert_func(actual=1, desired=2)

    # The legacy keywords still work but emit a DeprecationWarning.
    deprecation_pattern = 'Use of keyword argument...'
    with pytest.warns(DeprecationWarning, match=deprecation_pattern):
        assert_func(x=1, desired=1)
    with pytest.warns(DeprecationWarning, match=deprecation_pattern):
        assert_func(1, y=1)

    # Supplying a value both positionally and through its legacy keyword
    # is a TypeError (after the deprecation warning fires).
    duplicate_pattern = '...got multiple values for argument'
    with pytest.warns(DeprecationWarning, match=deprecation_pattern), \
            pytest.raises(TypeError, match=duplicate_pattern):
        assert_func(1, x=1)
        assert_func(1, 2, y=2)