NumPy 源码解析(八十六)
.\numpy\numpy\_core\tests\test_scalarbuffer.py
"""
Test scalar buffer interface adheres to PEP 3118
"""
import numpy as np
from numpy._core._rational_tests import rational
from numpy._core._multiarray_tests import get_buffer_info
import pytest
from numpy.testing import assert_, assert_equal, assert_raises
# (scalar type, PEP 3118 format code) pairs covering every numeric scalar
# type that exports a buffer; the code is the struct-module format character
# that ``memoryview(scalar).format`` is expected to report.
scalars_and_codes = [
    (np.bool, '?'),
    (np.byte, 'b'),
    (np.short, 'h'),
    (np.intc, 'i'),
    (np.long, 'l'),
    (np.longlong, 'q'),
    (np.ubyte, 'B'),
    (np.ushort, 'H'),
    (np.uintc, 'I'),
    (np.ulong, 'L'),
    (np.ulonglong, 'Q'),
    (np.half, 'e'),
    (np.single, 'f'),
    (np.double, 'd'),
    (np.longdouble, 'g'),
    (np.csingle, 'Zf'),
    (np.cdouble, 'Zd'),
    (np.clongdouble, 'Zg'),
]
# Parallel tuples used for pytest parametrization and test ids.
scalars_only, codes_only = zip(*scalars_and_codes)
class TestScalarPEP3118:
    """Check that NumPy scalars export a PEP 3118 buffer consistent with
    the buffer of an equivalent array of the same dtype."""

    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
    def test_scalar_match_array(self, scalar):
        # A scalar's buffer format must equal that of an array of its dtype.
        x = scalar()
        a = np.array([], dtype=np.dtype(scalar))
        mv_x = memoryview(x)
        mv_a = memoryview(a)
        assert_equal(mv_x.format, mv_a.format)

    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
    def test_scalar_dim(self, scalar):
        # Scalars export 0-d buffers: empty shape/strides/suboffsets.
        x = scalar()
        mv_x = memoryview(x)
        assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
        assert_equal(mv_x.ndim, 0)
        assert_equal(mv_x.shape, ())
        assert_equal(mv_x.strides, ())
        assert_equal(mv_x.suboffsets, ())

    @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
    def test_scalar_code_and_properties(self, scalar, code):
        # Full property check against the expected PEP 3118 format code.
        x = scalar()
        expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0,
                        shape=(), format=code, readonly=True)
        mv_x = memoryview(x)
        assert self._as_dict(mv_x) == expected

    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
    def test_scalar_buffers_readonly(self, scalar):
        # Requesting a writable buffer from a scalar must fail.
        x = scalar()
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(x, ["WRITABLE"])

    def test_void_scalar_structured_data(self):
        # Structured void scalars also expose a readonly 0-d buffer.
        dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
        x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
        assert_(isinstance(x, np.void))
        mv_x = memoryview(x)
        # Expected size: 16 UCS4 characters plus two float64 grades.
        expected_size = 16 * np.dtype((np.str_, 1)).itemsize
        expected_size += 2 * np.dtype(np.float64).itemsize
        assert_equal(mv_x.itemsize, expected_size)
        assert_equal(mv_x.ndim, 0)
        assert_equal(mv_x.shape, ())
        assert_equal(mv_x.strides, ())
        assert_equal(mv_x.suboffsets, ())
        # The scalar itemsize must match the equivalent array's itemsize.
        a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
        assert_(isinstance(a, np.ndarray))
        mv_a = memoryview(a)
        assert_equal(mv_x.itemsize, mv_a.itemsize)
        # Void scalars are readonly as well.
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(x, ["WRITABLE"])

    def _as_dict(self, m):
        # Snapshot the buffer-related attributes of memoryview ``m``.
        return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
                    ndim=m.ndim, format=m.format, readonly=m.readonly)

    def test_datetime_memoryview(self):
        # datetime64/timedelta64 scalars export an opaque 8-byte buffer.
        dt1 = np.datetime64('2016-01-01')
        dt2 = np.datetime64('2017-01-01')
        expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
                        format='B', readonly=True)
        v = memoryview(dt1)
        assert self._as_dict(v) == expected
        v = memoryview(dt2 - dt1)
        assert self._as_dict(v) == expected
        # Structured dtypes containing datetimes cannot be exported.
        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
        a = np.empty(1, dt)
        assert_raises((ValueError, BufferError), memoryview, a[0])
        # datetime scalars are readonly too.
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(dt1, ["WRITABLE"])

    @pytest.mark.parametrize('s', [
        pytest.param("\x32\x32", id="ascii"),
        pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
        pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
    ])
    def test_str_ucs4(self, s):
        # str_ scalars export UCS4 code points with format '<len>w'.
        s = np.str_(s)
        expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w',
                        readonly=True)
        v = memoryview(s)
        assert self._as_dict(v) == expected
        # The exported bytes decode to the original code points.
        code_points = np.frombuffer(v, dtype='i4')
        assert_equal(code_points, [ord(c) for c in s])
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(s, ["WRITABLE"])

    def test_user_scalar_fails_buffer(self):
        # User-defined scalar types do not implement the buffer protocol.
        r = rational(1)
        with assert_raises(TypeError):
            memoryview(r)
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(r, ["WRITABLE"])
.\numpy\numpy\_core\tests\test_scalarinherit.py
""" Test printing of scalar types.
"""
import pytest
import numpy as np
from numpy.testing import assert_, assert_raises
# Helper hierarchy: diamond-shaped subclasses of np.float64 used to exercise
# scalar subclass construction and MRO handling.
class A:
    pass


class B(A, np.float64):
    pass


class C(B):
    pass


class D(C, B):
    pass


# Same shape, but with np.float64 first among the bases.
class B0(np.float64, A):
    pass


class C0(B0):
    pass


class HasNew:
    # Non-default __new__ returning its arguments; used to check that
    # np.float64.__new__ defers to a later class in the MRO (see B1 below).
    def __new__(cls, *args, **kwargs):
        return cls, args, kwargs


class B1(np.float64, HasNew):
    pass
class TestInherit:
    """Construction of np.float64 subclasses must preserve the value
    (observable through str())."""

    def test_init(self):
        # Subclasses with a non-float64 base listed first.
        for cls, value, text in ((B, 1.0, '1.0'),
                                 (C, 2.0, '2.0'),
                                 (D, 3.0, '3.0')):
            assert_(str(cls(value)) == text)

    def test_init2(self):
        # Subclasses with np.float64 listed first behave the same way.
        for cls, value, text in ((B0, 1.0, '1.0'),
                                 (C0, 2.0, '2.0')):
            assert_(str(cls(value)) == text)

    def test_gh_15395(self):
        # A cooperating __new__ further down the MRO must still be honoured.
        assert_(str(B1(1.0)) == '1.0')
        # Extra positional arguments are rejected.
        with pytest.raises(TypeError):
            B1(1.0, 2.0)
class TestCharacter:
    """Behaviour of bytes_/str_ scalars under + (reflected add) and *."""

    def test_char_radd(self):
        # np.bytes_/np.str_ must return NotImplemented from __radd__ so
        # Python's own str/bytes concatenation machinery takes over.
        np_s = np.bytes_('abc')
        np_u = np.str_('abc')
        s = b'def'
        u = 'def'
        assert_(np_s.__radd__(np_s) is NotImplemented)
        assert_(np_s.__radd__(np_u) is NotImplemented)
        assert_(np_s.__radd__(s) is NotImplemented)
        assert_(np_s.__radd__(u) is NotImplemented)
        assert_(np_u.__radd__(np_s) is NotImplemented)
        assert_(np_u.__radd__(np_u) is NotImplemented)
        assert_(np_u.__radd__(s) is NotImplemented)
        assert_(np_u.__radd__(u) is NotImplemented)
        # Deferral means plain bytes concatenation succeeds.
        assert_(s + np_s == b'defabc')

        # Mixing bytes with a str-based np.generic subclass must fail ...
        class MyStr(str, np.generic):
            pass

        with assert_raises(TypeError):
            ret = s + MyStr('abc')

        # ... while a bytes-based np.generic subclass concatenates as bytes.
        class MyBytes(bytes, np.generic):
            pass

        ret = s + MyBytes(b'abc')
        assert(type(ret) is type(s))

    def test_char_repeat(self):
        # Sequence repetition matches plain bytes/str behaviour.
        np_s = np.bytes_('abc')
        np_u = np.str_('abc')
        res_s = b'abc' * 5
        res_u = 'abc' * 5
        assert_(np_s * 5 == res_s)
        assert_(np_u * 5 == res_u)
.\numpy\numpy\_core\tests\test_scalarmath.py
import contextlib
import sys
import warnings
import itertools
import operator
import platform
from numpy._utils import _pep440
import pytest
from hypothesis import given, settings
from hypothesis.strategies import sampled_from
from hypothesis.extra import numpy as hynp
import numpy as np
from numpy.exceptions import ComplexWarning
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns, _SUPPORTS_SVE,
)
# Every concrete numeric scalar type exercised by the tests below.
types = [np.bool, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
         np.int_, np.uint, np.longlong, np.ulonglong,
         np.single, np.double, np.longdouble, np.csingle,
         np.cdouble, np.clongdouble]

# Concrete subclasses of the abstract floating/complex categories.
floating_types = np.floating.__subclasses__()
complex_floating_types = np.complexfloating.__subclasses__()

# Arbitrary non-numeric objects for operator-dispatch tests.
objecty_things = [object(), None]

# Binary operators whose scalar behaviour is compared against the ufuncs.
reasonable_operators_for_scalars = [
    operator.lt, operator.le, operator.eq, operator.ne, operator.ge,
    operator.gt, operator.add, operator.floordiv, operator.mod,
    operator.mul, operator.pow, operator.sub, operator.truediv,
]
class TestTypes:
    """Basic construction and promotion checks over all scalar types."""

    def test_types(self):
        # Every scalar type constructed from 1 compares equal to 1.
        for atype in types:
            a = atype(1)
            assert_(a == 1, "error with %r: got %r" % (atype, a))

    def test_type_add(self):
        # scalar + scalar must promote to the same dtype as array + array.
        for k, atype in enumerate(types):
            a_scalar = atype(3)
            a_array = np.array([3], dtype=atype)
            for l, btype in enumerate(types):
                b_scalar = btype(1)
                b_array = np.array([1], dtype=btype)
                c_scalar = a_scalar + b_scalar
                c_array = a_array + b_array
                assert_equal(c_scalar.dtype, c_array.dtype,
                             "error with types (%d/'%c' + %d/'%c')" %
                             (k, np.dtype(atype).char, l, np.dtype(btype).char))

    def test_type_create(self):
        # Calling a scalar type with a list matches np.array construction.
        for k, atype in enumerate(types):
            a = np.array([1, 2, 3], atype)
            b = atype([1, 2, 3])
            assert_equal(a, b)

    def test_leak(self):
        # Regression test: repeated scalar np.add calls must not leak refs.
        for i in range(200000):
            np.add(1, 1)
def check_ufunc_scalar_equivalence(op, arr1, arr2):
    # Shared helper: `op` applied to the 0-d arrays and to the equivalent
    # scalars must either raise the same exception type or produce results
    # equal in value and dtype.
    scalar1 = arr1[()]
    scalar2 = arr2[()]
    assert isinstance(scalar1, np.generic)
    assert isinstance(scalar2, np.generic)

    if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
        # NaN comparisons legitimately differ for complex values.
        comp_ops = {operator.ge, operator.gt, operator.le, operator.lt}
        if op in comp_ops and (np.isnan(scalar1) or np.isnan(scalar2)):
            pytest.xfail("complex comp ufuncs use sort-order, scalars do not.")
    if op == operator.pow and arr2.item() in [-1, 0, 0.5, 1, 2]:
        # Power special-cases these exponents with different result dtypes.
        pytest.skip("array**2 can have incorrect/weird result dtype")

    # Ignore numeric warnings, but promote DeprecationWarning to an error so
    # scalar and array paths must deprecate in lockstep.
    with warnings.catch_warnings(), np.errstate(all="ignore"):
        warnings.simplefilter("error", DeprecationWarning)
        try:
            res = op(arr1, arr2)
        except Exception as e:
            # The array path raised; the scalar path must raise the same type.
            with pytest.raises(type(e)):
                op(scalar1, scalar2)
        else:
            scalar_res = op(scalar1, scalar2)
            assert_array_equal(scalar_res, res, strict=True)
@pytest.mark.slow
@settings(max_examples=10000, deadline=2000)
@given(
    sampled_from(reasonable_operators_for_scalars),
    hynp.arrays(dtype=hynp.scalar_dtypes(), shape=()),
    hynp.arrays(dtype=hynp.scalar_dtypes(), shape=())
)
def test_array_scalar_ufunc_equivalence(op, arr1, arr2):
    """
    This is a thorough test attempting to cover important promotion paths
    and ensuring that arrays and scalars stay as aligned as possible.
    However, if it creates troubles, it should maybe just be removed.
    """
    check_ufunc_scalar_equivalence(op, arr1, arr2)
@pytest.mark.slow
@given(
    sampled_from(reasonable_operators_for_scalars),
    hynp.scalar_dtypes(),
    hynp.scalar_dtypes()
)
def test_array_scalar_ufunc_dtypes(op, dt1, dt2):
    # Same equivalence check as above, but with fixed small values for every
    # dtype combination hypothesis generates (cheaper than full arrays).
    arr1 = np.array(2, dtype=dt1)
    arr2 = np.array(3, dtype=dt2)
    check_ufunc_scalar_equivalence(op, arr1, arr2)
@pytest.mark.parametrize("fscalar", [np.float16, np.float32])
def test_int_float_promotion_truediv(fscalar):
    # True division of scalars promotes like np.result_type.
    i = np.int8(1)
    f = fscalar(1)
    expected = np.result_type(i, f)
    assert (i / f).dtype == expected
    assert (f / i).dtype == expected
    # int / int always yields float64 ...
    assert (i / i).dtype == np.dtype("float64")
    # ... while int16 mixed with a low-precision float gives float32.
    assert (np.int16(1) / f).dtype == np.dtype("float32")
class TestBaseMath:
    """Elementwise arithmetic across SIMD-relevant sizes and alignments."""

    @pytest.mark.xfail(_SUPPORTS_SVE, reason="gh-22982")
    def test_blocked(self):
        # Exercise blocked/partial SIMD loops via varying sizes and offsets.
        for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2), err_msg=msg)
                # Reciprocal is float-only; skip for the int32 case.
                if dt != np.int32:
                    assert_almost_equal(np.reciprocal(inp2),
                                        np.divide(1, inp2), err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                np.add(inp1, 2, out=out)
                assert_almost_equal(out, exp1 + 2, err_msg=msg)
                inp2[...] = np.ones_like(inp2)
                np.add(2, inp2, out=out)
                assert_almost_equal(out, exp1 + 2, err_msg=msg)

    def test_lower_align(self):
        # Deliberately misaligned float64 views must still compute correctly
        # (exercises non-aligned load/store paths).
        d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
        o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)
        assert_almost_equal(d + d, d * 2)
        np.add(d, d, out=o)
        np.add(np.ones_like(d), d, out=o)
        np.add(d, np.ones_like(d), out=o)
        np.add(np.ones_like(d), d)
        np.add(d, np.ones_like(d))
class TestPower:
    """Scalar ** behaviour: exactness, promotion, negative exponents."""

    def test_small_types(self):
        # 3 ** 4 stays exact for small int/float types.
        for t in [np.int8, np.int16, np.float16]:
            a = t(3)
            b = a ** 4
            assert_(b == 81, "error with %r: got %r" % (t, b))

    def test_large_types(self):
        # 51 ** 4 for wider types: exact for integers, close for floats.
        for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:
            a = t(51)
            b = a ** 4
            msg = "error with %r: got %r" % (t, b)
            if np.issubdtype(t, np.integer):
                assert_(b == 6765201, msg)
            else:
                assert_almost_equal(b, 6765201, err_msg=msg)

    def test_integers_to_negative_integer_power(self):
        # int ** negative-int raises ValueError, except that a uint64 base
        # (combined with a signed exponent) promotes to float64 instead.
        exp = [np.array(-1, dt)[()] for dt in 'bhilq']

        # 1 ** -1 == 1.0 for the promoted uint64 case.
        base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']
        for i1, i2 in itertools.product(base, exp):
            if i1.dtype != np.uint64:
                assert_raises(ValueError, operator.pow, i1, i2)
            else:
                res = operator.pow(i1, i2)
                assert_(res.dtype.type is np.float64)
                assert_almost_equal(res, 1.)

        # (-1) ** -1: only signed bases here, so every case raises.
        base = [np.array(-1, dt)[()] for dt in 'bhilq']
        for i1, i2 in itertools.product(base, exp):
            if i1.dtype != np.uint64:
                assert_raises(ValueError, operator.pow, i1, i2)
            else:
                res = operator.pow(i1, i2)
                assert_(res.dtype.type is np.float64)
                assert_almost_equal(res, -1.)

        # 2 ** -1 == 0.5 for the promoted uint64 case.
        base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']
        for i1, i2 in itertools.product(base, exp):
            if i1.dtype != np.uint64:
                assert_raises(ValueError, operator.pow, i1, i2)
            else:
                res = operator.pow(i1, i2)
                assert_(res.dtype.type is np.float64)
                assert_almost_equal(res, .5)

    def test_mixed_types(self):
        # 3 ** 2 == 9 for every pairwise combination of these types.
        typelist = [np.int8, np.int16, np.float16,
                    np.float32, np.float64, np.int8,
                    np.int16, np.int32, np.int64]
        for t1 in typelist:
            for t2 in typelist:
                a = t1(3)
                b = t2(2)
                result = a**b
                msg = ("error with %r and %r:"
                       "got %r, expected %r") % (t1, t2, result, 9)
                if np.issubdtype(np.dtype(result), np.integer):
                    assert_(result == 9, msg)
                else:
                    assert_almost_equal(result, 9, err_msg=msg)

    def test_modular_power(self):
        # Three-argument pow() is not supported by NumPy scalars/arrays.
        a = 5
        b = 4
        c = 10
        expected = pow(a, b, c)
        for t in (np.int32, np.float32, np.complex64):
            # Note: 3-arg power only dispatches on the first argument.
            assert_raises(TypeError, operator.pow, t(a), b, c)
            assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)
def floordiv_and_mod(x, y):
    """Return ``(x // y, x % y)``, the hand-rolled counterpart of divmod."""
    quotient = x // y
    remainder = x % y
    return quotient, remainder
def _signs(dt):
if dt in np.typecodes['UnsignedInteger']:
return (+1,)
else:
return (+1, -1)
class TestModulus:
def test_modulus_basic(self):
dt = np.typecodes['AllInteger'] + np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product(_signs(dt1), _signs(dt2)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*71, dtype=dt1)[()]
b = np.array(sg2*19, dtype=dt2)[()]
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_exact(self):
nlst = list(range(-127, 0))
plst = list(range(1, 128))
dividend = nlst + [0] + plst
divisor = nlst + plst
arg = list(itertools.product(dividend, divisor))
tgt = list(divmod(*t) for t in arg)
a, b = np.array(arg, dtype=int).T
tgtdiv, tgtrem = np.array(tgt, dtype=float).T
tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)
tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)
for op in [floordiv_and_mod, divmod]:
for dt in np.typecodes['Float']:
msg = 'op: %s, dtype: %s' % (op.__name__, dt)
fa = a.astype(dt)
fb = b.astype(dt)
div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)])
assert_equal(div, tgtdiv, err_msg=msg)
assert_equal(rem, tgtrem, err_msg=msg)
def test_float_modulus_roundoff(self):
dt = np.typecodes['Float']
for op in [floordiv_and_mod, divmod]:
for dt1, dt2 in itertools.product(dt, dt):
for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):
fmt = 'op: %s, dt1: %s, dt2: %s, sg1: %s, sg2: %s'
msg = fmt % (op.__name__, dt1, dt2, sg1, sg2)
a = np.array(sg1*78*6e-8, dtype=dt1)[()]
b = np.array(sg2*6e-8, dtype=dt2)[()]
div, rem = op(a, b)
assert_equal(div*b + rem, a, err_msg=msg)
if sg2 == -1:
assert_(b < rem <= 0, msg)
else:
assert_(b > rem >= 0, msg)
def test_float_modulus_corner_cases(self):
for dt in np.typecodes['Float']:
b = np.array(1.0, dtype=dt)
a = np.nextafter(np.array(0.0, dtype=dt), -b)
rem = operator.mod(a, b)
assert_(rem <= b, 'dt: %s' % dt)
rem = operator.mod(-a, -b)
assert_(rem >= -b, 'dt: %s' % dt)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in remainder")
sup.filter(RuntimeWarning, "divide by zero encountered in floor_divide")
sup.filter(RuntimeWarning, "divide by zero encountered in divmod")
sup.filter(RuntimeWarning, "invalid value encountered in divmod")
for dt in np.typecodes['Float']:
fone = np.array(1.0, dtype=dt)
fzer = np.array(0.0, dtype=dt)
finf = np.array(np.inf, dtype=dt)
fnan = np.array(np.nan, dtype=dt)
rem = operator.mod(fone, fzer)
assert_(np.isnan(rem), 'dt: %s' % dt)
rem = operator.mod(fone, fnan)
assert_(np.isnan(rem), 'dt: %s' % dt)
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
for op in [floordiv_and_mod, divmod]:
div, mod = op(fone, fzer)
assert_(np.isinf(div)) and assert_(np.isnan(mod))
def test_inplace_floordiv_handling(self):
a = np.array([1, 2], np.int64)
b = np.array([1, 2], np.uint64)
with pytest.raises(TypeError,
match=r"Cannot cast ufunc 'floor_divide' output from"):
a //= b
class TestComplexDivision:
    """Complex scalar division: zero divisors, signed zeros, both branches."""

    def test_zero_division(self):
        # Division by complex zero yields inf/nan instead of raising.
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                a = t(0.0)
                b = t(1.0)
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.inf, np.nan))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.inf))
                assert_(np.isinf(b/a))
                b = t(complex(np.nan, np.nan))
                assert_(np.isnan(b/a))
                b = t(0.)
                assert_(np.isnan(b/a))

    def test_signed_zeros(self):
        # Signs of zero components in the quotient must be preserved.
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                # Tuples of (numerator, denominator, expected), each given as
                # (real, imag) pairs; expected == numerator / denominator.
                data = (
                    (( 0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),
                    (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
                    (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),
                    (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),
                    (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),
                    (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
                    ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),
                    ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))
                )
                for cases in data:
                    n = cases[0]
                    d = cases[1]
                    ex = cases[2]
                    result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
                    # Compare real and imag separately; array-context equality
                    # would not distinguish signed zeros.
                    assert_equal(result.real, ex[0])
                    assert_equal(result.imag, ex[1])

    def test_branches(self):
        # Denominators chosen so |real| > |imag| and |real| < |imag|,
        # exercising both branches of the division implementation.
        with np.errstate(all="ignore"):
            for t in [np.complex64, np.complex128]:
                # Tuples of (numerator, denominator, expected) as above.
                data = list()
                data.append(((2.0, 1.0), (2.0, 1.0), (1.0, 0.0)))
                data.append(((1.0, 2.0), (1.0, 2.0), (1.0, 0.0)))
                for cases in data:
                    n = cases[0]
                    d = cases[1]
                    ex = cases[2]
                    result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))
                    assert_equal(result.real, ex[0])
                    assert_equal(result.imag, ex[1])
class TestConversion:
    """Conversions between NumPy scalars and Python ints/floats."""

    def test_int_from_long(self):
        # int() on array elements of large magnitude stays exact.
        l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]
        li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]
        for T in [None, np.float64, np.int64]:
            a = np.array(l, dtype=T)
            assert_equal([int(_m) for _m in a], li)

        a = np.array(l[:3], dtype=np.uint64)
        assert_equal([int(_m) for _m in a], li[:3])

    def test_iinfo_long_values(self):
        # Out-of-bounds Python ints raise on conversion ...
        for code in 'bBhH':
            with pytest.raises(OverflowError):
                np.array(np.iinfo(code).max + 1, dtype=code)

        # ... while the exact maximum round-trips through np.array ...
        for code in np.typecodes['AllInteger']:
            res = np.array(np.iinfo(code).max, dtype=code)
            tgt = np.iinfo(code).max
            assert_(res == tgt)

        # ... and through direct scalar-type construction.
        for code in np.typecodes['AllInteger']:
            res = np.dtype(code).type(np.iinfo(code).max)
            tgt = np.iinfo(code).max
            assert_(res == tgt)

    def test_int_raise_behaviour(self):
        # Constructing a scalar from max+1 overflows for the native types.
        def overflow_error_func(dtype):
            dtype(np.iinfo(dtype).max + 1)

        for code in [np.int_, np.uint, np.longlong, np.ulonglong]:
            assert_raises(OverflowError, overflow_error_func, code)

    def test_int_from_infinite_longdouble(self):
        # int(inf) must raise; for clongdouble a ComplexWarning is recorded.
        x = np.longdouble(np.inf)
        assert_raises(OverflowError, int, x)
        with suppress_warnings() as sup:
            sup.record(ComplexWarning)
            x = np.clongdouble(np.inf)
            assert_raises(OverflowError, int, x)
            assert_equal(len(sup.log), 1)

    @pytest.mark.skipif(not IS_PYPY, reason="Test is PyPy only (gh-9972)")
    def test_int_from_infinite_longdouble___int__(self):
        # Same as above, but calling __int__ directly (PyPy-only path).
        x = np.longdouble(np.inf)
        assert_raises(OverflowError, x.__int__)
        with suppress_warnings() as sup:
            sup.record(ComplexWarning)
            x = np.clongdouble(np.inf)
            assert_raises(OverflowError, x.__int__)
            assert_equal(len(sup.log), 1)

    @pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
                        reason="long double is same as double")
    @pytest.mark.skipif(platform.machine().startswith("ppc"),
                        reason="IBM double double")
    def test_int_from_huge_longdouble(self):
        # A finite long double too large for double must still convert
        # exactly to a Python int.
        exp = np.finfo(np.double).maxexp - 1
        huge_ld = 2 * 1234 * np.longdouble(2) ** exp
        huge_i = 2 * 1234 * 2 ** exp
        assert_(huge_ld != np.inf)
        assert_equal(int(huge_ld), huge_i)

    def test_int_from_longdouble(self):
        # int() truncates toward zero.
        x = np.longdouble(1.5)
        assert_equal(int(x), 1)
        x = np.longdouble(-10.5)
        assert_equal(int(x), -10)

    def test_numpy_scalar_relational_operators(self):
        # Comparisons must be by value for every dtype combination,
        # including mixed signed/unsigned pairs.
        for dt1 in np.typecodes['AllInteger']:
            assert_(1 > np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(not 1 < np.array(0, dtype=dt1)[()], "type %s failed" % (dt1,))

            for dt2 in np.typecodes['AllInteger']:
                assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))

        # Unsigned integers compared against negative values.
        for dt1 in 'BHILQP':
            assert_(-1 < np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(not -1 > np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(-1 != np.array(1, dtype=dt1)[()], "type %s failed" % (dt1,))

            for dt2 in 'bhilqp':
                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))

        # Signed integers and floats against -1.
        for dt1 in 'bhlqp' + np.typecodes['Float']:
            assert_(1 > np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(not 1 < np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))
            assert_(-1 == np.array(-1, dtype=dt1)[()], "type %s failed" % (dt1,))

            for dt2 in 'bhlqp' + np.typecodes['Float']:
                assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))
                assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],
                        "type %s and %s failed" % (dt1, dt2))

    def test_scalar_comparison_to_none(self):
        # == / != against None return plain booleans without FutureWarning.
        with warnings.catch_warnings(record=True) as w:
            warnings.filterwarnings('always', '', FutureWarning)
            assert_(not np.float32(1) == None)
            assert_(not np.str_('test') == None)
            assert_(not np.datetime64('NaT') == None)

            assert_(np.float32(1) != None)
            assert_(np.str_('test') != None)
            assert_(np.datetime64('NaT') != None)
        assert_(len(w) == 0)

        # NOTE(review): the ufunc path still equates NaT with None — kept as
        # documentation of current (dubious) behaviour.
        assert_(np.equal(np.datetime64('NaT'), None))
class TestRepr:
    def _test_type_repr(self, t):
        # repr() of the smallest denormal and smallest normal value must
        # round-trip exactly through eval(), unless eval's double-precision
        # detour underflows the value to zero.
        finfo = np.finfo(t)
        last_fraction_bit_idx = finfo.nexp + finfo.nmant
        last_exponent_bit_idx = finfo.nexp
        storage_bytes = np.dtype(t).itemsize * 8
        for which in ['small denorm', 'small norm']:
            # Build the bit pattern byte-by-byte in big-endian bit order.
            constr = np.array([0x00] * storage_bytes, dtype=np.uint8)
            if which == 'small denorm':
                byte = last_fraction_bit_idx // 8
                bytebit = 7 - (last_fraction_bit_idx % 8)
                constr[byte] = 1 << bytebit
            elif which == 'small norm':
                byte = last_exponent_bit_idx // 8
                bytebit = 7 - (last_exponent_bit_idx % 8)
                constr[byte] = 1 << bytebit
            else:
                raise ValueError('hmm')
            val = constr.view(t)[0]
            val_repr = repr(val)
            val2 = t(eval(val_repr))
            if not (val2 == 0 and val < 1e-100):
                assert_equal(val, val2)

    def test_float_repr(self):
        # longdouble is excluded: eval() goes through a Python float.
        for t in [np.float32, np.float64]:
            self._test_type_repr(t)
if not IS_PYPY:
    # sys.getsizeof is not meaningful on PyPy, hence the guard.
    class TestSizeOf:

        def test_equal_nbytes(self):
            # getsizeof must account for the object header beyond the data.
            for type in types:
                x = type(0)
                assert_(sys.getsizeof(x) > x.nbytes)

        def test_error(self):
            # __sizeof__ takes no arguments.
            d = np.float32()
            assert_raises(TypeError, d.__sizeof__, "a")
def test_no_seq_repeat_basic_array_like(self):
class ArrayLike:
def __init__(self, arr):
self.arr = arr
def __array__(self, dtype=None, copy=None):
return self.arr
for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):
assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))
assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))
assert_array_equal(arr_like * np.int_(3), np.full(3, 3))
assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))
class TestNegative:
    def test_exceptions(self):
        # Negating a boolean scalar is a TypeError.
        a = np.ones((), dtype=np.bool)[()]
        assert_raises(TypeError, operator.neg, a)

    def test_result(self):
        # -x + x == 0 for signed/float types; for unsigned types -1
        # wraps around to the type's maximum value.
        types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            for dt in types:
                a = np.ones((), dtype=dt)[()]
                if dt in np.typecodes['UnsignedInteger']:
                    st = np.dtype(dt).type
                    max = st(np.iinfo(dt).max)
                    assert_equal(operator.neg(a), max)
                else:
                    assert_equal(operator.neg(a) + a, 0)
class TestSubtract:
    """Scalar subtraction: bool rejects it, numeric types give x - x == 0."""

    def test_exceptions(self):
        # Boolean subtraction was removed; it must raise TypeError.
        flag = np.ones((), dtype=np.bool)[()]
        assert_raises(TypeError, operator.sub, flag, flag)

    def test_result(self):
        # x - x is exactly zero for every integer and float scalar type.
        codes = np.typecodes['AllInteger'] + np.typecodes['AllFloat']
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning)
            for code in codes:
                one = np.ones((), dtype=code)[()]
                assert_equal(operator.sub(one, one), 0)
class TestAbs:
    """builtin abs() and np.abs() must be exact on float/complex scalars."""

    @staticmethod
    def _xfail_broken_cygwin_abs(dtype):
        # Shared guard (was duplicated verbatim in both tests below): on old
        # cygwin, clongdouble abs is computed in double precision, so the
        # exactness checks cannot pass.
        if (
            sys.platform == "cygwin" and dtype == np.clongdouble and
            (
                _pep440.parse(platform.release().split("-")[0])
                < _pep440.Version("3.3.0")
            )
        ):
            pytest.xfail(
                reason="absl is computed in double precision on cygwin < 3.3"
            )

    def _test_abs_func(self, absfunc, test_dtype):
        # Representative values: negatives, signed zeros, and the extreme
        # finite values of the dtype.
        x = test_dtype(-1.5)
        assert_equal(absfunc(x), 1.5)
        x = test_dtype(0.0)
        res = absfunc(x)
        # assert_equal() checks the signedness of zero.
        assert_equal(res, 0.0)
        x = test_dtype(-0.0)
        res = absfunc(x)
        assert_equal(res, 0.0)

        x = test_dtype(np.finfo(test_dtype).max)
        assert_equal(absfunc(x), x.real)

        with suppress_warnings() as sup:
            sup.filter(UserWarning)
            x = test_dtype(np.finfo(test_dtype).tiny)
            assert_equal(absfunc(x), x.real)

        x = test_dtype(np.finfo(test_dtype).min)
        assert_equal(absfunc(x), -x.real)

    @pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
    def test_builtin_abs(self, dtype):
        self._xfail_broken_cygwin_abs(dtype)
        self._test_abs_func(abs, dtype)

    @pytest.mark.parametrize("dtype", floating_types + complex_floating_types)
    def test_numpy_abs(self, dtype):
        self._xfail_broken_cygwin_abs(dtype)
        self._test_abs_func(np.abs, dtype)
class TestBitShifts:

    @pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
    @pytest.mark.parametrize('op',
                             [operator.rshift, operator.lshift],
                             ids=['>>', '<<'])
    def test_shift_all_bits(self, type_code, op):
        """Shifts where the shift amount is the width of the type or wider """
        dt = np.dtype(type_code)
        nbits = dt.itemsize * 8
        for val in [5, -5]:
            for shift in [nbits, nbits + 4]:
                val_scl = np.array(val).astype(dt)[()]
                shift_scl = dt.type(shift)
                res_scl = op(val_scl, shift_scl)
                if val_scl < 0 and op is operator.rshift:
                    # Right shift of a negative value preserves the sign bit.
                    assert_equal(res_scl, -1)
                else:
                    # Unlike C, over-wide shifts are well-defined: zero.
                    assert_equal(res_scl, 0)
                # The array path must agree with the scalar path.
                val_arr = np.array([val_scl]*32, dtype=dt)
                shift_arr = np.array([shift]*32, dtype=dt)
                res_arr = op(val_arr, shift_arr)
                assert_equal(res_arr, res_scl)
class TestHash:
    @pytest.mark.parametrize("type_code", np.typecodes['AllInteger'])
    def test_integer_hashes(self, type_code):
        # Integer scalars hash identically to the equal Python int.
        scalar = np.dtype(type_code).type
        for i in range(128):
            assert hash(i) == hash(scalar(i))

    @pytest.mark.parametrize("type_code", np.typecodes['AllFloat'])
    def test_float_and_complex_hashes(self, type_code):
        # Float/complex scalars hash identically to the equal Python value.
        scalar = np.dtype(type_code).type
        for val in [np.pi, np.inf, 3, 6.]:
            numpy_val = scalar(val)
            # Cast to the matching Python type before comparing hashes.
            if numpy_val.dtype.kind == 'c':
                val = complex(numpy_val)
            else:
                val = float(numpy_val)
            assert val == numpy_val
            assert hash(val) == hash(numpy_val)

        if hash(float(np.nan)) != hash(float(np.nan)):
            # NaN hashes by object identity on this Python version, so two
            # distinct NumPy NaN scalars must also hash differently.
            assert hash(scalar(np.nan)) != hash(scalar(np.nan))

    @pytest.mark.parametrize("type_code", np.typecodes['Complex'])
    def test_complex_hashes(self, type_code):
        # Specific complex values, including a pure-imaginary and inf case.
        scalar = np.dtype(type_code).type
        for val in [np.pi+1j, np.inf-3j, 3j, 6.+1j]:
            numpy_val = scalar(val)
            assert hash(complex(numpy_val)) == hash(numpy_val)
@contextlib.contextmanager
def recursionlimit(n):
    """Run the managed block with the recursion limit set to *n*,
    restoring the previous limit afterwards (even on error)."""
    saved = sys.getrecursionlimit()
    try:
        sys.setrecursionlimit(n)
        yield
    finally:
        sys.setrecursionlimit(saved)
@given(sampled_from(objecty_things),
       sampled_from(reasonable_operators_for_scalars),
       sampled_from(types))
def test_operator_object_left(o, op, type_):
    # <object> op <scalar> must either compute or raise TypeError — it must
    # not recurse unboundedly (hence the lowered recursion limit).
    try:
        with recursionlimit(200):
            op(o, type_(1))
    except TypeError:
        pass
@given(sampled_from(objecty_things),
       sampled_from(reasonable_operators_for_scalars),
       sampled_from(types))
def test_operator_object_right(o, op, type_):
    # Mirror of test_operator_object_left with the scalar on the left.
    try:
        with recursionlimit(200):
            op(type_(1), o)
    except TypeError:
        pass
@given(sampled_from(reasonable_operators_for_scalars),
       sampled_from(types),
       sampled_from(types))
def test_operator_scalars(op, type1, type2):
    # Any scalar/scalar combination either computes or raises TypeError.
    try:
        op(type1(1), type2(1))
    except TypeError:
        pass
@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
def test_longdouble_operators_with_obj(sctype, op):
    # (c)longdouble scalars combined with arbitrary Python objects must go
    # through the generic object path and either compute or raise a clean
    # TypeError.  (A stray no-op `pass` statement preceding the try blocks
    # was removed.)
    try:
        op(sctype(3), None)
    except TypeError:
        pass
    try:
        op(None, sctype(3))
    except TypeError:
        pass
@pytest.mark.parametrize("op", reasonable_operators_for_scalars)
@pytest.mark.parametrize("sctype", [np.longdouble, np.clongdouble])
@np.errstate(all="ignore")
def test_longdouble_operators_with_large_int(sctype, op):
    # Integers beyond 2**64 cannot be converted for clongdouble mod/floordiv,
    # so BOTH operand orders must raise TypeError; every other combination
    # must match explicitly converting the big int to the scalar type.
    if sctype == np.clongdouble and op in [operator.mod, operator.floordiv]:
        with pytest.raises(TypeError):
            op(sctype(3), 2**64)
        with pytest.raises(TypeError):
            # BUG FIX: the original repeated `op(sctype(3), 2**64)` here, so
            # the reflected operand order was never exercised (compare the
            # two-sided checks in the else branch).
            op(2**64, sctype(3))
    else:
        assert op(sctype(3), -2**64) == op(sctype(3), sctype(-2**64))
        assert op(2**64, sctype(3)) == op(sctype(2**64), sctype(3))
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
@pytest.mark.parametrize("operation", [
    lambda min, max: max + max,
    lambda min, max: min - max,
    lambda min, max: max * max], ids=["+", "-", "*"])
def test_scalar_integer_operation_overflow(dtype, operation):
    # Overflowing scalar integer arithmetic warns instead of failing.
    st = np.dtype(dtype).type
    min = st(np.iinfo(dtype).min)
    max = st(np.iinfo(dtype).max)
    with pytest.warns(RuntimeWarning, match="overflow encountered"):
        operation(min, max)
@pytest.mark.parametrize("dtype", np.typecodes["Integer"])
@pytest.mark.parametrize("operation", [
    lambda min, neg_1: -min,
    lambda min, neg_1: abs(min),
    lambda min, neg_1: min * neg_1,
    pytest.param(lambda min, neg_1: min // neg_1,
                 marks=pytest.mark.skip(reason="broken on some platforms"))],
    ids=["neg", "abs", "*", "//"])
def test_scalar_signed_integer_overflow(dtype, operation):
    # The minimum signed integer has no positive counterpart, so negating
    # it (directly, via abs, or via * -1) overflows and must warn.
    st = np.dtype(dtype).type
    min = st(np.iinfo(dtype).min)
    neg_1 = st(-1)
    with pytest.warns(RuntimeWarning, match="overflow encountered"):
        operation(min, neg_1)
@pytest.mark.parametrize("dtype", np.typecodes["UnsignedInteger"])
def test_scalar_unsigned_integer_overflow(dtype):
    # Negating a nonzero unsigned scalar wraps around and warns ...
    val = np.dtype(dtype).type(8)
    with pytest.warns(RuntimeWarning, match="overflow encountered"):
        -val

    # ... but negating unsigned zero is exact and must stay silent.
    zero = np.dtype(dtype).type(0)
    -zero
@pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
@pytest.mark.parametrize("operation", [
    lambda val, zero: val // zero,
    lambda val, zero: val % zero, ], ids=["//", "%"])
def test_scalar_integer_operation_divbyzero(dtype, operation):
    # Integer scalar division by zero warns instead of raising.
    st = np.dtype(dtype).type
    val = st(100)
    zero = st(0)
    with pytest.warns(RuntimeWarning, match="divide by zero"):
        operation(val, zero)
# (forward dunder, reflected dunder, operator callable, is-comparison)
# tuples used to parametrize the subclass-deferral tests below.  For the
# comparison operators the "reflected" slot is the mirrored comparison.
ops_with_names = [
    ("__lt__", "__gt__", operator.lt, True),
    ("__le__", "__ge__", operator.le, True),
    ("__eq__", "__eq__", operator.eq, True),
    ("__ne__", "__ne__", operator.ne, True),
    ("__gt__", "__lt__", operator.gt, True),
    ("__ge__", "__le__", operator.ge, True),
    ("__floordiv__", "__rfloordiv__", operator.floordiv, False),
    ("__truediv__", "__rtruediv__", operator.truediv, False),
    ("__add__", "__radd__", operator.add, False),
    ("__mod__", "__rmod__", operator.mod, False),
    ("__mul__", "__rmul__", operator.mul, False),
    ("__pow__", "__rpow__", operator.pow, False),
    ("__sub__", "__rsub__", operator.sub, False),
]  # BUG FIX: the closing bracket was missing (SyntaxError as transcribed).
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("sctype", [np.float32, np.float64, np.longdouble])
def test_subclass_deferral(sctype, __op__, __rop__, op, cmp):
    """
    This test covers scalar subclass deferral. Note that this is exceedingly
    complicated, especially since it tends to fall back to the array paths and
    these additionally add the "array priority" mechanism.
    The behaviour was modified subtly in 1.22 (to make it closer to how Python
    scalars work). Due to its complexity and the fact that subclassing NumPy
    scalars is probably a bad idea to begin with. There is probably room
    for adjustments here.
    """
    # Two sibling subclasses that do NOT override the operator:
    class myf_simple1(sctype):
        pass

    class myf_simple2(sctype):
        pass

    def op_func(self, other):
        return __op__

    def rop_func(self, other):
        return __rop__

    # A subclass overriding both the forward and the reflected operator:
    myf_op = type("myf_op", (sctype,), {__op__: op_func, __rop__: rop_func})

    # Non-overriding siblings fall back to the base scalar behaviour
    # (result is the base type, or np.bool for comparisons).
    res = op(myf_simple1(1), myf_simple2(2))
    assert type(res) == sctype or type(res) == np.bool
    assert op(myf_simple1(1), myf_simple2(2)) == op(1, 2)  # inherited

    # The overriding subclass wins only when it appears on the forward side.
    assert op(myf_op(1), myf_simple1(2)) == __op__
    assert op(myf_simple1(1), myf_op(2)) == op(1, 2)
def test_longdouble_complex():
    """Adding a Python complex to a longdouble works from both sides."""
    ld_one = np.longdouble(1)
    expected = 1 + 1j
    assert ld_one + 1j == expected
    assert 1j + ld_one == expected
@pytest.mark.parametrize(["__op__", "__rop__", "op", "cmp"], ops_with_names)
@pytest.mark.parametrize("subtype", [float, int, complex, np.float16])
@np._no_nep50_warning()
def test_pyscalar_subclasses(subtype, __op__, __rop__, op, cmp):
    # Python-scalar subclasses: with __array_ufunc__ = None NumPy must fully
    # defer to the subclass's __op__/__rop__; without it, the subclass must
    # behave exactly like its base type against every NumPy scalar.
    def op_func(self, other):
        return __op__

    def rop_func(self, other):
        return __rop__

    # Setting __array_ufunc__ = None indicates full deferral:
    myt = type("myt", (subtype,),
               {__op__: op_func, __rop__: rop_func, "__array_ufunc__": None})

    assert op(myt(1), np.float64(2)) == __op__
    assert op(np.float64(1), myt(2)) == __rop__

    if op in {operator.mod, operator.floordiv} and subtype == complex:
        return  # mod/floordiv are not supported for complex; do not test.

    if __rop__ == __op__:
        return  # comparisons: forward and reflected name coincide.

    # Without __array_ufunc__, overriding only the reflected op must leave
    # the subclass behaving like its base type:
    myt = type("myt", (subtype,), {__rop__: rop_func})
    res = op(myt(1), np.float16(2))
    expected = op(subtype(1), np.float16(2))
    assert res == expected
    assert type(res) == type(expected)
    res = op(np.float32(2), myt(1))
    expected = op(np.float32(2), subtype(1))
    assert res == expected
    assert type(res) == type(expected)

    # Same checks for longdouble, which takes a separate code path:
    res = op(myt(1), np.longdouble(2))
    expected = op(subtype(1), np.longdouble(2))
    assert res == expected
    assert type(res) == type(expected)
    # BUG FIX: the original computed `res` with np.float32(2) here while
    # `expected` used np.longdouble(2); both sides now use longdouble so the
    # comparison is meaningful.
    res = op(np.longdouble(2), myt(1))
    expected = op(np.longdouble(2), subtype(1))
    assert res == expected
def test_truediv_int():
    """True division of an unsigned integer scalar matches float64 division."""
    denominator = 123454
    assert np.uint8(3) / denominator == np.float64(3) / denominator
@pytest.mark.slow
@pytest.mark.parametrize("op", [op for op in reasonable_operators_for_scalars if op is not operator.pow])
@pytest.mark.parametrize("sctype", types)
@pytest.mark.parametrize("other_type", [float, int, complex])
@pytest.mark.parametrize("rop", [True, False])
def test_scalar_matches_array_op_with_pyscalar(op, sctype, other_type, rop):
    """Scalar-with-pyscalar ops must match the array (ufunc) path: same
    value, same result dtype, and TypeError raised in the same cases."""
    val1 = sctype(2)
    val2 = other_type(2)

    if rop:
        # Swap the operands to exercise the reflected-operator path.
        _op = op
        op = lambda x, y: _op(y, x)

    try:
        res = op(val1, val2)
    except TypeError:
        # If the scalar path raised, the array path must raise as well.
        try:
            expected = op(np.asarray(val1), val2)
            raise AssertionError("ufunc didn't raise.")
        except TypeError:
            return
    else:
        expected = op(np.asarray(val1), val2)

    assert res == expected
    if isinstance(val1, float) and other_type is complex and rop:
        # Here ``res`` is a Python complex (no ``.dtype``), so wrap it in an
        # array to compare dtypes with the array-path result.
        assert np.array(res).dtype == expected.dtype
    else:
        assert res.dtype == expected.dtype
.\numpy\numpy\_core\tests\test_scalarprint.py
import code
import platform
import pytest
import sys
from tempfile import TemporaryFile
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises, IS_MUSL
class TestRealScalars:
    """str/repr and dragon4 formatting of the real floating-point scalars."""

    def test_str(self):
        # One row of expected strings per value, one column per type.
        svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
        styps = [np.float16, np.float32, np.float64, np.longdouble]
        wanted = [
             ['0.0',  '0.0',  '0.0',  '0.0'],
             ['-0.0', '-0.0', '-0.0', '-0.0'],
             ['1.0',  '1.0',  '1.0',  '1.0'],
             ['-1.0', '-1.0', '-1.0', '-1.0'],
             ['inf',  'inf',  'inf',  'inf'],
             ['-inf', '-inf', '-inf', '-inf'],
             ['nan',  'nan',  'nan',  'nan']]

        for wants, val in zip(wanted, svals):
            for want, styp in zip(wants, styps):
                msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
                assert_equal(str(styp(val)), want, err_msg=msg)

    def test_scalar_cutoffs(self):
        # str/repr of np.float64 must agree with the Python float shortest
        # repr around the positional/scientific switch-over points.
        def check(v):
            assert_equal(str(np.float64(v)), str(v))
            assert_equal(str(np.float64(v)), repr(v))
            assert_equal(repr(np.float64(v)), f"np.float64({v!r})")
            assert_equal(repr(np.float64(v)), f"np.float64({v})")

        # check we use the same number of significant digits
        check(1.12345678901234567890)
        check(0.0112345678901234567890)
        # check switch from scientific output to positional and back
        check(1e-5)
        check(1e-4)
        check(1e15)
        check(1e16)

    def test_py2_float_print(self):
        # Printing to a real file must produce the same text as str().
        x = np.double(0.1999999999999)
        with TemporaryFile('r+t') as f:
            print(x, file=f)
            f.seek(0)
            output = f.read()
        assert_equal(output, str(x) + '\n')

        # Also exercise the interactive-interpreter path, which writes to a
        # (redirected) real stdout.
        def userinput():
            yield 'np.sqrt(2)'
            raise EOFError
        gen = userinput()
        input_func = lambda prompt="": next(gen)

        with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
            orig_stdout, orig_stderr = sys.stdout, sys.stderr
            sys.stdout, sys.stderr = fo, fe
            code.interact(local={'np': np}, readfunc=input_func, banner='')
            sys.stdout, sys.stderr = orig_stdout, orig_stderr
            fo.seek(0)
            capture = fo.read().strip()
        assert_equal(capture, repr(np.sqrt(2)))

    def test_dragon4_interface(self):
        tps = [np.float16, np.float32, np.float64]
        # float128 is not always present; musl printing differs, skip there.
        if hasattr(np, 'float128') and not IS_MUSL:
            tps.append(np.float128)

        fpos = np.format_float_positional
        fsci = np.format_float_scientific

        for tp in tps:
            # test padding
            # BUG FIX: the expected strings below had their space runs
            # collapsed to single spaces; restored the widths implied by
            # pad_left=4 / pad_right=4.
            assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), "   1.    ")
            assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), "  -1.    ")
            assert_equal(fpos(tp('-10.2'),
                         pad_left=4, pad_right=4), " -10.2   ")

            # test exp_digits
            assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")

            # test fixed (non-unique) mode
            assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
            assert_equal(fsci(tp('1.0'), unique=False, precision=4),
                         "1.0000e+00")

            # test trimming ('k' keeps everything)
            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
                         "1.0000")

            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
                         "1.")
            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
                         "1.2" if tp != np.float16 else "1.2002")

            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
                         "1.0")
            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
                         "1.2" if tp != np.float16 else "1.2002")
            assert_equal(fpos(tp('1.'), trim='0'), "1.0")

            assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
                         "1")
            assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
                         "1.2" if tp != np.float16 else "1.2002")
            assert_equal(fpos(tp('1.'), trim='-'), "1")
            assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")

    @pytest.mark.skipif(not platform.machine().startswith("ppc64"),
                        reason="only applies to ppc float128 values")
    def test_ppc64_ibm_double_double128(self):
        # Check str() of IBM double-double float128 values as they shrink
        # into (and through) the subnormal range.
        x = np.float128('2.123123123123123123123123123123123e-286')
        got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
        expected = [
            "1.06156156156156156156156156156157e-286",
            "1.06156156156156156156156156156158e-287",
            "1.06156156156156156156156156156159e-288",
            "1.0615615615615615615615615615616e-289",
            "1.06156156156156156156156156156157e-290",
            "1.06156156156156156156156156156156e-291",
            "1.0615615615615615615615615615616e-292",
            "1.0615615615615615615615615615615e-293",
            "1.061561561561561561561561561562e-294",
            "1.06156156156156156156156156155e-295",
            "1.0615615615615615615615615615616e-296",
            "1.06156156156156156156156156156156e-297",
            "1.06156156156156156156156156156157e-298",
            "1.0615615615615615615615615615616e-299",
            "1.0615615615615615615615615615615e-300",
            "1.061561561561561561561561561562e-301",
            "1.06156156156156156156156156155e-302",
            "1.0615615615615615615615615616e-303",
            "1.061561561561561561561561562e-304",
            "1.0615615615615615615615615616e-305",
            "1.0615615615615615615615615616e-306",
            "1.06156156156156156156156156156e-307",
            "1.0615615615615615615615615615616e-308",
            "1.06156156156156156156156156156e-309",
            "1.06156156156156157e-310",
            "1.0615615615615616e-311",
            "1.06156156156156e-312",
            "1.06156156156157e-313",
            "1.0615615615616e-314",
            "1.06156156156e-315",
            "1.06156156155e-316",
            "1.061562e-317",
            "1.06156e-318",
            "1.06155e-319",
            "1.0617e-320",
            "1.06e-321",
            "1.04e-322",
            "1e-323",
            "0.0",
            "0.0"]
        assert_equal(got, expected)

        # Two values may print the same yet compare unequal:
        a = np.float128('2')/np.float128('3')
        b = np.float128(str(a))
        assert_equal(str(a), str(b))
        assert_(a != b)

    def test_float32_roundtrip(self):
        # BUG FIX: renamed from ``float32_roundtrip`` (no ``test_`` prefix),
        # which pytest silently never collected.
        x = np.float32(1024 - 2**-14)
        y = np.float32(1024 - 2**-13)
        assert_(repr(x) != repr(y))
        assert_equal(np.float32(repr(x)), x)
        assert_equal(np.float32(repr(y)), y)

    def test_float64_vs_python(self):
        # BUG FIX: renamed from ``float64_vs_python`` (no ``test_`` prefix),
        # which pytest silently never collected.
        assert_equal(repr(np.float64(0.1)), repr(0.1))
        assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
.\numpy\numpy\_core\tests\test_scalar_ctors.py
"""
Test the scalar constructors, which also do type-coercion
"""
import pytest
import numpy as np
from numpy.testing import (
assert_equal, assert_almost_equal, assert_warns,
)
class TestFromString:
    """Scalar construction from decimal strings."""

    def test_floating(self):
        """Plain decimal strings parse to (approximately) the right value."""
        for ftype in (np.single, np.double, np.longdouble):
            assert_almost_equal(ftype('1.234'), 1.234)

    def test_floating_overflow(self):
        """ Strings containing an unrepresentable float overflow """
        for sign, expected in (('', np.inf), ('-', -np.inf)):
            assert_equal(np.half(sign + '1e10000'), expected)
            assert_equal(np.single(sign + '1e10000'), expected)
            assert_equal(np.double(sign + '1e10000'), expected)
            # longdouble additionally emits a RuntimeWarning on overflow.
            overflowed = assert_warns(RuntimeWarning, np.longdouble,
                                      sign + '1e10000')
            assert_equal(overflowed, expected)
class TestExtraArgs:
    """Extra positional/keyword arguments to scalar constructors."""

    def test_superclass(self):
        # np.str_ forwards encoding arguments (positional or keyword) to the
        # str superclass constructor.
        s = np.str_(b'\x61', encoding='unicode-escape')
        assert s == 'a'
        s = np.str_(b'\x61', 'unicode-escape')
        assert s == 'a'

        # Bad escape sequences must raise, not be passed through.
        with pytest.raises(UnicodeDecodeError):
            np.str_(b'\\xx', encoding='unicode-escape')
        with pytest.raises(UnicodeDecodeError):
            np.str_(b'\\xx', 'unicode-escape')

        # np.bytes_ accepts a negative integer and stringifies it.
        assert np.bytes_(-2) == b'-2'

    def test_datetime(self):
        # A (unit, count) tuple is accepted as the datetime64 metadata.
        dt = np.datetime64('2000-01', ('M', 2))
        assert np.datetime_data(dt) == ('M', 2)

        with pytest.raises(TypeError):
            np.datetime64('2000', garbage=True)

    def test_bool(self):
        # Unknown keyword arguments are rejected.
        with pytest.raises(TypeError):
            np.bool(False, garbage=True)

    def test_void(self):
        # Unknown keyword arguments are rejected.
        with pytest.raises(TypeError):
            np.void(b'test', garbage=True)
class TestFromInt:
def test_intp(self):
assert_equal(1024, np.intp(1024))
def test_uint64_from_negative(self):
with pytest.raises(OverflowError):
np.uint64(-2)
# Concrete scalar types grouped by kind; shared by the parametrized
# array-from-scalar tests below.
int_types = [np.byte, np.short, np.intc, np.long, np.longlong]
uint_types = [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong]
float_types = [np.half, np.single, np.double, np.longdouble]
cfloat_types = [np.csingle, np.cdouble, np.clongdouble]
class TestArrayFromScalar:
    """ gh-15467 and gh-19125 """

    def _do_test(self, t1, t2, arg=2):
        # Build a t1 scalar from ``arg`` (None -> default-construct,
        # tuple -> separate real/imag parts) and wrap it in an array with
        # the requested dtype ``t2``.
        if arg is None:
            x = t1()
        elif isinstance(arg, tuple):
            if t1 is np.clongdouble:
                pytest.xfail("creating a clongdouble from real and "
                             "imaginary parts isn't supported")
            x = t1(*arg)
        else:
            x = t1(arg)
        arr = np.array(x, dtype=t2)
        # The resulting dtype must be t2, or t1 when no dtype was requested.
        if t2 is None:
            assert arr.dtype.type is t1
        else:
            assert arr.dtype.type is t2

    @pytest.mark.parametrize('t1', int_types + uint_types)
    @pytest.mark.parametrize('t2', int_types + uint_types + [None])
    def test_integers(self, t1, t2):
        return self._do_test(t1, t2)

    @pytest.mark.parametrize('t1', float_types)
    @pytest.mark.parametrize('t2', float_types + [None])
    def test_reals(self, t1, t2):
        return self._do_test(t1, t2)

    @pytest.mark.parametrize('t1', cfloat_types)
    @pytest.mark.parametrize('t2', cfloat_types + [None])
    @pytest.mark.parametrize('arg', [2, 1 + 3j, (1, 2), None])
    def test_complex(self, t1, t2, arg):
        self._do_test(t1, t2, arg)

    @pytest.mark.parametrize('t', cfloat_types)
    def test_complex_errors(self, t):
        # Complex constructors reject a complex imaginary part and None
        # components.
        with pytest.raises(TypeError):
            t(1j, 1j)
        with pytest.raises(TypeError):
            t(1, None)
        with pytest.raises(TypeError):
            t(None, 1)
@pytest.mark.parametrize("length",
        [5, np.int8(5), np.array(5, dtype=np.uint16)])
def test_void_via_length(length):
    """np.void(integer-like) allocates that many zero bytes."""
    result = np.void(length)
    assert type(result) is np.void
    assert result.dtype == "V5"
    assert result.item() == b"\0" * 5
@pytest.mark.parametrize("bytes_",
        [b"spam", np.array(567.)])
def test_void_from_byteslike(bytes_):
    # Constructing a void scalar from a buffer copies the bytes verbatim.
    res = np.void(bytes_)
    expected = bytes(bytes_)
    assert type(res) is np.void
    assert res.item() == expected

    # A larger explicit void dtype zero-pads the tail ...
    res = np.void(bytes_, dtype="V100")
    assert type(res) is np.void
    assert res.item()[:len(expected)] == expected
    assert res.item()[len(expected):] == b"\0" * (res.nbytes - len(expected))
    # ... and a smaller one truncates.
    res = np.void(bytes_, dtype="V4")
    assert type(res) is np.void
    assert res.item() == expected[:4]
def test_void_arraylike_trumps_byteslike():
    """A memoryview is treated as an array-like of single bytes (giving a
    V1 array), not as one flat buffer."""
    view = memoryview(b"just one mintleaf?")
    result = np.void(view)
    assert type(result) is np.ndarray
    assert result.dtype == "V1"
    assert result.shape == (18,)
def test_void_dtype_arg():
res = np.void((1, 2), dtype="i,i")
assert res.item() == (1, 2)
res = np.void((2, 3), "i,i")
assert res.item() == (2, 3)
@pytest.mark.parametrize("data",
        [5, np.int8(5), np.array(5, dtype=np.uint16)])
def test_void_from_integer_with_dtype(data):
    """With a structured dtype, an integer-like fills every field."""
    result = np.void(data, dtype="i,i")
    assert type(result) is np.void
    assert result.dtype == "i,i"
    assert result["f0"] == 5
    assert result["f1"] == 5
def test_void_from_structure():
dtype = np.dtype([('s', [('f', 'f8'), ('u', 'U1')]), ('i', 'i2')])
data = np.array(((1., 'a'), 2), dtype=dtype)
res = np.void(data[()], dtype=dtype)
assert type(res) is np.void
assert res.dtype == dtype
assert res == data[()]
def test_void_bad_dtype():
    # A non-void dtype is rejected with an explanatory message ...
    with pytest.raises(TypeError,
                       match="void: descr must be a `void.*int64"):
        np.void(4, dtype="i8")

    # ... as is a subarray dtype.
    with pytest.raises(TypeError,
                       match=r"void: descr must be a `void.*\(4,\)"):
        np.void(4, dtype="4i")
.\numpy\numpy\_core\tests\test_scalar_methods.py
"""
Test the scalar constructors, which also do type-coercion
"""
import fractions
import platform
import types
from typing import Any, Type
import pytest
import numpy as np
from numpy._core import sctypes
from numpy.testing import assert_equal, assert_raises, IS_MUSL
class TestAsIntegerRatio:
    """Exact (numerator, denominator) decomposition of float scalars."""

    @pytest.mark.parametrize("ftype", [
        np.half, np.single, np.double, np.longdouble])
    @pytest.mark.parametrize("f, ratio", [
        (0.875, (7, 8)),
        (-0.875, (-7, 8)),
        (0.0, (0, 1)),
        (11.5, (23, 2)),
        ])
    def test_small(self, ftype, f, ratio):
        assert_equal(ftype(f).as_integer_ratio(), ratio)

    @pytest.mark.parametrize("ftype", [
        np.half, np.single, np.double, np.longdouble])
    def test_simple_fractions(self, ftype):
        # Compare via fractions.Fraction so numerator/denominator reduce
        # consistently.
        R = fractions.Fraction
        assert_equal(R(0, 1),
                     R(*ftype(0.0).as_integer_ratio()))
        assert_equal(R(5, 2),
                     R(*ftype(2.5).as_integer_ratio()))
        assert_equal(R(1, 2),
                     R(*ftype(0.5).as_integer_ratio()))
        assert_equal(R(-2100, 1),
                     R(*ftype(-2100.0).as_integer_ratio()))

    @pytest.mark.parametrize("ftype", [
        np.half, np.single, np.double, np.longdouble])
    def test_errors(self, ftype):
        # inf overflows, nan has no ratio.
        assert_raises(OverflowError, ftype('inf').as_integer_ratio)
        assert_raises(OverflowError, ftype('-inf').as_integer_ratio)
        assert_raises(ValueError, ftype('nan').as_integer_ratio)

    def test_against_known_values(self):
        # Exact ratios for 2.1 at each precision.
        R = fractions.Fraction
        assert_equal(R(1075, 512),
                     R(*np.half(2.1).as_integer_ratio()))
        assert_equal(R(-1075, 512),
                     R(*np.half(-2.1).as_integer_ratio()))
        assert_equal(R(4404019, 2097152),
                     R(*np.single(2.1).as_integer_ratio()))
        assert_equal(R(-4404019, 2097152),
                     R(*np.single(-2.1).as_integer_ratio()))
        assert_equal(R(4728779608739021, 2251799813685248),
                     R(*np.double(2.1).as_integer_ratio()))
        assert_equal(R(-4728779608739021, 2251799813685248),
                     R(*np.double(-2.1).as_integer_ratio()))

    @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [
        (np.half, [0.0, 0.01154830649280303, 0.31082276347447274,
                   0.527350517124794, 0.8308562335072596],
                  [0, 1, 0, -8, 12]),
        (np.single, [0.0, 0.09248576989263226, 0.8160498218131407,
                     0.17389442853722373, 0.7956044195067877],
                    [0, 12, 10, 17, -26]),
        (np.double, [0.0, 0.031066908499895136, 0.5214135908877832,
                     0.45780736035689296, 0.5906586745934036],
                    [0, -801, 51, 194, -653]),
        pytest.param(
            np.longdouble,
            [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495,
             0.9620175814461964],
            [0, -7400, 14266, -7822, -8721],
            marks=[
                pytest.mark.skipif(
                    np.finfo(np.double) == np.finfo(np.longdouble),
                    reason="long double is same as double"),
                pytest.mark.skipif(
                    platform.machine().startswith("ppc"),
                    reason="IBM double double"),
            ]
        )
        ])
    def test_roundtrip(self, ftype, frac_vals, exp_vals):
        # Build f = ldexp(frac, exp) and check n/d reproduces it exactly.
        for frac, exp in zip(frac_vals, exp_vals):
            f = np.ldexp(ftype(frac), exp)
            assert f.dtype == ftype
            n, d = f.as_integer_ratio()

            try:
                nf = np.longdouble(n)
                df = np.longdouble(d)
                if not np.isfinite(df):
                    raise OverflowError
            except (OverflowError, RuntimeWarning):
                # n or d does not fit in a longdouble on this platform.
                pytest.skip("longdouble too small on this platform")

            assert_equal(nf / df, f, "{}/{}".format(n, d))
class TestIsInteger:
    """The ``is_integer`` method of float and integer scalars."""

    @pytest.mark.parametrize("str_value", ["inf", "nan"])
    @pytest.mark.parametrize("code", np.typecodes["Float"])
    def test_special(self, code: str, str_value: str) -> None:
        """inf and nan are never integral."""
        scalar = np.dtype(code).type(str_value)
        assert not scalar.is_integer()

    @pytest.mark.parametrize(
        "code", np.typecodes["Float"] + np.typecodes["AllInteger"]
    )
    def test_true(self, code: str) -> None:
        """Exact whole numbers report is_integer() == True."""
        for scalar in np.arange(-5, 5).astype(code):
            assert scalar.is_integer()

    @pytest.mark.parametrize("code", np.typecodes["Float"])
    def test_false(self, code: str) -> None:
        """Non-integral floats report is_integer() == False."""
        values = np.arange(-5, 5).astype(code)
        values *= 1.1
        for scalar in values:
            if scalar == 0:
                # 0 * 1.1 is exactly 0 and hence integral; skip it.
                continue
            assert not scalar.is_integer()
class TestClassGetItem:
    """``cls[...]`` subscription (types.GenericAlias) on scalar types."""

    @pytest.mark.parametrize("cls", [
        np.number,
        np.integer,
        np.inexact,
        np.unsignedinteger,
        np.signedinteger,
        np.floating,
    ])
    def test_abc(self, cls: Type[np.number]) -> None:
        # The abstract numeric types are subscriptable with one parameter.
        alias = cls[Any]
        assert isinstance(alias, types.GenericAlias)
        assert alias.__origin__ is cls

    def test_abc_complexfloating(self) -> None:
        # complexfloating takes two parameters.
        alias = np.complexfloating[Any, Any]
        assert isinstance(alias, types.GenericAlias)
        assert alias.__origin__ is np.complexfloating

    @pytest.mark.parametrize("arg_len", range(4))
    def test_abc_complexfloating_subscript_tuple(self, arg_len: int) -> None:
        # Only 1- or 2-element tuples are accepted.
        arg_tup = (Any,) * arg_len
        if arg_len in (1, 2):
            assert np.complexfloating[arg_tup]
        else:
            match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
            with pytest.raises(TypeError, match=match):
                np.complexfloating[arg_tup]

    @pytest.mark.parametrize("cls", [np.generic, np.flexible, np.character])
    def test_abc_non_numeric(self, cls: Type[np.generic]) -> None:
        # Non-numeric abstract types are not subscriptable.
        with pytest.raises(TypeError):
            cls[Any]

    @pytest.mark.parametrize("code", np.typecodes["All"])
    def test_concrete(self, code: str) -> None:
        # Concrete scalar types are not subscriptable either.
        cls = np.dtype(code).type
        with pytest.raises(TypeError):
            cls[Any]

    @pytest.mark.parametrize("arg_len", range(4))
    def test_subscript_tuple(self, arg_len: int) -> None:
        # np.number accepts exactly one parameter.
        arg_tup = (Any,) * arg_len
        if arg_len == 1:
            assert np.number[arg_tup]
        else:
            with pytest.raises(TypeError):
                np.number[arg_tup]

    def test_subscript_scalar(self) -> None:
        assert np.number[Any]
class TestBitCount:
    """Popcount (``bit_count``) of the integer scalar types."""

    @pytest.mark.parametrize("itype", sctypes['int']+sctypes['uint'])
    def test_small(self, itype):
        """bit_count matches bin().count('1') for small non-negative values."""
        start = max(np.iinfo(itype).min, 0)
        for value in range(start, 128):
            msg = f"Smoke test for {itype}({value}).bit_count()"
            assert itype(value).bit_count() == bin(value).count("1"), msg

    def test_bit_count(self):
        """Spot-check uint64 popcounts around powers of two."""
        for exp in [10, 17, 63]:
            power = 2**exp
            assert np.uint64(power).bit_count() == 1
            assert np.uint64(power - 1).bit_count() == exp
            assert np.uint64(power ^ 63).bit_count() == 7
            assert np.uint64((power - 1) ^ 510).bit_count() == exp - 8
.\numpy\numpy\_core\tests\test_shape_base.py
import pytest
import numpy as np
from numpy._core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
from numpy.exceptions import AxisError
from numpy._core.shape_base import (_block_dispatcher, _block_setup,
_block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
assert_raises_regex, assert_warns, IS_PYPY
)
class TestAtleast1d:
    """atleast_1d promotes 0-d inputs and leaves >=1-d inputs unchanged."""

    def test_0D_array(self):
        got = [atleast_1d(arr) for arr in (array(1), array(2))]
        assert_array_equal(got, [array([1]), array([2])])

    def test_1D_array(self):
        pair = [array([1, 2]), array([2, 3])]
        assert_array_equal([atleast_1d(arr) for arr in pair], pair)

    def test_2D_array(self):
        pair = [array([[1, 2], [1, 2]]), array([[2, 3], [2, 3]])]
        assert_array_equal([atleast_1d(arr) for arr in pair], pair)

    def test_3D_array(self):
        base = [array([[1, 2], [1, 2]]), array([[2, 3], [2, 3]])]
        pair = [array([mat, mat]) for mat in base]
        assert_array_equal([atleast_1d(arr) for arr in pair], pair)

    def test_r1array(self):
        """ Test to make sure equivalent Travis O's r1array function
        """
        assert_(atleast_1d(3).shape == (1,))
        assert_(atleast_1d(3j).shape == (1,))
        assert_(atleast_1d(3.0).shape == (1,))
        assert_(atleast_1d([[2, 3], [4, 5]]).shape == (2, 2))
class TestAtleast2d:
    """atleast_2d promotes 0-/1-d inputs and leaves >=2-d inputs unchanged."""

    def test_0D_array(self):
        got = [atleast_2d(arr) for arr in (array(1), array(2))]
        assert_array_equal(got, [array([[1]]), array([[2]])])

    def test_1D_array(self):
        got = [atleast_2d(arr) for arr in (array([1, 2]), array([2, 3]))]
        assert_array_equal(got, [array([[1, 2]]), array([[2, 3]])])

    def test_2D_array(self):
        pair = [array([[1, 2], [1, 2]]), array([[2, 3], [2, 3]])]
        assert_array_equal([atleast_2d(arr) for arr in pair], pair)

    def test_3D_array(self):
        base = [array([[1, 2], [1, 2]]), array([[2, 3], [2, 3]])]
        pair = [array([mat, mat]) for mat in base]
        assert_array_equal([atleast_2d(arr) for arr in pair], pair)

    def test_r2array(self):
        """ Test to make sure equivalent Travis O's r2array function
        """
        assert_(atleast_2d(3).shape == (1, 1))
        assert_(atleast_2d([3j, 1]).shape == (1, 2))
        assert_(atleast_2d([[[3, 1], [4, 5]], [[3, 5], [1, 2]]]).shape == (2, 2, 2))
class TestAtleast3d:
    """atleast_3d promotes lower-dimensional inputs to at least 3 dims."""
    # NOTE: removed a stray ``pass`` statement that preceded the method
    # definitions; it was dead code.

    def test_0D_array(self):
        a = array(1)
        b = array(2)
        res = [atleast_3d(a), atleast_3d(b)]
        desired = [array([[[1]]]), array([[[2]]])]
        assert_array_equal(res, desired)

    def test_1D_array(self):
        # A length-n 1-d input becomes shape (1, n, 1).
        a = array([1, 2])
        b = array([2, 3])
        res = [atleast_3d(a), atleast_3d(b)]
        desired = [array([[[1], [2]]]), array([[[2], [3]]])]
        assert_array_equal(res, desired)

    def test_2D_array(self):
        # A 2-d input gains a trailing axis.
        a = array([[1, 2], [1, 2]])
        b = array([[2, 3], [2, 3]])
        res = [atleast_3d(a), atleast_3d(b)]
        desired = [a[:,:, newaxis], b[:,:, newaxis]]
        assert_array_equal(res, desired)

    def test_3D_array(self):
        # 3-d inputs pass through unchanged.
        a = array([[1, 2], [1, 2]])
        b = array([[2, 3], [2, 3]])
        a = array([a, a])
        b = array([b, b])
        res = [atleast_3d(a), atleast_3d(b)]
        desired = [a, b]
        assert_array_equal(res, desired)
class TestHstack:
def test_non_iterable(self):
assert_raises(TypeError, hstack, 1)
def test_empty_input(self):
assert_raises(ValueError, hstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = hstack([a, b])
desired = array([1, 2])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = hstack([a, b])
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with pytest.raises(TypeError, match="arrays to stack must be"):
hstack((np.arange(3) for _ in range(2)))
with pytest.raises(TypeError, match="arrays to stack must be"):
hstack(map(lambda x: x, np.ones((3, 2))))
def test_casting_and_dtype(self):
a = np.array([1, 2, 3])
b = np.array([2.5, 3.5, 4.5])
res = np.hstack((a, b), casting="unsafe", dtype=np.int64)
expected_res = np.array([1, 2, 3, 2, 3, 4])
assert_array_equal(res, expected_res)
def test_casting_and_dtype_type_error(self):
a = np.array([1, 2, 3])
b = np.array([2.5, 3.5, 4.5])
with pytest.raises(TypeError):
hstack((a, b), casting="safe", dtype=np.int64)
class TestVstack:
def test_non_iterable(self):
assert_raises(TypeError, vstack, 1)
def test_empty_input(self):
assert_raises(ValueError, vstack, ())
def test_0D_array(self):
a = array(1)
b = array(2)
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_1D_array(self):
a = array([1])
b = array([2])
res = vstack([a, b])
desired = array([[1], [2]])
assert_array_equal(res, desired)
def test_2D_array(self):
a = array([[1], [2]])
b = array([[1], [2]])
res = vstack([a, b])
desired = array([[1], [2], [1], [2]])
assert_array_equal(res, desired)
def test_2D_array2(self):
a = array([1, 2])
b = array([1, 2])
res = vstack([a, b])
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
def test_generator(self):
with pytest.raises(TypeError, match="arrays to stack must be"):
vstack((np.arange(3) for _ in range(2)))
def test_casting_and_dtype(self):
a = np.array([1, 2, 3])
b = np.array([2.5, 3.5, 4.5])
res = np.vstack((a, b), casting="unsafe", dtype=np.int64)
expected_res = np.array([[1, 2, 3], [2, 3, 4]])
assert_array_equal(res, expected_res)
def test_casting_and_dtype_type_error(self):
a = np.array([1, 2, 3])
b = np.array([2.5, 3.5, 4.5])
with pytest.raises(TypeError):
vstack((a, b), casting="safe", dtype=np.int64)
class TestConcatenate:
    """Validation, axis handling, and out/dtype/casting for concatenate."""

    def test_returns_copy(self):
        # Concatenating a single array must still copy it.
        a = np.eye(3)
        b = np.concatenate([a])
        b[0, 0] = 2
        assert b[0, 0] != a[0, 0]

    def test_exceptions(self):
        # axis must be within bounds for every ndim.
        for ndim in [1, 2, 3]:
            a = np.ones((1,)*ndim)
            np.concatenate((a, a), axis=0)  # OK
            assert_raises(AxisError, np.concatenate, (a, a), axis=ndim)
            assert_raises(AxisError, np.concatenate, (a, a), axis=-(ndim + 1))

        # Scalars cannot be concatenated.
        assert_raises(ValueError, concatenate, (0,))
        assert_raises(ValueError, concatenate, (np.array(0),))

        # Mismatched dimensionality is rejected with a precise message.
        assert_raises_regex(
            ValueError,
            r"all the input arrays must have same number of dimensions, but "
            r"the array at index 0 has 1 dimension\(s\) and the array at "
            r"index 1 has 2 dimension\(s\)",
            np.concatenate, (np.zeros(1), np.zeros((1, 1))))

        # Shapes must match except along the concatenation axis; rotate the
        # mismatching axis through all positions.
        a = np.ones((1, 2, 3))
        b = np.ones((2, 2, 3))
        axis = list(range(3))
        for i in range(3):
            np.concatenate((a, b), axis=axis[0])  # OK
            assert_raises_regex(
                ValueError,
                "all the input array dimensions except for the concatenation axis "
                "must match exactly, but along dimension {}, the array at "
                "index 0 has size 1 and the array at index 1 has size 2"
                .format(i),
                np.concatenate, (a, b), axis=axis[1])
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
            a = np.moveaxis(a, -1, 0)
            b = np.moveaxis(b, -1, 0)
            axis.append(axis.pop(0))

        # No arrays at all is an error.
        assert_raises(ValueError, concatenate, ())

    def test_concatenate_axis_None(self):
        # axis=None flattens the inputs before concatenating.
        a = np.arange(4, dtype=np.float64).reshape((2, 2))
        b = list(range(3))
        c = ['x']
        r = np.concatenate((a, a), axis=None)
        assert_equal(r.dtype, a.dtype)
        assert_equal(r.ndim, 1)
        r = np.concatenate((a, b), axis=None)
        assert_equal(r.size, a.size + len(b))
        assert_equal(r.dtype, a.dtype)
        r = np.concatenate((a, b, c), axis=None, dtype="U")
        d = np.array(['0.0', '1.0', '2.0', '3.0',
                      '0', '1', '2', 'x'])
        assert_array_equal(r, d)
        # out= works together with axis=None.
        out = np.zeros(a.size + len(b))
        r = np.concatenate((a, b), axis=None)
        rout = np.concatenate((a, b), axis=None, out=out)
        assert_(out is rout)
        assert_equal(r, rout)

    def test_large_concatenate_axis_None(self):
        # Passing None positionally concatenates the many 0-d pieces of x.
        x = np.arange(1, 100)
        r = np.concatenate(x, None)
        assert_array_equal(x, r)
        # A far out-of-bounds axis must raise.
        with pytest.raises(ValueError):
            np.concatenate(x, 100)

    def test_concatenate(self):
        # 1-d inputs of all supported kinds, with explicit axes.
        r4 = list(range(4))
        assert_array_equal(concatenate((r4,)), r4)
        # Any sequence
        assert_array_equal(concatenate((tuple(r4),)), r4)
        assert_array_equal(concatenate((array(r4),)), r4)
        # 1D default concatenation
        r3 = list(range(3))
        assert_array_equal(concatenate((r4, r3)), r4 + r3)
        # Mixed sequence types
        assert_array_equal(concatenate((tuple(r4), r3)), r4 + r3)
        assert_array_equal(concatenate((array(r4), r3)), r4 + r3)
        # Explicit axis specification
        assert_array_equal(concatenate((r4, r3), 0), r4 + r3)
        # Including negative
        assert_array_equal(concatenate((r4, r3), -1), r4 + r3)
        # 2D
        a23 = array([[10, 11, 12], [13, 14, 15]])
        a13 = array([[0, 1, 2]])
        res = array([[10, 11, 12], [13, 14, 15], [0, 1, 2]])
        assert_array_equal(concatenate((a23, a13)), res)
        assert_array_equal(concatenate((a23, a13), 0), res)
        assert_array_equal(concatenate((a23.T, a13.T), 1), res.T)
        assert_array_equal(concatenate((a23.T, a13.T), -1), res.T)
        # Arrays much match shape
        assert_raises(ValueError, concatenate, (a23.T, a13.T), 0)
        # 3D
        res = arange(2 * 3 * 7).reshape((2, 3, 7))
        a0 = res[..., :4]
        a1 = res[..., 4:6]
        a2 = res[..., 6:]
        assert_array_equal(concatenate((a0, a1, a2), 2), res)
        assert_array_equal(concatenate((a0, a1, a2), -1), res)
        assert_array_equal(concatenate((a0.T, a1.T, a2.T), 0), res.T)
        # out= returns the provided array.
        out = res.copy()
        rout = concatenate((a0, a1, a2), 2, out=out)
        assert_(out is rout)
        assert_equal(res, rout)

    @pytest.mark.skipif(IS_PYPY, reason="PYPY handles sq_concat, nb_add differently than cpython")
    def test_operator_concat(self):
        # operator.concat is not supported for arrays in either direction.
        import operator
        a = array([1, 2])
        b = array([3, 4])
        n = [1,2]
        res = array([1, 2, 3, 4])
        assert_raises(TypeError, operator.concat, a, b)
        assert_raises(TypeError, operator.concat, a, n)
        assert_raises(TypeError, operator.concat, n, a)
        assert_raises(TypeError, operator.concat, a, 1)
        assert_raises(TypeError, operator.concat, 1, a)

    def test_bad_out_shape(self):
        # out must have exactly the result shape.
        a = array([1, 2])
        b = array([3, 4])
        assert_raises(ValueError, concatenate, (a, b), out=np.empty(5))
        assert_raises(ValueError, concatenate, (a, b), out=np.empty((4,1)))
        assert_raises(ValueError, concatenate, (a, b), out=np.empty((1,4)))
        concatenate((a, b), out=np.empty(4))

    @pytest.mark.parametrize("axis", [None, 0])
    @pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8", "S4"])
    @pytest.mark.parametrize("casting",
            ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
    def test_out_and_dtype(self, axis, out_dtype, casting):
        # ``out=out`` and ``dtype=out.dtype`` must behave identically.
        out = np.empty(4, dtype=out_dtype)
        to_concat = (array([1.1, 2.2]), array([3.3, 4.4]))

        if not np.can_cast(to_concat[0], out_dtype, casting=casting):
            with assert_raises(TypeError):
                concatenate(to_concat, out=out, axis=axis, casting=casting)
            with assert_raises(TypeError):
                concatenate(to_concat, dtype=out.dtype,
                            axis=axis, casting=casting)
        else:
            res_out = concatenate(to_concat, out=out,
                                  axis=axis, casting=casting)
            res_dtype = concatenate(to_concat, dtype=out.dtype,
                                    axis=axis, casting=casting)
            assert res_out is out
            assert_array_equal(out, res_dtype)
            assert res_dtype.dtype == out_dtype

        # Supplying both out= and dtype= is always an error.
        with assert_raises(TypeError):
            concatenate(to_concat, out=out, dtype=out_dtype, axis=axis)

    @pytest.mark.parametrize("axis", [None, 0])
    @pytest.mark.parametrize("string_dt", ["S", "U", "S0", "U0"])
    @pytest.mark.parametrize("arrs",
            [([0.],), ([0.], [1]), ([0], ["string"], [1.])])
    def test_dtype_with_promotion(self, arrs, string_dt, axis):
        # Unsized string dtypes promote to the size a float would need.
        res = np.concatenate(arrs, axis=axis, dtype=string_dt, casting="unsafe")
        assert res.dtype == np.array(1.).astype(string_dt).dtype

    @pytest.mark.parametrize("axis", [None, 0])
    def test_string_dtype_does_not_inspect(self, axis):
        # An unsized string dtype must not stringify arbitrary objects.
        with pytest.raises(TypeError):
            np.concatenate(([None], [1]), dtype="S", axis=axis)
        with pytest.raises(TypeError):
            np.concatenate(([None], [1]), dtype="U", axis=axis)

    @pytest.mark.parametrize("axis", [None, 0])
    def test_subarray_error(self, axis):
        # Subarray dtypes are rejected with a clear message.
        with pytest.raises(TypeError, match=".*subarray dtype"):
            np.concatenate(([1], [1]), dtype="(2,)i", axis=axis)
def test_stack():
    # non-iterable input
    assert_raises(TypeError, stack, 1)

    # 0-d inputs of several kinds
    for input_ in [(1, 2, 3),
                   [np.int32(1), np.int32(2), np.int32(3)],
                   [np.array(1), np.array(2), np.array(3)]]:
        assert_array_equal(stack(input_), [1, 2, 3])
    # 1-d inputs
    a = np.array([1, 2, 3])
    b = np.array([4, 5, 6])
    r1 = array([[1, 2, 3], [4, 5, 6]])
    assert_array_equal(np.stack((a, b)), r1)
    assert_array_equal(np.stack((a, b), axis=1), r1.T)
    # all input types
    assert_array_equal(np.stack(list([a, b])), r1)
    assert_array_equal(np.stack(array([a, b])), r1)
    # all axes for 1-d inputs
    arrays = [np.random.randn(3) for _ in range(10)]
    axes = [0, 1, -1, -2]
    expected_shapes = [(10, 3), (3, 10), (3, 10), (10, 3)]
    for axis, expected_shape in zip(axes, expected_shapes):
        assert_equal(np.stack(arrays, axis).shape, expected_shape)
    assert_raises_regex(AxisError, 'out of bounds', stack, arrays, axis=2)
    assert_raises_regex(AxisError, 'out of bounds', stack, arrays, axis=-3)
    # all axes for 2-d inputs
    arrays = [np.random.randn(3, 4) for _ in range(10)]
    axes = [0, 1, 2, -1, -2, -3]
    expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
                       (3, 4, 10), (3, 10, 4), (10, 3, 4)]
    for axis, expected_shape in zip(axes, expected_shapes):
        assert_equal(np.stack(arrays, axis).shape, expected_shape)
    # empty arrays
    assert_(stack([[], [], []]).shape == (3, 0))
    assert_(stack([[], [], []], axis=1).shape == (0, 3))
    # out=
    out = np.zeros_like(r1)
    np.stack((a, b), out=out)
    assert_array_equal(out, r1)
    # edge cases
    assert_raises_regex(ValueError, 'need at least one array', stack, [])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [1, np.arange(3)])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(3), 1])
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(3), 1], axis=1)
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
    assert_raises_regex(ValueError, 'must have the same shape',
                        stack, [np.arange(2), np.arange(3)])
    # generators are rejected
    with pytest.raises(TypeError, match="arrays to stack must be"):
        stack((x for x in range(3)))
    # casting and dtype
    a = np.array([1, 2, 3])
    b = np.array([2.5, 3.5, 4.5])
    res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64)
    expected_res = np.array([[1, 2], [2, 3], [3, 4]])
    assert_array_equal(res, expected_res)
    # safe casting cannot truncate floats to int64
    with assert_raises(TypeError):
        stack((a, b), dtype=np.int64, axis=1, casting="safe")
# BUG FIX: the function takes an ``axis`` parameter but had no matching
# ``@pytest.mark.parametrize("axis", ...)`` mark, so pytest would error
# with "fixture 'axis' not found" for every case.
@pytest.mark.parametrize("axis", [0])
@pytest.mark.parametrize("out_dtype", ["c8", "f4", "f8", ">f8", "i8"])
@pytest.mark.parametrize("casting",
                         ['no', 'equiv', 'safe', 'same_kind', 'unsafe'])
def test_stack_out_and_dtype(axis, out_dtype, casting):
    """``out=`` and ``dtype=`` must give matching results for stack."""
    to_concat = (array([1, 2]), array([3, 4]))
    res = array([[1, 2], [3, 4]])
    out = np.zeros_like(res)

    if not np.can_cast(to_concat[0], out_dtype, casting=casting):
        with assert_raises(TypeError):
            stack(to_concat, dtype=out_dtype,
                  axis=axis, casting=casting)
    else:
        res_out = stack(to_concat, out=out,
                        axis=axis, casting=casting)
        res_dtype = stack(to_concat, dtype=out_dtype,
                          axis=axis, casting=casting)
        assert res_out is out
        assert_array_equal(out, res_dtype)
        assert res_dtype.dtype == out_dtype

    # Supplying both out= and dtype= is always an error.
    with assert_raises(TypeError):
        stack(to_concat, out=out, dtype=out_dtype, axis=axis)
class TestBlock:
    """Tests for ``np.block``.

    Every test receives the ``block`` fixture, which runs it against the
    public function and against both private implementation strategies
    (concatenate-based and slicing-based), so all code paths are covered.
    """

    @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
    def block(self, request):
        # Wrappers that force one specific implementation path of np.block.
        def _block_force_concatenate(arrays):
            arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
            return _block_concatenate(arrays, list_ndim, result_ndim)

        def _block_force_slicing(arrays):
            arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
            return _block_slicing(arrays, list_ndim, result_ndim)

        if request.param == 'force_concatenate':
            return _block_force_concatenate
        elif request.param == 'force_slicing':
            return _block_force_slicing
        elif request.param == 'block':
            # Returns the module-level ``block`` function imported at the
            # top of the file (not visible in this excerpt).
            return block
        else:
            raise ValueError('Unknown blocking request. There is a typo in the tests.')

    def test_returns_copy(self, block):
        # block() must not alias its input: mutating the result must not
        # affect the original array.
        a = np.eye(3)
        b = block(a)
        b[0, 0] = 2
        assert b[0, 0] != a[0, 0]

    def test_block_total_size_estimate(self, block):
        # _block_setup's fourth return value is the total element count.
        _, _, _, total_size = _block_setup([1])
        assert total_size == 1
        _, _, _, total_size = _block_setup([[1]])
        assert total_size == 1
        _, _, _, total_size = _block_setup([[1, 1]])
        assert total_size == 2
        _, _, _, total_size = _block_setup([[1], [1]])
        assert total_size == 2
        _, _, _, total_size = _block_setup([[1, 2], [3, 4]])
        assert total_size == 4

    def test_block_simple_row_wise(self, block):
        a_2d = np.ones((2, 2))
        b_2d = 2 * a_2d
        desired = np.array([[1, 1, 2, 2],
                            [1, 1, 2, 2]])
        result = block([a_2d, b_2d])
        assert_equal(desired, result)

    def test_block_simple_column_wise(self, block):
        a_2d = np.ones((2, 2))
        b_2d = 2 * a_2d
        expected = np.array([[1, 1],
                             [1, 1],
                             [2, 2],
                             [2, 2]])
        result = block([[a_2d], [b_2d]])
        assert_equal(expected, result)

    def test_block_with_1d_arrays_row_wise(self, block):
        # 1-D inputs in a flat list concatenate horizontally.
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        expected = np.array([1, 2, 3, 2, 3, 4])
        result = block([a, b])
        assert_equal(expected, result)

    def test_block_with_1d_arrays_multiple_rows(self, block):
        a = np.array([1, 2, 3])
        b = np.array([2, 3, 4])
        expected = np.array([[1, 2, 3, 2, 3, 4],
                             [1, 2, 3, 2, 3, 4]])
        result = block([[a, b], [a, b]])
        assert_equal(expected, result)

    def test_block_with_1d_arrays_column_wise(self, block):
        # Nested single-element rows stack 1-D arrays vertically.
        a_1d = np.array([1, 2, 3])
        b_1d = np.array([2, 3, 4])
        expected = np.array([[1, 2, 3],
                             [2, 3, 4]])
        result = block([[a_1d], [b_1d]])
        assert_equal(expected, result)

    def test_block_mixed_1d_and_2d(self, block):
        a_2d = np.ones((2, 2))
        b_1d = np.array([2, 2])
        result = block([[a_2d], [b_1d]])
        expected = np.array([[1, 1],
                             [1, 1],
                             [2, 2]])
        assert_equal(expected, result)

    def test_block_complicated(self, block):
        # Mix of 0-d, 1-d and 2-d pieces of varying widths in one layout.
        one_2d = np.array([[1, 1, 1]])
        two_2d = np.array([[2, 2, 2]])
        three_2d = np.array([[3, 3, 3, 3, 3, 3]])
        four_1d = np.array([4, 4, 4, 4, 4, 4])
        five_0d = np.array(5)
        six_1d = np.array([6, 6, 6, 6, 6])
        zero_2d = np.zeros((2, 6))
        expected = np.array([[1, 1, 1, 2, 2, 2],
                             [3, 3, 3, 3, 3, 3],
                             [4, 4, 4, 4, 4, 4],
                             [5, 6, 6, 6, 6, 6],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0]])
        result = block([[one_2d, two_2d],
                        [three_2d],
                        [four_1d],
                        [five_0d, six_1d],
                        [zero_2d]])
        assert_equal(result, expected)

    def test_nested(self, block):
        # block() results can themselves be used as blocks.
        one = np.array([1, 1, 1])
        two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
        three = np.array([3, 3, 3])
        four = np.array([4, 4, 4])
        five = np.array(5)
        six = np.array([6, 6, 6, 6, 6])
        zero = np.zeros((2, 6))
        result = block([
            [
                block([
                    [one],
                    [three],
                    [four]
                ]),
                two
            ],
            [five, six],
            [zero]
        ])
        expected = np.array([[1, 1, 1, 2, 2, 2],
                             [3, 3, 3, 2, 2, 2],
                             [4, 4, 4, 2, 2, 2],
                             [5, 6, 6, 6, 6, 6],
                             [0, 0, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0]])
        assert_equal(result, expected)

    def test_3d(self, block):
        # a<ijk> is the block at depth i, row j, column k of the 3-D layout.
        a000 = np.ones((2, 2, 2), int) * 1
        a100 = np.ones((3, 2, 2), int) * 2
        a010 = np.ones((2, 3, 2), int) * 3
        a001 = np.ones((2, 2, 3), int) * 4
        a011 = np.ones((2, 3, 3), int) * 5
        a101 = np.ones((3, 2, 3), int) * 6
        a110 = np.ones((3, 3, 2), int) * 7
        a111 = np.ones((3, 3, 3), int) * 8
        result = block([
            [
                [a000, a001],
                [a010, a011],
            ],
            [
                [a100, a101],
                [a110, a111],
            ]
        ])
        expected = np.array([[[1, 1, 4, 4, 4],
                              [1, 1, 4, 4, 4],
                              [3, 3, 5, 5, 5],
                              [3, 3, 5, 5, 5],
                              [3, 3, 5, 5, 5]],
                             [[1, 1, 4, 4, 4],
                              [1, 1, 4, 4, 4],
                              [3, 3, 5, 5, 5],
                              [3, 3, 5, 5, 5],
                              [3, 3, 5, 5, 5]],
                             [[2, 2, 6, 6, 6],
                              [2, 2, 6, 6, 6],
                              [7, 7, 8, 8, 8],
                              [7, 7, 8, 8, 8],
                              [7, 7, 8, 8, 8]],
                             [[2, 2, 6, 6, 6],
                              [2, 2, 6, 6, 6],
                              [7, 7, 8, 8, 8],
                              [7, 7, 8, 8, 8],
                              [7, 7, 8, 8, 8]],
                             [[2, 2, 6, 6, 6],
                              [2, 2, 6, 6, 6],
                              [7, 7, 8, 8, 8],
                              [7, 7, 8, 8, 8],
                              [7, 7, 8, 8, 8]]])
        assert_array_equal(result, expected)

    def test_block_with_mismatched_shape(self, block):
        # Incompatible shapes must raise ValueError, in either order.
        a = np.array([0, 0])
        b = np.eye(2)
        assert_raises(ValueError, block, [a, b])
        assert_raises(ValueError, block, [b, a])

        to_block = [[np.ones((2,3)), np.ones((2,2))],
                    [np.ones((2,2)), np.ones((2,2))]]
        assert_raises(ValueError, block, to_block)

    def test_no_lists(self, block):
        # Scalars and arrays pass through unchanged.
        assert_equal(block(1),         np.array(1))
        assert_equal(block(np.eye(3)), np.eye(3))

    def test_invalid_nesting(self, block):
        # All leaves must sit at the same nesting depth.
        msg = 'depths are mismatched'
        assert_raises_regex(ValueError, msg, block, [1, [2]])
        assert_raises_regex(ValueError, msg, block, [1, []])
        assert_raises_regex(ValueError, msg, block, [[1], 2])
        assert_raises_regex(ValueError, msg, block, [[], 2])
        assert_raises_regex(ValueError, msg, block, [
            [[1], [2]],
            [[3, 4]],
            [5]
        ])

    def test_empty_lists(self, block):
        assert_raises_regex(ValueError, 'empty', block, [])
        assert_raises_regex(ValueError, 'empty', block, [[]])
        assert_raises_regex(ValueError, 'empty', block, [[1], []])

    def test_tuple(self, block):
        # Tuples are rejected to avoid ambiguity with nesting lists.
        assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
        assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])

    def test_different_ndims(self, block):
        # Lower-dimensional inputs are promoted to the highest ndim present.
        a = 1.
        b = 2 * np.ones((1, 2))
        c = 3 * np.ones((1, 1, 3))
        result = block([a, b, c])
        expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
        assert_equal(result, expected)

    def test_different_ndims_depths(self, block):
        a = 1.
        b = 2 * np.ones((1, 2))
        c = 3 * np.ones((1, 2, 3))
        result = block([[a, b], [c]])
        expected = np.array([[[1., 2., 2.],
                              [3., 3., 3.],
                              [3., 3., 3.]]])
        assert_equal(result, expected)

    def test_block_memory_order(self, block):
        # The output memory order follows the inputs' order (C vs Fortran).
        # 3D case
        arr_c = np.zeros((3,)*3, order='C')
        arr_f = np.zeros((3,)*3, order='F')

        b_c = [[[arr_c, arr_c],
                [arr_c, arr_c]],
               [[arr_c, arr_c],
                [arr_c, arr_c]]]

        b_f = [[[arr_f, arr_f],
                [arr_f, arr_f]],
               [[arr_f, arr_f],
                [arr_f, arr_f]]]

        assert block(b_c).flags['C_CONTIGUOUS']
        assert block(b_f).flags['F_CONTIGUOUS']

        # 2D case
        arr_c = np.zeros((3, 3), order='C')
        arr_f = np.zeros((3, 3), order='F')

        b_c = [[arr_c, arr_c],
               [arr_c, arr_c]]

        b_f = [[arr_f, arr_f],
               [arr_f, arr_f]]

        assert block(b_c).flags['C_CONTIGUOUS']
        assert block(b_f).flags['F_CONTIGUOUS']
def test_block_dispatcher():
class ArrayLike:
pass
a = ArrayLike()
b = ArrayLike()
c = ArrayLike()
assert_equal(list(_block_dispatcher(a)), [a])
assert_equal(list(_block_dispatcher([a])), [a])
assert_equal(list(_block_dispatcher([a, b])), [a, b])
assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
.\numpy\numpy\_core\tests\test_simd.py
import pytest, math, re
import itertools
import operator
from numpy._core._simd import targets, clear_floatstatus, get_floatstatus
from numpy._core._multiarray_umath import __cpu_baseline__
class _Test_Utility:
    """Shared helpers for the SIMD test classes.

    Subclasses are instantiated per SIMD target with the attributes below
    filled in by the test setup (not visible in this excerpt).
    """
    # submodule of the desired SIMD extension, e.g. targets["AVX512F"]
    npyv = None
    # the current data type suffix e.g. 's8'
    sfx = None
    # target name can be 'baseline' or one or more of CPU features
    target_name = None

    def __getattr__(self, attr):
        """
        To call NPV intrinsics without the attribute 'npyv' and
        auto suffixing intrinsics according to class attribute 'sfx'
        """
        return getattr(self.npyv, attr + "_" + self.sfx)

    def _x2(self, intrin_name):
        """
        Returns the intrinsic function name suffixed with 'sfx' followed by 'x2'.
        """
        return getattr(self.npyv, f"{intrin_name}_{self.sfx}x2")

    def _data(self, start=None, count=None, reverse=False):
        """
        Create list of consecutive numbers according to number of vector's lanes.
        """
        if start is None:
            start = 1
        if count is None:
            count = self.nlanes
        rng = range(start, start + count)
        if reverse:
            rng = reversed(rng)
        if self._is_fp():
            # Promote to float lanes for floating-point suffixes.
            return [x / 1.0 for x in rng]
        return list(rng)

    def _is_unsigned(self):
        """
        Checks if the current data type suffix indicates an unsigned integer type.
        """
        return self.sfx[0] == 'u'

    def _is_signed(self):
        """
        Checks if the current data type suffix indicates a signed integer type.
        """
        return self.sfx[0] == 's'

    def _is_fp(self):
        """
        Checks if the current data type suffix indicates a floating point type.
        """
        return self.sfx[0] == 'f'

    def _scalar_size(self):
        """
        Returns the size of the scalar in bytes based on the current data type suffix.
        """
        # e.g. 'u32' -> 32.  (Despite the wording above, this is the lane
        # width in *bits* as encoded in the suffix.)
        return int(self.sfx[1:])

    def _int_clip(self, seq):
        """
        Clips integer sequence 'seq' to fit within the valid range for the current data type suffix.
        """
        if self._is_fp():
            return seq
        max_int = self._int_max()
        min_int = self._int_min()
        return [min(max(v, min_int), max_int) for v in seq]

    def _int_max(self):
        """
        Returns the maximum representable integer value for the current data type suffix.
        """
        if self._is_fp():
            return None
        # setall(-1) yields all-ones bits == unsigned max for the lane width.
        max_u = self._to_unsigned(self.setall(-1))[0]
        if self._is_signed():
            return max_u // 2
        return max_u

    def _int_min(self):
        """
        Returns the minimum representable integer value for the current data type suffix.
        """
        if self._is_fp():
            return None
        if self._is_unsigned():
            return 0
        return -(self._int_max() + 1)

    def _true_mask(self):
        """
        Returns the true mask for the current data type suffix.
        """
        # All-ones lane value of the same-width unsigned type.
        max_unsig = getattr(self.npyv, "setall_u" + self.sfx[1:])(-1)
        return max_unsig[0]

    # NOTE(review): this function takes no ``self`` and is called as a bare
    # name elsewhere in the file (``check_floatstatus(invalid=True)``); in
    # upstream NumPy it is a module-level helper.  Its nesting here looks
    # like an artifact of the flattened extraction — confirm against
    # upstream before relying on it as a method.
    def check_floatstatus(divbyzero=False, overflow=False,
                          underflow=False, invalid=False,
                          all=False):
        """
        Checks the floating point status flags against specified conditions.
        """
        # Bit layout matches the FPE flags tested below:
        #   1 = divide-by-zero, 2 = overflow, 4 = underflow, 8 = invalid
        err = get_floatstatus()
        ret = (all or divbyzero) and (err & 1) != 0
        ret |= (all or overflow) and (err & 2) != 0
        ret |= (all or underflow) and (err & 4) != 0
        ret |= (all or invalid) and (err & 8) != 0
        return ret

    def _to_unsigned(self, vector):
        """Reinterpret a vector (or load a sequence) as the same-width
        unsigned vector, so lanes can be compared bit-exactly."""
        if isinstance(vector, (list, tuple)):
            return getattr(self.npyv, "load_u" + self.sfx[1:])(vector)
        else:
            # Vector objects expose their type via __name__, e.g. "npyv_s32".
            sfx = vector.__name__.replace("npyv_", "")
            if sfx[0] == "b":
                cvt_intrin = "cvt_u{0}_b{0}"
            else:
                cvt_intrin = "reinterpret_u{0}_{1}"
            return getattr(self.npyv, cvt_intrin.format(sfx[1:], sfx))(vector)

    def _pinfinity(self):
        # Positive infinity constant.
        return float("inf")

    def _ninfinity(self):
        # Negative infinity constant.
        return -float("inf")

    def _nan(self):
        # NaN constant.
        return float("nan")

    def _cpu_features(self):
        # Returns the target's CPU features as a space-separated string.
        target = self.target_name
        if target == "baseline":
            target = __cpu_baseline__  # a multi-target may use '__' as a separator
        else:
            target = target.split('__')
        return ' '.join(target)
class _SIMD_BOOL(_Test_Utility):
    """
    To test all boolean vector types at once
    """
    def _nlanes(self):
        # Boolean vectors share the lane count of the same-width unsigned type.
        return getattr(self.npyv, "nlanes_u" + self.sfx[1:])

    def _data(self, start=None, count=None, reverse=False):
        # Alternating false/true lane pattern.  ``start`` and ``count`` are
        # accepted only for signature compatibility with the base class.
        true_mask = self._true_mask()
        rng = range(self._nlanes())
        if reverse:
            rng = reversed(rng)
        return [true_mask if x % 2 else 0 for x in rng]

    def _load_b(self, data):
        # Load as same-width unsigned lanes, then convert to a boolean vector.
        len_str = self.sfx[1:]
        load = getattr(self.npyv, "load_u" + len_str)
        cvt = getattr(self.npyv, f"cvt_b{len_str}_u{len_str}")
        return cvt(load(data))

    def test_operators_logical(self):
        """
        Logical operations for boolean types.
        Test intrinsics:
            npyv_xor_##SFX, npyv_and_##SFX, npyv_or_##SFX, npyv_not_##SFX,
            npyv_andc_b8, npvy_orc_b8, nvpy_xnor_b8
        """
        data_a = self._data()
        data_b = self._data(reverse=True)
        vdata_a = self._load_b(data_a)
        vdata_b = self._load_b(data_b)

        data_and = [a & b for a, b in zip(data_a, data_b)]
        vand = getattr(self, "and")(vdata_a, vdata_b)
        assert vand == data_and

        data_or = [a | b for a, b in zip(data_a, data_b)]
        vor = getattr(self, "or")(vdata_a, vdata_b)
        assert vor == data_or

        data_xor = [a ^ b for a, b in zip(data_a, data_b)]
        vxor = getattr(self, "xor")(vdata_a, vdata_b)
        assert vxor == data_xor

        vnot = getattr(self, "not")(vdata_a)
        assert vnot == data_b

        # BUGFIX: the original read ``self.sfx not in ("b8")``, which is a
        # *substring* test against the string "b8" — not tuple membership.
        # It behaved correctly only because no other suffix ("b16"/"b32"/
        # "b64") happens to be a substring of "b8"; make the intent explicit.
        # andc, orc and xnor are only available for boolean 8-bit lanes.
        if self.sfx not in ("b8",):
            return

        data_andc = [(a & ~b) & 0xFF for a, b in zip(data_a, data_b)]
        vandc = getattr(self, "andc")(vdata_a, vdata_b)
        assert data_andc == vandc

        data_orc = [(a | ~b) & 0xFF for a, b in zip(data_a, data_b)]
        vorc = getattr(self, "orc")(vdata_a, vdata_b)
        assert data_orc == vorc

        data_xnor = [~(a ^ b) & 0xFF for a, b in zip(data_a, data_b)]
        vxnor = getattr(self, "xnor")(vdata_a, vdata_b)
        assert data_xnor == vxnor

    def test_tobits(self):
        # Pack the lanes into an integer bitmask, bit i == lane i truthiness.
        data2bits = lambda data: sum([int(x != 0) << i for i, x in enumerate(data, 0)])
        for data in (self._data(), self._data(reverse=True)):
            vdata = self._load_b(data)
            data_bits = data2bits(data)
            tobits = self.tobits(vdata)
            bin_tobits = bin(tobits)
            assert bin_tobits == bin(data_bits)

    def test_pack(self):
        """
        Pack multiple vectors into one
        Test intrinsics:
            npyv_pack_b8_b16
            npyv_pack_b8_b32
            npyv_pack_b8_b64
        """
        if self.sfx not in ("b16", "b32", "b64"):
            return
        # Two b16, four b32 or eight b64 vectors pack into a single b8 vector.
        data = self._data()
        rdata = self._data(reverse=True)
        vdata = self._load_b(data)
        vrdata = self._load_b(rdata)
        pack_simd = getattr(self.npyv, f"pack_b8_{self.sfx}")
        if self.sfx == "b16":
            spack = [(i & 0xFF) for i in (list(rdata) + list(data))]
            vpack = pack_simd(vrdata, vdata)
        elif self.sfx == "b32":
            spack = [(i & 0xFF) for i in (2*list(rdata) + 2*list(data))]
            vpack = pack_simd(vrdata, vrdata, vdata, vdata)
        elif self.sfx == "b64":
            spack = [(i & 0xFF) for i in (4*list(rdata) + 4*list(data))]
            vpack = pack_simd(vrdata, vrdata, vrdata, vrdata,
                              vdata, vdata, vdata, vdata)
        assert vpack == spack

    @pytest.mark.parametrize("intrin", ["any", "all"])
    @pytest.mark.parametrize("data", (
        [-1, 0],
        [0, -1],
        [-1],
        [0]
    ))
    def test_operators_crosstest(self, intrin, data):
        """
        Test intrinsics:
            npyv_any_##SFX
            npyv_all_##SFX
        """
        data_a = self._load_b(data * self._nlanes())
        # Compare against the Python builtins any()/all() over the lanes;
        # ``intrin`` is restricted by parametrize to those two names.
        func = eval(intrin)
        intrin = getattr(self, intrin)
        desired = func(data_a)
        simd = intrin(data_a)
        assert not not simd == desired
class _SIMD_INT(_Test_Utility):
    """
    To test all integer vector types at once
    """
    def test_operators_shift(self):
        # Shift intrinsics are not exercised for 8-bit lanes.
        if self.sfx in ("u8", "s8"):
            return

        data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        for count in range(self._scalar_size()):
            # left shift; self.load() wraps the Python result into lane width
            data_shl_a = self.load([a << count for a in data_a])
            shl = self.shl(vdata_a, count)
            assert shl == data_shl_a
            # right shift
            data_shr_a = self.load([a >> count for a in data_a])
            shr = self.shr(vdata_a, count)
            assert shr == data_shr_a

        # shift by immediate constant — counts start at 1
        for count in range(1, self._scalar_size()):
            # left shift by an immediate constant
            data_shl_a = self.load([a << count for a in data_a])
            shli = self.shli(vdata_a, count)
            assert shli == data_shl_a
            # right shift by an immediate constant
            data_shr_a = self.load([a >> count for a in data_a])
            shri = self.shri(vdata_a, count)
            assert shri == data_shr_a

    def test_arithmetic_subadd_saturated(self):
        # Saturated add/sub intrinsics exist only for 8/16-bit lanes.
        if self.sfx in ("u32", "s32", "u64", "s64"):
            return

        # Operands chosen near the type limits so saturation actually occurs.
        data_a = self._data(self._int_max() - self.nlanes)
        data_b = self._data(self._int_min(), reverse=True)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        data_adds = self._int_clip([a + b for a, b in zip(data_a, data_b)])
        adds = self.adds(vdata_a, vdata_b)
        assert adds == data_adds

        data_subs = self._int_clip([a - b for a, b in zip(data_a, data_b)])
        subs = self.subs(vdata_a, vdata_b)
        assert subs == data_subs

    def test_math_max_min(self):
        # Element-wise max/min against Python's builtins.
        data_a = self._data()
        data_b = self._data(self.nlanes)
        vdata_a, vdata_b = self.load(data_a), self.load(data_b)

        data_max = [max(a, b) for a, b in zip(data_a, data_b)]
        simd_max = self.max(vdata_a, vdata_b)
        assert simd_max == data_max

        data_min = [min(a, b) for a, b in zip(data_a, data_b)]
        simd_min = self.min(vdata_a, vdata_b)
        assert simd_min == data_min

    @pytest.mark.parametrize("start", [-100, -10000, 0, 100, 10000])
    def test_reduce_max_min(self, start):
        """
        Test intrinsics:
            npyv_reduce_max_##sfx
            npyv_reduce_min_##sfx
        """
        vdata_a = self.load(self._data(start))
        assert self.reduce_max(vdata_a) == max(vdata_a)
        assert self.reduce_min(vdata_a) == min(vdata_a)
# NOTE(review): this method operates on float data (``setall(0.5)``), calls
# ``round_s32`` and checks ``npyv.simd_f64`` — it does not belong with the
# integer tests that precede it.  It appears to be a method of a
# single-precision FP test class whose ``class`` header was lost when this
# file was flattened; confirm placement against upstream NumPy.
def test_conversions(self):
    """
    Round to nearest even integer, assume CPU control register is set to rounding.
    Test intrinsics:
        npyv_round_s32_##SFX
    """
    features = self._cpu_features()
    if not self.npyv.simd_f64 and re.match(r".*(NEON|ASIMD)", features):
        # On NEON targets without f64 support, halfway cases round away
        # from zero — presumably the hardware lacks round-to-even here;
        # emulate that so the reference matches.
        _round = lambda v: int(v + (0.5 if v >= 0 else -0.5))
    else:
        # Python's round() implements round-half-to-even.
        _round = round
    vdata_a = self.load(self._data())
    # Shift by 0.5 to produce halfway cases.
    vdata_a = self.sub(vdata_a, self.setall(0.5))
    data_round = [_round(x) for x in vdata_a]
    vround = self.round_s32(vdata_a)
    assert vround == data_round
class _SIMD_FP64(_Test_Utility):
    """
    To only test double precision
    """
    def test_conversions(self):
        """
        Round to nearest even integer, assume CPU control register is set to rounding.
        Test intrinsics:
            npyv_round_s32_##SFX
        """
        # Build two f64 vectors of halfway cases: the base data minus 0.5,
        # and its scaled negation, then round both into one s32 vector.
        vec_a = self.sub(self.load(self._data()), self.setall(0.5))
        vec_b = self.mul(vec_a, self.setall(-1.5))
        expected = [round(lane) for lane in list(vec_a) + list(vec_b)]
        got = self.round_s32(vec_a, vec_b)
        assert got == expected
class _SIMD_FP(_Test_Utility):
    """
    To test all float vector types at once
    """
    def test_arithmetic_fused(self):
        # All three operands are the SAME vector object (list repeated x3).
        vdata_a, vdata_b, vdata_c = [self.load(self._data())]*3
        vdata_cx2 = self.add(vdata_c, vdata_c)
        # multiply and add, a*b + c
        data_fma = self.load([a * b + c for a, b, c in zip(vdata_a, vdata_b, vdata_c)])
        fma = self.muladd(vdata_a, vdata_b, vdata_c)
        assert fma == data_fma
        # multiply and subtract, a*b - c == (a*b + c) - 2c
        fms = self.mulsub(vdata_a, vdata_b, vdata_c)
        data_fms = self.sub(data_fma, vdata_cx2)
        assert fms == data_fms
        # negate multiply and add, -(a*b) + c == 2c - (a*b + c)
        nfma = self.nmuladd(vdata_a, vdata_b, vdata_c)
        data_nfma = self.sub(vdata_cx2, data_fma)
        assert nfma == data_nfma
        # negate multiply and subtract, -(a*b) - c == -(a*b + c)
        nfms = self.nmulsub(vdata_a, vdata_b, vdata_c)
        data_nfms = self.mul(data_fma, self.setall(-1))
        assert nfms == data_nfms
        # multiply, add for even lanes and subtract for odd lanes
        fmas = list(self.muladdsub(vdata_a, vdata_b, vdata_c))
        assert fmas[0::2] == list(data_fms)[0::2]
        assert fmas[1::2] == list(data_fma)[1::2]

    def test_abs(self):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        # (input, expected |input|) special cases
        abs_cases = ((-0, 0), (ninf, pinf), (pinf, pinf), (nan, nan))
        for case, desired in abs_cases:
            data_abs = [desired]*self.nlanes
            vabs = self.abs(self.setall(case))
            assert vabs == pytest.approx(data_abs, nan_ok=True)

        vabs = self.abs(self.mul(vdata, self.setall(-1)))
        assert vabs == data

    def test_sqrt(self):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        # (input, expected sqrt) special cases per IEEE-754
        sqrt_cases = ((-0.0, -0.0), (0.0, 0.0), (-1.0, nan), (ninf, nan), (pinf, pinf))
        for case, desired in sqrt_cases:
            data_sqrt = [desired]*self.nlanes
            sqrt = self.sqrt(self.setall(case))
            assert sqrt == pytest.approx(data_sqrt, nan_ok=True)

        data_sqrt = self.load([math.sqrt(x) for x in data])  # load to truncate precision
        sqrt = self.sqrt(vdata)
        assert sqrt == data_sqrt

    def test_square(self):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        # (input, expected square) special cases
        square_cases = ((nan, nan), (pinf, pinf), (ninf, pinf))
        for case, desired in square_cases:
            data_square = [desired]*self.nlanes
            square = self.square(self.setall(case))
            assert square == pytest.approx(data_square, nan_ok=True)

        data_square = [x*x for x in data]
        square = self.square(vdata)
        assert square == data_square

    @pytest.mark.parametrize("intrin, func", [("ceil", math.ceil),
    ("trunc", math.trunc), ("floor", math.floor), ("rint", round)])
    def test_rounding(self, intrin, func):
        """
        Test intrinsics:
            npyv_rint_##SFX
            npyv_ceil_##SFX
            npyv_trunc_##SFX
            npyv_floor_##SFX
        """
        intrin_name = intrin
        intrin = getattr(self, intrin)
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        # NaN and infinities round to themselves
        round_cases = ((nan, nan), (pinf, pinf), (ninf, ninf))
        for case, desired in round_cases:
            data_round = [desired]*self.nlanes
            _round = intrin(self.setall(case))
            assert _round == pytest.approx(data_round, nan_ok=True)
        # cross-check a range of scaled values against the Python reference
        for x in range(0, 2**20, 256**2):
            for w in (-1.05, -1.10, -1.15, 1.05, 1.10, 1.15):
                data = self.load([(x+a)*w for a in range(self.nlanes)])
                data_round = [func(x) for x in data]
                _round = intrin(data)
                assert _round == data_round

        # Large values beyond the integer-exact range of the FP type.
        for i in (
            1.1529215045988576e+18, 4.6116860183954304e+18,
            5.902958103546122e+20, 2.3611832414184488e+21
        ):
            x = self.setall(i)
            y = intrin(x)
            data_round = [func(n) for n in x]
            assert y == data_round

        # Signed-zero handling: these inputs must all round to -0.0,
        # checked bit-exactly via the unsigned reinterpretation.
        if intrin_name == "floor":
            data_szero = (-0.0,)
        else:
            data_szero = (-0.0, -0.25, -0.30, -0.45, -0.5)

        for w in data_szero:
            _round = self._to_unsigned(intrin(self.setall(w)))
            data_round = self._to_unsigned(self.setall(-0.0))
            assert _round == data_round

    @pytest.mark.parametrize("intrin", [
        "max", "maxp", "maxn", "min", "minp", "minn"
    ])
    def test_max_min(self, intrin):
        """
        Test intrinsics:
            npyv_max_##sfx
            npyv_maxp_##sfx
            npyv_maxn_##sfx
            npyv_min_##sfx
            npyv_minp_##sfx
            npyv_minn_##sfx
            npyv_reduce_max_##sfx
            npyv_reduce_maxp_##sfx
            npyv_reduce_maxn_##sfx
            npyv_reduce_min_##sfx
            npyv_reduce_minp_##sfx
            npyv_reduce_minn_##sfx
        """
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        # NaN semantics by suffix: 1 = number-propagating ('maxp'/'minp'),
        # 2 = NaN-propagating ('maxn'/'minn'), 0 = unspecified ('max'/'min').
        chk_nan = {"xp": 1, "np": 1, "nn": 2, "xn": 2}.get(intrin[-2:], 0)
        # Python reference: builtin max()/min() selected by the name prefix.
        func = eval(intrin[:3])
        reduce_intrin = getattr(self, "reduce_" + intrin)
        intrin = getattr(self, intrin)
        hf_nlanes = self.nlanes//2

        cases = (
            ([0.0, -0.0], [-0.0, 0.0]),
            ([10, -10],  [10, -10]),
            ([pinf, 10], [10, ninf]),
            ([10, pinf], [ninf, 10]),
            ([10, -10], [10, -10]),
            ([-10, 10], [-10, 10])
        )
        for op1, op2 in cases:
            vdata_a = self.load(op1*hf_nlanes)
            vdata_b = self.load(op2*hf_nlanes)
            data = func(vdata_a, vdata_b)
            simd = intrin(vdata_a, vdata_b)
            assert simd == data
            data = func(vdata_a)
            simd = reduce_intrin(vdata_a)
            assert simd == data

        if not chk_nan:
            return
        if chk_nan == 1:
            # number-propagating: prefer the non-NaN operand
            test_nan = lambda a, b: (
                b if math.isnan(a) else a if math.isnan(b) else b
            )
        else:
            # NaN-propagating: any NaN operand yields NaN
            test_nan = lambda a, b: (
                nan if math.isnan(a) or math.isnan(b) else b
            )
        cases = (
            (nan, 10),
            (10, nan),
            (nan, pinf),
            (pinf, nan),
            (nan, nan)
        )
        for op1, op2 in cases:
            vdata_ab = self.load([op1, op2]*hf_nlanes)
            data = test_nan(op1, op2)
            simd = reduce_intrin(vdata_ab)
            assert simd == pytest.approx(data, nan_ok=True)
            vdata_a = self.setall(op1)
            vdata_b = self.setall(op2)
            data = [data] * self.nlanes
            simd = intrin(vdata_a, vdata_b)
            assert simd == pytest.approx(data, nan_ok=True)

    def test_reciprocal(self):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        data = self._data()
        vdata = self.load(self._data())

        # (input, expected 1/input) special cases
        recip_cases = ((nan, nan), (pinf, 0.0), (ninf, -0.0), (0.0, pinf), (-0.0, ninf))
        for case, desired in recip_cases:
            data_recip = [desired]*self.nlanes
            recip = self.recip(self.setall(case))
            assert recip == pytest.approx(data_recip, nan_ok=True)

        data_recip = self.load([1/x for x in data])  # load to truncate precision
        recip = self.recip(vdata)
        assert recip == data_recip

    def test_special_cases(self):
        """
        Compare Not NaN. Test intrinsics:
            npyv_notnan_##SFX
        """
        # An all-NaN vector must produce an all-false mask.
        nnan = self.notnan(self.setall(self._nan()))
        assert nnan == [0]*self.nlanes

    @pytest.mark.parametrize("intrin_name", [
        "rint", "trunc", "ceil", "floor"
    ])
    def test_unary_invalid_fpexception(self, intrin_name):
        # Rounding NaN/inf must not raise the FP "invalid" exception flag.
        # NOTE(review): ``check_floatstatus`` is called as a bare name — it
        # is the module-level helper that this extraction shows mis-nested
        # inside _Test_Utility; confirm against upstream.
        intrin = getattr(self, intrin_name)
        for d in [float("nan"), float("inf"), -float("inf")]:
            v = self.setall(d)
            clear_floatstatus()
            intrin(v)
            assert check_floatstatus(invalid=True) == False

    @pytest.mark.parametrize('py_comp,np_comp', [
        (operator.lt, "cmplt"),
        (operator.le, "cmple"),
        (operator.gt, "cmpgt"),
        (operator.ge, "cmpge"),
        (operator.eq, "cmpeq"),
        (operator.ne, "cmpneq")
    ])
    def test_comparison_with_nan(self, py_comp, np_comp):
        pinf, ninf, nan = self._pinfinity(), self._ninfinity(), self._nan()
        mask_true = self._true_mask()

        def to_bool(vector):
            # Convert an all-bits mask vector into a list of Python bools.
            return [lane == mask_true for lane in vector]

        intrin = getattr(self, np_comp)
        cmp_cases = ((0, nan), (nan, 0), (nan, nan), (pinf, nan),
                     (ninf, nan), (-0.0, +0.0))
        for case_operand1, case_operand2 in cmp_cases:
            data_a = [case_operand1]*self.nlanes
            data_b = [case_operand2]*self.nlanes
            vdata_a = self.setall(case_operand1)
            vdata_b = self.setall(case_operand2)
            vcmp = to_bool(intrin(vdata_a, vdata_b))
            data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)]
            assert vcmp == data_cmp

    @pytest.mark.parametrize("intrin", ["any", "all"])
    @pytest.mark.parametrize("data", (
        [float("nan"), 0],
        [0, float("nan")],
        [float("nan"), 1],
        [1, float("nan")],
        [float("nan"), float("nan")],
        [0.0, -0.0],
        [-0.0, 0.0],
        [1.0, -0.0]
    ))
    def test_operators_crosstest(self, intrin, data):
        """
        Test intrinsics:
            npyv_any_##SFX
            npyv_all_##SFX
        """
        data_a = self.load(data * self.nlanes)
        # Compare against the Python builtins any()/all(); ``intrin`` is
        # restricted by parametrize to those two names.
        func = eval(intrin)
        intrin = getattr(self, intrin)
        desired = func(data_a)
        simd = intrin(data_a)
        assert not not simd == desired
class _SIMD_ALL(_Test_Utility):
"""
To test all vector types at once
"""
def test_memory_load(self):
    """Contiguous loads: load/loada/loads fill all lanes; loadl only the
    lower half (upper half unspecified, so it must differ from the data)."""
    data = self._data()
    # unaligned load
    load_data = self.load(data)
    assert load_data == data
    # aligned load (presumed from the 'a' suffix — TODO confirm)
    loada_data = self.loada(data)
    assert loada_data == data
    # stream load (presumed from the 's' suffix — TODO confirm)
    loads_data = self.loads(data)
    assert loads_data == data
    # load the lower half of the vector
    loadl = self.loadl(data)
    loadl_half = list(loadl)[:self.nlanes//2]
    data_half = data[:self.nlanes//2]
    assert loadl_half == data_half
    assert loadl != data  # upper half must not equal the full data
def test_memory_store(self):
    """Contiguous stores: store/storea/stores write all lanes; storel and
    storeh write only the lower/upper vector half into the buffer."""
    data = self._data()
    vdata = self.load(data)
    # unaligned store
    store = [0] * self.nlanes
    self.store(store, vdata)
    assert store == data
    # aligned store (presumed from the 'a' suffix — TODO confirm)
    store_a = [0] * self.nlanes
    self.storea(store_a, vdata)
    assert store_a == data
    # stream store (presumed from the 's' suffix — TODO confirm)
    store_s = [0] * self.nlanes
    self.stores(store_s, vdata)
    assert store_s == data
    # store the lower half: only the first nlanes//2 slots are written
    store_l = [0] * self.nlanes
    self.storel(store_l, vdata)
    assert store_l[:self.nlanes//2] == data[:self.nlanes//2]
    assert store_l != vdata  # detect overflow past the half
    # store the upper half: the vector's upper lanes land at the front
    store_h = [0] * self.nlanes
    self.storeh(store_h, vdata)
    assert store_h[:self.nlanes//2] == data[self.nlanes//2:]
    assert store_h != vdata  # detect overflow past the half
@pytest.mark.parametrize("intrin, elsizes, scale, fill", [
    ("self.load_tillz, self.load_till", (32, 64), 1, [0xffff]),
    ("self.load2_tillz, self.load2_till", (32, 64), 2, [0xffff, 0x7fff]),
])
def test_memory_partial_load(self, intrin, elsizes, scale, fill):
    """Partial contiguous loads of n lanes: the remainder is zero-filled
    (*_tillz) or filled with the given fill pattern (*_till)."""
    if self._scalar_size() not in elsizes:
        # These intrinsics exist only for 32/64-bit lanes.
        return
    npyv_load_tillz, npyv_load_till = eval(intrin)
    data = self._data()
    lanes = list(range(1, self.nlanes + 1))
    # Oversized lane counts must clamp to the full vector.
    lanes += [self.nlanes**2, self.nlanes**4]
    for n in lanes:
        load_till = npyv_load_till(data, n, *fill)
        load_tillz = npyv_load_tillz(data, n)
        n *= scale
        data_till = data[:n] + fill * ((self.nlanes-n) // scale)
        assert load_till == data_till
        data_tillz = data[:n] + [0] * (self.nlanes-n)
        assert load_tillz == data_tillz
@pytest.mark.parametrize("intrin, elsizes, scale", [
    ("self.store_till", (32, 64), 1),
    ("self.store2_till", (32, 64), 2),
])
def test_memory_partial_store(self, intrin, elsizes, scale):
    """Partial contiguous stores of n lanes: only the first n*scale slots
    of the destination buffer may be overwritten."""
    if self._scalar_size() not in elsizes:
        # These intrinsics exist only for 32/64-bit lanes.
        return
    npyv_store_till = eval(intrin)
    data = self._data()
    data_rev = self._data(reverse=True)
    vdata = self.load(data)
    lanes = list(range(1, self.nlanes + 1))
    # Oversized lane counts must clamp to the full vector.
    lanes += [self.nlanes**2, self.nlanes**4]
    for n in lanes:
        data_till = data_rev.copy()
        data_till[:n*scale] = data[:n*scale]
        # Destination pre-filled with distinct values to catch overwrites.
        store_till = self._data(reverse=True)
        npyv_store_till(store_till, n, vdata)
        assert store_till == data_till
@pytest.mark.parametrize("intrin, elsizes, scale", [
    ("self.loadn", (32, 64), 1),
    ("self.loadn2", (32, 64), 2),
])
def test_memory_noncont_load(self, intrin, elsizes, scale):
    """Non-contiguous (strided) loads for negative, zero and positive
    strides; ``scale`` is the number of contiguous elements per step."""
    if self._scalar_size() not in elsizes:
        # These intrinsics exist only for 32/64-bit lanes.
        return
    npyv_loadn = eval(intrin)
    for stride in range(-64, 64):
        if stride < 0:
            data = self._data(stride, -stride*self.nlanes)
            # Reference: walk backwards through the buffer in groups of
            # `scale` contiguous elements.
            data_stride = list(itertools.chain(
                *zip(*[data[-i::stride] for i in range(scale, 0, -1)])
            ))
        elif stride == 0:
            # Zero stride broadcasts the first group into every step.
            data = self._data()
            data_stride = data[0:scale] * (self.nlanes//scale)
        else:
            data = self._data(count=stride*self.nlanes)
            data_stride = list(itertools.chain(
                *zip(*[data[i::stride] for i in range(scale)]))
            )
        # Round-trip through load() to normalize lane representation.
        data_stride = self.load(data_stride)
        loadn = npyv_loadn(data, stride)
        assert loadn == data_stride
@pytest.mark.parametrize("intrin, elsizes, scale, fill", [
    ("self.loadn_tillz, self.loadn_till", (32, 64), 1, [0xffff]),
    ("self.loadn2_tillz, self.loadn2_till", (32, 64), 2, [0xffff, 0x7fff]),
])
def test_memory_noncont_partial_load(self, intrin, elsizes, scale, fill):
    """Partial (n-lane) strided loads with zero fill and value fill.

    BUGFIX: the parametrize decorator supplies four argnames including
    ``fill`` and the body uses ``fill``, but the original signature
    omitted it — pytest fails to collect such a test.  ``fill`` is
    restored to the signature.
    """
    if self._scalar_size() not in elsizes:
        # These intrinsics exist only for 32/64-bit lanes.
        return
    npyv_loadn_tillz, npyv_loadn_till = eval(intrin)
    lanes = list(range(1, self.nlanes + 1))
    # Oversized lane counts must clamp to the full vector.
    lanes += [self.nlanes**2, self.nlanes**4]
    for stride in range(-64, 64):
        if stride < 0:
            data = self._data(stride, -stride*self.nlanes)
            data_stride = list(itertools.chain(
                *zip(*[data[-i::stride] for i in range(scale, 0, -1)])
            ))
        elif stride == 0:
            # Zero stride broadcasts the first group into every step.
            data = self._data()
            data_stride = data[0:scale] * (self.nlanes//scale)
        else:
            data = self._data(count=stride*self.nlanes)
            data_stride = list(itertools.chain(
                *zip(*[data[i::stride] for i in range(scale)])
            ))
        # Round-trip through load() to normalize lane representation.
        data_stride = list(self.load(data_stride))
        for n in lanes:
            nscale = n * scale
            llanes = self.nlanes - nscale
            data_stride_till = (
                data_stride[:nscale] + fill * (llanes//scale)
            )
            loadn_till = npyv_loadn_till(data, stride, n, *fill)
            assert loadn_till == data_stride_till
            data_stride_tillz = data_stride[:nscale] + [0] * llanes
            loadn_tillz = npyv_loadn_tillz(data, stride, n)
            assert loadn_tillz == data_stride_tillz
# BUGFIX: the original had no ``parametrize`` decorator even though the
# signature takes (intrin, elsizes, scale), so pytest could not collect the
# test ("fixture 'intrin' not found").  The decorator is reconstructed to
# mirror the sibling tests ``test_memory_noncont_load`` (self.loadn/loadn2)
# and ``test_memory_noncont_partial_store`` (self.storen_till/storen2_till).
# TODO(review): confirm the parameter values against upstream NumPy.
@pytest.mark.parametrize("intrin, elsizes, scale", [
    ("self.storen", (32, 64), 1),
    ("self.storen2", (32, 64), 2),
])
def test_memory_noncont_store(self, intrin, elsizes, scale):
    """Non-contiguous (strided) stores for positive, negative and zero
    strides; guard regions around the buffer must remain untouched."""
    if self._scalar_size() not in elsizes:
        # These intrinsics exist only for 32/64-bit lanes.
        return
    npyv_storen = eval(intrin)
    data = self._data()
    vdata = self.load(data)
    hlanes = self.nlanes // scale
    # positive strides
    for stride in range(1, 64):
        data_storen = [0xff] * stride * self.nlanes
        for s in range(0, hlanes*stride, stride):
            i = (s//stride)*scale
            data_storen[s:s+scale] = data[i:i+scale]
        storen = [0xff] * stride * self.nlanes
        storen += [0x7f]*64  # trailing guard region
        npyv_storen(storen, stride, vdata)
        assert storen[:-64] == data_storen
        assert storen[-64:] == [0x7f]*64  # detect overflow
    # negative strides
    for stride in range(-64, 0):
        data_storen = [0xff] * -stride * self.nlanes
        for s in range(0, hlanes*stride, stride):
            i = (s//stride)*scale
            data_storen[s-scale:s or None] = data[i:i+scale]
        storen = [0x7f]*64  # leading guard region
        storen += [0xff] * -stride * self.nlanes
        npyv_storen(storen, stride, vdata)
        assert storen[64:] == data_storen
        assert storen[:64] == [0x7f]*64  # detect overflow
    # stride 0: only the first `scale` slots receive the last group
    data_storen = [0x7f] * self.nlanes
    storen = data_storen.copy()
    data_storen[0:scale] = data[-scale:]
    npyv_storen(storen, 0, vdata)
    assert storen == data_storen
@pytest.mark.parametrize("intrin, elsizes, scale", [
    ("self.storen_till", (32, 64), 1),
    ("self.storen2_till", (32, 64), 2),
])
def test_memory_noncont_partial_store(self, intrin, elsizes, scale):
    """Partial (n-lane) strided stores; untouched slots and the guard
    regions around the buffer must keep their original values."""
    if self._scalar_size() not in elsizes:
        # These intrinsics exist only for 32/64-bit lanes.
        return
    npyv_storen_till = eval(intrin)
    data = self._data()
    vdata = self.load(data)
    lanes = list(range(1, self.nlanes + 1))
    # Oversized lane counts must clamp to the full vector.
    lanes += [self.nlanes**2, self.nlanes**4]
    hlanes = self.nlanes // scale
    # positive strides
    for stride in range(1, 64):
        for n in lanes:
            data_till = [0xff] * stride * self.nlanes
            tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale)
            for s in range(0, hlanes*stride, stride)[:n]:
                i = (s//stride)*scale
                data_till[s:s+scale] = tdata[i:i+scale]
            storen_till = [0xff] * stride * self.nlanes
            storen_till += [0x7f]*64  # trailing guard region
            npyv_storen_till(storen_till, stride, n, vdata)
            assert storen_till[:-64] == data_till
            assert storen_till[-64:] == [0x7f]*64  # detect overflow
    # negative strides
    for stride in range(-64, 0):
        for n in lanes:
            data_till = [0xff] * -stride * self.nlanes
            tdata = data[:n*scale] + [0xff] * (self.nlanes-n*scale)
            for s in range(0, hlanes*stride, stride)[:n]:
                i = (s//stride)*scale
                data_till[s-scale:s or None] = tdata[i:i+scale]
            storen_till = [0x7f]*64  # leading guard region
            storen_till += [0xff] * -stride * self.nlanes
            npyv_storen_till(storen_till, stride, n, vdata)
            assert storen_till[64:] == data_till
            assert storen_till[:64] == [0x7f]*64  # detect overflow
    # stride 0: only the first `scale` slots receive the last stored group
    for n in lanes:
        data_till = [0x7f] * self.nlanes
        storen_till = data_till.copy()
        data_till[0:scale] = data[:n*scale][-scale:]
        npyv_storen_till(storen_till, 0, n, vdata)
        assert storen_till == data_till
@pytest.mark.parametrize("intrin, table_size, elsize", [
    ("self.lut32", 32, 32),
    ("self.lut16", 16, 64)
])
def test_lut(self, intrin, table_size, elsize):
    """
    Test lookup table intrinsics:
        npyv_lut32_##sfx
        npyv_lut16_##sfx
    """
    if elsize != self._scalar_size():
        # Each LUT variant applies only to its matching lane width.
        return
    intrin = eval(intrin)
    idx_itrin = getattr(self.npyv, f"setall_u{elsize}")
    table = range(0, table_size)
    for i in table:
        # Broadcasting index i must gather value i into every lane.
        broadi = self.setall(i)
        idx = idx_itrin(i)
        lut = intrin(table, idx)
        assert lut == broadi
def test_misc(self):
    """Smoke-test broadcast, set, reinterpret, select, extract and cleanup."""
    # zero() broadcasts 0 across every lane.
    assert self.zero() == [0] * self.nlanes
    # setall(i) broadcasts an arbitrary scalar across every lane.
    for value in range(1, 10):
        assert self.setall(value) == [value] * self.nlanes

    data_a = self._data()
    data_b = self._data(reverse=True)
    vdata_a = self.load(data_a)
    vdata_b = self.load(data_b)

    # set() fills lanes from explicit arguments; setf() takes a leading
    # fill value before the per-lane arguments.
    assert self.set(*data_a) == data_a
    assert self.setf(10, *data_a) == data_a

    # Reinterpret casts must yield vectors named for the target suffix.
    sfxes = ["u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64"]
    if self.npyv.simd_f64:
        sfxes.append("f64")
    if self.npyv.simd_f32:
        sfxes.append("f32")
    for sfx in sfxes:
        reinterpret = getattr(self, "reinterpret_" + sfx)
        assert reinterpret(vdata_a).__name__ == "npyv_" + sfx

    # select() picks lanes from the second operand where the mask is true,
    # otherwise from the third.
    true_mask = self.cmpeq(self.zero(), self.zero())
    false_mask = self.cmpneq(self.zero(), self.zero())
    assert self.select(true_mask, vdata_a, vdata_b) == data_a
    assert self.select(false_mask, vdata_a, vdata_b) == data_b

    # extract0() returns the first lane.
    assert self.extract0(vdata_b) == vdata_b[0]
    self.npyv.cleanup()
def test_reorder(self):
    """Verify combine/zip/unzip lane-reordering intrinsics."""
    data_a = self._data()
    data_b = self._data(reverse=True)
    vdata_a = self.load(data_a)
    vdata_b = self.load(data_b)
    half = self.nlanes // 2
    lo_a, hi_a = data_a[:half], data_a[half:]
    lo_b, hi_b = data_b[:half], data_b[half:]

    # combinel/combineh concatenate the low/high halves of both operands.
    assert self.combinel(vdata_a, vdata_b) == lo_a + lo_b
    assert self.combineh(vdata_a, vdata_b) == hi_a + hi_b
    # combine returns both results as a pair.
    assert self.combine(vdata_a, vdata_b) == (lo_a + lo_b, hi_a + hi_b)

    # Interleave a's and b's lanes to form the expected zip output.
    data_zipl = self.load([v for pair in zip(lo_a, lo_b) for v in pair])
    data_ziph = self.load([v for pair in zip(hi_a, hi_b) for v in pair])
    assert self.zip(vdata_a, vdata_b) == (data_zipl, data_ziph)

    # The x2 store writes the interleaved pair contiguously.
    vzip = [0] * self.nlanes * 2
    self._x2("store")(vzip, (vdata_a, vdata_b))
    assert vzip == list(data_zipl) + list(data_ziph)

    # unzip (and the x2 load) invert the interleave back to (a, b).
    assert self.unzip(data_zipl, data_ziph) == (data_a, data_b)
    assert self._x2("load")(list(data_zipl) + list(data_ziph)) == (data_a, data_b)
def test_reorder_rev64(self):
    """rev64 reverses the element order inside each 64-bit chunk."""
    ssize = self._scalar_size()
    if ssize == 64:
        # A 64-bit lane is a whole chunk; there is nothing to reverse.
        return
    step = 64 // ssize  # lanes per 64-bit chunk
    expected = [
        lane
        for start in range(0, self.nlanes, step)
        for lane in reversed(range(start, start + step))
    ]
    assert self.rev64(self.load(range(self.nlanes))) == expected
def test_reorder_permi128(self):
    """
    Test permuting elements for each 128-bit lane.
    npyv_permi128_##sfx
    """
    ssize = self._scalar_size()
    # Only defined for 32/64-bit elements.
    if ssize < 32:
        return
    data = self.load(self._data())
    permn = 128//ssize   # elements per 128-bit lane (2 or 4)
    permd = permn-1      # bitmask extracting one per-element index
    nlane128 = self.nlanes//permn  # number of 128-bit lanes in the vector
    # Bit offsets used to unpack per-element indices from the counter `i`:
    # one bit each for 64-bit elements, two bits each for 32-bit elements.
    shfl = [0, 1] if ssize == 64 else [0, 2, 4, 6]
    for i in range(permn):
        indices = [(i >> shf) & permd for shf in shfl]
        vperm = self.permi128(data, *indices)
        # Expected: each 128-bit lane independently gathers elements from
        # within itself; `e & -permn` rounds the flat position `e` down to
        # the start of its 128-bit lane.
        data_vperm = [
            data[j + (e & -permn)]
            for e, j in enumerate(indices*nlane128)
        ]
        assert vperm == data_vperm
@pytest.mark.parametrize('func, intrin', [
    (operator.lt, "cmplt"),
    (operator.le, "cmple"),
    (operator.gt, "cmpgt"),
    (operator.ge, "cmpge"),
    (operator.eq, "cmpeq")
])
def test_operators_comparison(self, func, intrin):
    """Cross-check SIMD comparison intrinsics against Python operators."""
    if self._is_fp():
        data_a = self._data()
    else:
        # Push integer operands towards the type limits.
        data_a = self._data(self._int_max() - self.nlanes)
    data_b = self._data(self._int_min(), reverse=True)
    vdata_a, vdata_b = self.load(data_a), self.load(data_b)
    cmp_intrin = getattr(self, intrin)
    mask_true = self._true_mask()

    def as_bools(vector):
        # Comparison intrinsics yield an all-ones mask for each true lane.
        return [lane == mask_true for lane in vector]

    expected = [func(a, b) for a, b in zip(data_a, data_b)]
    assert as_bools(cmp_intrin(vdata_a, vdata_b)) == expected
def test_operators_logical(self):
    """
    Test bitwise logical intrinsics (xor/or/and/not, plus andc which is
    only provided for u8) against lane-wise Python equivalents.
    """
    if self._is_fp():
        data_a = self._data()
    else:
        data_a = self._data(self._int_max() - self.nlanes)
    data_b = self._data(self._int_min(), reverse=True)
    vdata_a, vdata_b = self.load(data_a), self.load(data_b)

    if self._is_fp():
        # Float lanes have no Python bitwise operators; compare through
        # their unsigned bit patterns instead.
        data_cast_a = self._to_unsigned(vdata_a)
        data_cast_b = self._to_unsigned(vdata_b)
        cast, cast_data = self._to_unsigned, self._to_unsigned
    else:
        data_cast_a, data_cast_b = data_a, data_b
        cast, cast_data = lambda a: a, self.load

    data_xor = cast_data([a ^ b for a, b in zip(data_cast_a, data_cast_b)])
    vxor = cast(self.xor(vdata_a, vdata_b))
    assert vxor == data_xor

    data_or = cast_data([a | b for a, b in zip(data_cast_a, data_cast_b)])
    vor = cast(getattr(self, "or")(vdata_a, vdata_b))
    assert vor == data_or

    data_and = cast_data([a & b for a, b in zip(data_cast_a, data_cast_b)])
    vand = cast(getattr(self, "and")(vdata_a, vdata_b))
    assert vand == data_and

    data_not = cast_data([~a for a in data_cast_a])
    vnot = cast(getattr(self, "not")(vdata_a))
    assert vnot == data_not

    # FIX: the original `("u8")` is just the string "u8", making this a
    # substring-membership test. It happened to behave correctly for the
    # current suffixes, but a real one-element tuple states the intent.
    if self.sfx not in ("u8",):
        return
    data_andc = [a & ~b for a, b in zip(data_cast_a, data_cast_b)]
    vandc = cast(getattr(self, "andc")(vdata_a, vdata_b))
    assert vandc == data_andc
@pytest.mark.parametrize("intrin", ["any", "all"])
@pytest.mark.parametrize("data", (
[1, 2, 3, 4],
[-1, -2, -3, -4],
[0, 1, 2, 3, 4],
[0x7f, 0x7fff, 0x7fffffff, 0x7fffffffffffffff],
[0, -1, -2, -3, 4],
[0],
[1],
[-1]
))
def test_operators_crosstest(self, intrin, data):
"""
Test intrinsics:
npyv_any_##SFX
npyv_all_##SFX
"""
data_a = self.load(data * self.nlanes)
func = eval(intrin)
intrin = getattr(self, intrin)
desired = func(data_a)
simd = intrin(data_a)
assert not not simd == desired
def test_conversion_boolean(self):
    """Round-trip conversion between data vectors and boolean vectors."""
    bsfx = "b" + self.sfx[1:]
    to_boolean = getattr(self.npyv, "cvt_%s_%s" % (bsfx, self.sfx))
    from_boolean = getattr(self.npyv, "cvt_%s_%s" % (self.sfx, bsfx))

    all_false = to_boolean(self.setall(0))
    # cmpeq of two equal vectors produces an all-true boolean vector.
    all_true = self.cmpeq(self.setall(0), self.setall(0))
    assert all_false != all_true

    # Converting back must keep the two cases distinguishable.
    assert from_boolean(all_false) != from_boolean(all_true)
def test_conversion_expand(self):
    """
    Test expand intrinsics:
        npyv_expand_u16_u8
        npyv_expand_u32_u16
    """
    if self.sfx not in ("u8", "u16"):
        return
    # Widen the suffix: u8 -> u16, u16 -> u32.
    totype = self.sfx[0] + str(int(self.sfx[1:]) * 2)
    expand = getattr(self.npyv, f"expand_{totype}_{self.sfx}")
    # Data near the max exercises zero-extension rather than sign-extension.
    data = self._data(self._int_max() - self.nlanes)
    half = self.nlanes // 2
    expanded = expand(self.load(data))
    # Expansion yields two wider vectors: low half, then high half.
    assert expanded == (data[:half], data[half:])
def test_arithmetic_subadd(self):
    """Non-saturated lane-wise addition and subtraction."""
    if self._is_fp():
        data_a = self._data()
    else:
        # Integer operands near the limits to exercise wrap behavior.
        data_a = self._data(self._int_max() - self.nlanes)
    data_b = self._data(self._int_min(), reverse=True)
    vdata_a, vdata_b = self.load(data_a), self.load(data_b)

    # Expected values pass through self.load() so they are represented
    # the same way as vector lanes.
    expected_add = self.load([a + b for a, b in zip(data_a, data_b)])
    assert self.add(vdata_a, vdata_b) == expected_add

    expected_sub = self.load([a - b for a, b in zip(data_a, data_b)])
    assert self.sub(vdata_a, vdata_b) == expected_sub
def test_arithmetic_mul(self):
    """Lane-wise multiplication (not provided for 64-bit integers)."""
    if self.sfx in ("u64", "s64"):
        return
    if self._is_fp():
        data_a = self._data()
    else:
        data_a = self._data(self._int_max() - self.nlanes)
    data_b = self._data(self._int_min(), reverse=True)
    vdata_a, vdata_b = self.load(data_a), self.load(data_b)
    # Expected values pass through self.load() so they are represented
    # the same way as vector lanes.
    expected = self.load([a * b for a, b in zip(data_a, data_b)])
    assert self.mul(vdata_a, vdata_b) == expected
def test_arithmetic_div(self):
    """True division, floating-point suffixes only."""
    if not self._is_fp():
        return
    data_a = self._data()
    data_b = self._data(reverse=True)
    vdata_a, vdata_b = self.load(data_a), self.load(data_b)
    expected = self.load([a / b for a, b in zip(data_a, data_b)])
    assert self.div(vdata_a, vdata_b) == expected
def test_arithmetic_intdiv(self):
    """
    Test integer division intrinsics:
        npyv_divisor_##sfx
        npyv_divc_##sfx
    """
    if self._is_fp():
        return
    int_min = self._int_min()
    def trunc_div(a, d):
        """
        Divide towards zero works with large integers > 2^53,
        and wrap around overflow similar to what C does.
        """
        # INT_MIN / -1 overflows; model it as wrapping back to INT_MIN.
        if d == -1 and a == int_min:
            return a
        sign_a, sign_d = a < 0, d < 0
        if a == 0 or sign_a == sign_d:
            # Same signs (or zero dividend): floor division == truncation.
            return a // d
        # Opposite signs: adjust Python's floor division so it truncates
        # towards zero instead of towards negative infinity.
        return (a + sign_d - sign_a) // d + 1
    # Candidate operands: 1, -int_min (== int_max + 1, exceeds the lane
    # range), then samples across each power-of-two band the element size
    # can represent, plus all their negatives.
    data = [1, -int_min]
    data += range(0, 2**8, 2**5)
    data += range(0, 2**8, 2**5-1)
    bsize = self._scalar_size()
    if bsize > 8:
        data += range(2**8, 2**16, 2**13)
        data += range(2**8, 2**16, 2**13-1)
    if bsize > 16:
        data += range(2**16, 2**32, 2**29)
        data += range(2**16, 2**32, 2**29-1)
    if bsize > 32:
        data += range(2**32, 2**64, 2**61)
        data += range(2**32, 2**64, 2**61-1)
    data += [-x for x in data]
    for dividend, divisor in itertools.product(data, data):
        # Normalize the divisor through setall so out-of-range values wrap
        # to the lane type before the zero check.
        divisor = self.setall(divisor)[0]
        if divisor == 0:
            continue
        dividend = self.load(self._data(dividend))
        data_divc = [trunc_div(a, divisor) for a in dividend]
        # divisor() precomputes the parameters that divc() consumes.
        divisor_parms = self.divisor(divisor)
        divc = self.divc(dividend, divisor_parms)
        assert divc == data_divc
def test_arithmetic_reduce_sum(self):
    """
    Test reduce sum intrinsics:
        npyv_sum_##sfx
    """
    # Only provided for these suffixes.
    if self.sfx not in ("u32", "u64", "f32", "f64"):
        return
    data = self._data()
    assert self.sum(self.load(data)) == sum(data)
def test_arithmetic_reduce_sumup(self):
    """
    Test extend reduce sum intrinsics:
        npyv_sumup_##sfx
    """
    if self.sfx not in ("u8", "u16"):
        return
    # Start points covering zero, small values, and the type's extremes.
    starts = (0, self.nlanes, self._int_min(), self._int_max() - self.nlanes)
    for start in starts:
        data = self._data(start)
        assert self.sumup(self.load(data)) == sum(data)
def test_mask_conditional(self):
    """
    Conditional addition and subtraction for all supported data types.
    Test intrinsics:
        npyv_ifadd_##SFX, npyv_ifsub_##SFX
    """
    vdata_a = self.load(self._data())
    vdata_b = self.load(self._data(reverse=True))
    true_mask = self.cmpeq(self.zero(), self.zero())
    false_mask = self.cmpneq(self.zero(), self.zero())

    # With an all-true mask the operation is applied; the fallback operand
    # (last argument) is ignored.
    assert self.ifsub(true_mask, vdata_b, vdata_a, vdata_b) == \
        self.sub(vdata_b, vdata_a)
    # With an all-false mask the fallback operand is returned untouched.
    assert self.ifsub(false_mask, vdata_a, vdata_b, vdata_b) == vdata_b

    assert self.ifadd(true_mask, vdata_b, vdata_a, vdata_b) == \
        self.add(vdata_b, vdata_a)
    assert self.ifadd(false_mask, vdata_a, vdata_b, vdata_b) == vdata_b

    # Conditional division only exists for floating-point suffixes.
    if not self._is_fp():
        return
    expected_div = self.div(vdata_b, vdata_a)
    assert self.ifdiv(true_mask, vdata_b, vdata_a, vdata_b) == expected_div
    # ifdivz falls back to zero instead of taking a fallback operand.
    assert self.ifdivz(true_mask, vdata_b, vdata_a) == expected_div
    assert self.ifdiv(false_mask, vdata_a, vdata_b, vdata_b) == vdata_b
    assert self.ifdivz(false_mask, vdata_a, vdata_b) == self.zero()
bool_sfx = ("b8", "b16", "b32", "b64")
int_sfx = ("u8", "s8", "u16", "s16", "u32", "s32", "u64", "s64")
fp_sfx = ("f32", "f64")
all_sfx = int_sfx + fp_sfx
tests_registry = {
bool_sfx: _SIMD_BOOL,
int_sfx : _SIMD_INT,
fp_sfx : _SIMD_FP,
("f32",): _SIMD_FP32,
("f64",): _SIMD_FP64,
all_sfx : _SIMD_ALL
}
for target_name, npyv in targets.items():
simd_width = npyv.simd if npyv else ''
pretty_name = target_name.split('__')
if len(pretty_name) > 1:
pretty_name = f"({' '.join(pretty_name)})"
else:
pretty_name = pretty_name[0]
skip = ""
skip_sfx = dict()
if not npyv:
skip = f"target '{pretty_name}' isn't supported by current machine"
elif not npyv.simd:
skip = f"target '{pretty_name}' isn't supported by NPYV"
else:
if not npyv.simd_f32:
skip_sfx["f32"] = f"target '{pretty_name}' doesn't support single-precision"
if not npyv.simd_f64:
skip_sfx["f64"] = f"target '{pretty_name}' doesn't support double-precision"
for sfxes, cls in tests_registry.items():
for sfx in sfxes:
skip_m = skip_sfx.get(sfx, skip)
inhr = (cls,)
attr = dict(npyv=targets[target_name], sfx=sfx, target_name=target_name)
tcls = type(f"Test{cls.__name__}_{simd_width}_{target_name}_{sfx}", inhr, attr)
if skip_m:
pytest.mark.skip(reason=skip_m)(tcls)
globals()[tcls.__name__] = tcls