NumPy 源码解析(八十)
.\numpy\numpy\_core\tests\test_array_api_info.py
import numpy as np
import pytest
# Shared array-API inspection namespace used by every test in this module.
info = np.__array_namespace_info__()
def test_capabilities():
    """`capabilities()` reports boolean indexing and data-dependent shapes."""
    caps = info.capabilities()
    # `is True` (rather than `== True`, flake8 E712) also pins the exact
    # type: the inspection API returns plain Python bools.
    assert caps["boolean indexing"] is True
    assert caps["data-dependent shapes"] is True
def test_default_device():
    """The default device is the CPU and matches freshly created arrays."""
    device = info.default_device()
    assert device == "cpu"
    assert np.asarray(0).device == "cpu"
def test_default_dtypes():
    """Default dtypes agree with what plain coercion produces."""
    defaults = info.default_dtypes()

    assert defaults["real floating"] == np.float64
    assert np.asarray(0.0).dtype == np.float64
    assert defaults["complex floating"] == np.complex128
    assert np.asarray(0.0j).dtype == np.complex128
    assert defaults["integral"] == np.intp
    assert np.asarray(0).dtype == np.intp
    assert defaults["indexing"] == np.intp
    assert np.argmax(np.zeros(10)).dtype == np.intp

    # Only "cpu" is a valid device.
    with pytest.raises(ValueError, match="Device not understood"):
        info.default_dtypes(device="gpu")
def test_dtypes_all():
    """`dtypes()` without arguments lists every supported dtype."""
    expected = {
        "bool": np.bool_,
        "int8": np.int8,
        "int16": np.int16,
        "int32": np.int32,
        "int64": np.int64,
        "uint8": np.uint8,
        "uint16": np.uint16,
        "uint32": np.uint32,
        "uint64": np.uint64,
        "float32": np.float32,
        "float64": np.float64,
        "complex64": np.complex64,
        "complex128": np.complex128,
    }
    assert info.dtypes() == expected
# Expected dtype groups for ``info.dtypes(kind=...)``.  Tuple values denote
# compound kinds that alias a tuple of base kind names.
dtype_categories = {
    "bool": {"bool": np.bool_},
    "signed integer": {
        "int8": np.int8,
        "int16": np.int16,
        "int32": np.int32,
        "int64": np.int64,
    },
    "unsigned integer": {
        "uint8": np.uint8,
        "uint16": np.uint16,
        "uint32": np.uint32,
        "uint64": np.uint64,
    },
    "integral": ("signed integer", "unsigned integer"),
    "real floating": {"float32": np.float32, "float64": np.float64},
    "complex floating": {"complex64": np.complex64, "complex128":
                         np.complex128},
    "numeric": ("integral", "real floating", "complex floating"),
}
@pytest.mark.parametrize("kind", dtype_categories)
def test_dtypes_kind(kind):
    """Each named kind matches its mapping (or, for tuples, its base kinds)."""
    expected = dtype_categories[kind]
    actual = info.dtypes(kind=kind)
    if isinstance(expected, tuple):
        # Compound kinds must behave exactly like the tuple of base kinds.
        assert actual == info.dtypes(kind=expected)
    else:
        assert actual == expected
def test_dtypes_tuple():
    """A tuple of kinds returns the union of the individual kinds."""
    expected = {
        "bool": np.bool_,
        "int8": np.int8,
        "int16": np.int16,
        "int32": np.int32,
        "int64": np.int64,
        "uint8": np.uint8,
        "uint16": np.uint16,
        "uint32": np.uint32,
        "uint64": np.uint64,
    }
    assert info.dtypes(kind=("bool", "integral")) == expected
def test_dtypes_invalid_kind():
    # An unknown kind name must raise ValueError.
    with pytest.raises(ValueError, match="unsupported kind"):
        info.dtypes(kind="invalid")
def test_dtypes_invalid_device():
    """An unknown device name must raise ValueError."""
    # The stray `pass` statement that preceded the check was dead code and
    # has been removed.
    with pytest.raises(ValueError, match="Device not understood"):
        info.dtypes(device="gpu")
def test_devices():
    # NumPy only supports the CPU device.
    assert info.devices() == ["cpu"]
.\numpy\numpy\_core\tests\test_array_coercion.py
"""
Tests for array coercion, mainly through testing `np.array` results directly.
Note that other such tests exist, e.g., in `test_api.py` and many corner-cases
are tested (sometimes indirectly) elsewhere.
"""
from itertools import permutations, product
import pytest
from pytest import param
import numpy as np
import numpy._core._multiarray_umath as ncu
from numpy._core._rational_tests import rational
from numpy.testing import (
assert_array_equal, assert_warns, IS_PYPY)
def arraylikes():
    """
    Generator for functions converting an ndarray into various array-likes:
    the array itself, an ndarray subclass, an ``__array__`` wrapper, a
    memoryview, and ``__array_interface__``/``__array_struct__`` wrappers.
    Each yielded item is a callable taking the array to wrap.
    """
    # Base case: the array itself.
    def ndarray(a):
        return a

    yield param(ndarray, id="ndarray")

    # A trivial ndarray subclass (viewed, not copied).
    class MyArr(np.ndarray):
        pass

    def subclass(a):
        return a.view(MyArr)

    yield subclass

    class _SequenceLike():
        # Provide sequence-protocol slots that raise, so that the wrappers
        # below are not accidentally treated as sequences.
        def __len__(self):
            raise TypeError

        def __getitem__(self):
            raise TypeError

    # Wrapper exposing only the ``__array__`` protocol.
    class ArrayDunder(_SequenceLike):
        def __init__(self, a):
            self.a = a

        def __array__(self, dtype=None, copy=None):
            if dtype is None:
                return self.a
            return self.a.astype(dtype)

    yield param(ArrayDunder, id="__array__")

    # Buffer-protocol view of the array.
    yield param(memoryview, id="memoryview")

    # Wrapper exposing only the (Python-level) array interface dict.
    class ArrayInterface:
        def __init__(self, a):
            self.a = a  # need to hold on to keep interface valid
            self.__array_interface__ = a.__array_interface__

    yield param(ArrayInterface, id="__array_interface__")

    # Wrapper exposing only the (C-level) array struct capsule.
    class ArrayStruct:
        def __init__(self, a):
            self.a = a  # need to hold on to keep struct valid
            self.__array_struct__ = a.__array_struct__

    yield param(ArrayStruct, id="__array_struct__")
def scalar_instances(times=True, extended_precision=True, user_dtype=True):
    """Yield pytest params of scalar instances covering (almost) all dtypes.

    Parameters
    ----------
    times : bool
        Include datetime64/timedelta64 scalars.
    extended_precision : bool
        Include longdouble-based scalars.
    user_dtype : bool
        Include the user-defined ``rational`` scalar.
    """
    # NOTE: a stray (non-comment) prose line previously sat between the
    # integer yields below; it was a syntax error and has been removed.

    # Floats:
    yield param(np.sqrt(np.float16(5)), id="float16")
    yield param(np.sqrt(np.float32(5)), id="float32")
    yield param(np.sqrt(np.float64(5)), id="float64")
    if extended_precision:
        yield param(np.sqrt(np.longdouble(5)), id="longdouble")

    # Complex:
    yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
    yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
    if extended_precision:
        yield param(np.sqrt(np.clongdouble(2+3j)), id="clongdouble")

    # Integers:
    yield param(np.int8(2), id="int8")
    yield param(np.int16(2), id="int16")
    yield param(np.int32(2), id="int32")
    yield param(np.int64(2), id="int64")
    yield param(np.uint8(2), id="uint8")
    yield param(np.uint16(2), id="uint16")
    yield param(np.uint32(2), id="uint32")
    yield param(np.uint64(2), id="uint64")

    # Rational (user-defined dtype):
    if user_dtype:
        yield param(rational(1, 2), id="rational")

    # A simple structured void scalar:
    structured = np.array([(1, 3)], "i,i")[0]
    assert isinstance(structured, np.void)
    assert structured.dtype == np.dtype("i,i")
    yield param(structured, id="structured")

    if times:
        # Datetimes and timedelta:
        yield param(np.timedelta64(2), id="timedelta64[generic]")
        yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
        yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")

        yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
        yield param(np.datetime64("2020-06-07 12:43", "ms"),
                    id="datetime64[ms]")

    # Strings and unstructured void:
    yield param(np.bytes_(b"1234"), id="bytes")
    yield param(np.str_("2345"), id="unicode")
    yield param(np.void(b"4321"), id="unstructured_void")
def is_parametric_dtype(dtype):
    """Return True for parametric legacy dtypes.

    A dtype is considered parametric if its itemsize is 0 (e.g. "S0"/"V0")
    or if it is a datetime64/timedelta64 without a unit.
    """
    if dtype.itemsize == 0:
        return True
    is_time_like = issubclass(dtype.type, (np.datetime64, np.timedelta64))
    # A unit-less datetime/timedelta has a name ending in "64" (e.g.
    # "datetime64"); with a unit the name ends in "[s]", "[ms]", etc.
    return is_time_like and dtype.name.endswith("64")
class TestStringDiscovery:
    # Tests that the discovered "S" string length equals len(str(obj)).

    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_basic_stringlength(self, obj):
        # The length must match through every coercion/cast path.
        length = len(str(obj))
        expected = np.dtype(f"S{length}")

        assert np.array(obj, dtype="S").dtype == expected
        assert np.array([obj], dtype="S").dtype == expected

        # A nested array is also discovered correctly:
        arr = np.array(obj, dtype="O")
        assert np.array(arr, dtype="S").dtype == expected
        # The (abstract) DType class is also accepted:
        assert np.array(arr, dtype=type(expected)).dtype == expected
        # `.astype()` behaves the same:
        assert arr.astype("S").dtype == expected
        assert arr.astype(type(np.dtype("S"))).dtype == expected

    @pytest.mark.parametrize("obj",
            [object(), 1.2, 10**43, None, "string"],
            ids=["object", "1.2", "10**43", "None", "string"])
    def test_nested_arrays_stringlength(self, obj):
        # Length discovery recurses into object arrays inside sequences.
        length = len(str(obj))
        expected = np.dtype(f"S{length}")
        arr = np.array(obj, dtype="O")
        assert np.array([arr, arr], dtype="S").dtype == expected

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_unpack_first_level(self, arraylike):
        # Only one level of array-likes is unpacked: the stored 0-D array
        # is stringified as a whole, not via its float dtype.
        obj = np.array([None])
        obj[0] = np.array(1.2)
        length = len(str(obj[0]))
        expected = np.dtype(f"S{length}")

        obj = arraylike(obj)
        arr = np.array([obj], dtype="S")
        assert arr.shape == (1, 1)
        assert arr.dtype == expected
class TestScalarDiscovery:
    """Tests for how np.array() discovers dtype/shape from scalars."""

    def test_void_special_case(self):
        # Void dtypes with fields treat a matching tuple as a scalar:
        arr = np.array((1, 2, 3), dtype="i,i,i")
        assert arr.shape == ()
        arr = np.array([(1, 2, 3)], dtype="i,i,i")
        assert arr.shape == (1,)

    def test_char_special_case(self):
        # The "c" dtype treats a string as a sequence of characters:
        arr = np.array("string", dtype="c")
        assert arr.shape == (6,)
        assert arr.dtype.char == "c"
        arr = np.array(["string"], dtype="c")
        assert arr.shape == (1, 6)
        assert arr.dtype.char == "c"

    def test_char_special_case_deep(self):
        # The "c" special case must still respect the MAXDIMS limit.
        nested = ["string"]  # 2 dimensions (string itself is a sequence)
        for i in range(ncu.MAXDIMS - 2):
            nested = [nested]

        arr = np.array(nested, dtype='c')
        assert arr.shape == (1,) * (ncu.MAXDIMS - 1) + (6,)
        with pytest.raises(ValueError):
            np.array([nested], dtype="c")

    def test_unknown_object(self):
        # An arbitrary object becomes a 0-D object array:
        arr = np.array(object())
        assert arr.shape == ()
        assert arr.dtype == np.dtype("O")

    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar(self, scalar):
        # Direct coercion and nested coercion discover the scalar's dtype:
        arr = np.array(scalar)
        assert arr.shape == ()
        assert arr.dtype == scalar.dtype

        arr = np.array([[scalar, scalar]])
        assert arr.shape == (1, 2)
        assert arr.dtype == scalar.dtype

    def test_scalar_promotion(self):
        # Coercing two scalars together must match np.promote_types (or
        # fall back to object when promotion is undefined).
        for sc1, sc2 in product(scalar_instances(), scalar_instances()):
            sc1, sc2 = sc1.values[0], sc2.values[0]
            try:
                arr = np.array([sc1, sc2])
            except (TypeError, ValueError):
                # Promotion between some pairs (e.g. times) can fail.
                continue
            assert arr.shape == (2,)
            try:
                dt1, dt2 = sc1.dtype, sc2.dtype
                expected_dtype = np.promote_types(dt1, dt2)
                assert arr.dtype == expected_dtype
            except TypeError:
                # Undefined promotion currently falls back to object:
                assert arr.dtype == np.dtype("O")

    # FIX: this parametrize decorator was missing, leaving `scalar`
    # unbound when pytest collected the test.
    @pytest.mark.parametrize("scalar", scalar_instances())
    def test_scalar_coercion(self, scalar):
        # Coercion from object, reshaping, list coercion and item
        # assignment must all agree for a given scalar.
        if isinstance(scalar, np.inexact):
            # Ensure a non-trivially-representable value:
            scalar = type(scalar)((scalar * 2)**0.5)

        if type(scalar) is rational:
            pytest.xfail("Rational to object cast is undefined currently.")

        # Use casting from object:
        arr = np.array(scalar, dtype=object).astype(scalar.dtype)

        # Various ways to create an array containing this scalar:
        arr1 = np.array(scalar).reshape(1)
        arr2 = np.array([scalar])
        arr3 = np.empty(1, dtype=scalar.dtype)
        arr3[0] = scalar
        arr4 = np.empty(1, dtype=scalar.dtype)
        arr4[:] = [scalar]

        assert_array_equal(arr, arr1)
        assert_array_equal(arr, arr2)
        assert_array_equal(arr, arr3)
        assert_array_equal(arr, arr4)

    @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
    @pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
    @pytest.mark.parametrize("cast_to", scalar_instances())
    def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
        """
        Test that in most cases:
        * `np.array(scalar, dtype=dtype)`
        * `np.empty((), dtype=dtype)[()] = scalar`
        * `np.array(scalar).astype(dtype)`
        should behave the same. The only exceptions are parametric dtypes
        (mainly datetime/timedelta without unit) and void without fields.
        """
        dtype = cast_to.dtype  # only the target dtype is parametrized

        for scalar in scalar_instances(times=False):
            scalar = scalar.values[0]

            if dtype.type == np.void:
                if scalar.dtype.fields is not None and dtype.fields is None:
                    # Coercion to unstructured void works, the cast fails:
                    with pytest.raises(TypeError):
                        np.array(scalar).astype(dtype)
                    np.array(scalar, dtype=dtype)
                    np.array([scalar], dtype=dtype)
                    continue

            try:
                cast = np.array(scalar).astype(dtype)
            except (TypeError, ValueError, RuntimeError):
                # If the cast fails, coercion should fail as well:
                with pytest.raises(Exception):
                    np.array(scalar, dtype=dtype)

                if (isinstance(scalar, rational) and
                        np.issubdtype(dtype, np.signedinteger)):
                    # Special-cased (uses scalar.__int__()):
                    return

                with pytest.raises(Exception):
                    np.array([scalar], dtype=dtype)
                res = np.zeros((), dtype=dtype)
                with pytest.raises(Exception):
                    res[()] = scalar

                return

            # Non-error path: all three approaches must be identical:
            arr = np.array(scalar, dtype=dtype)
            assert_array_equal(arr, cast)

            ass = np.zeros((), dtype=dtype)
            ass[()] = scalar
            assert_array_equal(ass, cast)

    @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
    def test_pyscalar_subclasses(self, pyscalar):
        """NumPy arrays are read/write which means that anything but invariant
        behaviour is on thin ice. However, we currently are happy to discover
        subclasses of Python float, int, complex the same as the base classes.
        This should potentially be deprecated.
        """
        class MyScalar(type(pyscalar)):
            pass

        res = np.array(MyScalar(pyscalar))
        expected = np.array(pyscalar)
        assert_array_equal(res, expected)

    @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
    def test_default_dtype_instance(self, dtype_char):
        # Discovery from an (abstract) DType class yields the default
        # instance with the default itemsize:
        if dtype_char in "SU":
            dtype = np.dtype(dtype_char + "1")
        elif dtype_char == "V":
            dtype = np.dtype("V8")
        else:
            dtype = np.dtype(dtype_char)

        discovered_dtype, _ = ncu._discover_array_parameters([], type(dtype))

        assert discovered_dtype == dtype
        assert discovered_dtype.itemsize == dtype.itemsize

    @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
    @pytest.mark.parametrize(["scalar", "error"],
            [(np.float64(np.nan), ValueError),
             (np.array(-1).astype(np.ulonglong)[()], OverflowError)])
    def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error):
        """
        Signed integers are currently different in that they do not cast other
        NumPy scalars, but instead use scalar.__int__(). The hardcoded
        exception to this rule is `np.array(scalar, dtype=integer)`.
        """
        dtype = np.dtype(dtype)

        with np.errstate(invalid="ignore"):
            coerced = np.array(scalar, dtype=dtype)
            cast = np.array(scalar).astype(dtype)
        assert_array_equal(coerced, cast)

        # However, the other paths use __int__() and raise:
        with pytest.raises(error):
            np.array([scalar], dtype=dtype)
        with pytest.raises(error):
            cast[()] = scalar
class TestTimeScalars:
    """Coercion/cast/assignment behaviour of datetime64 and timedelta64."""

    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
    @pytest.mark.parametrize("scalar",
            [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
             param(np.timedelta64(123, "s"), id="timedelta64[s]"),
             param(np.datetime64("NaT", "generic"),
                   id="datetime64[generic](NaT)"),
             param(np.datetime64(1, "D"), id="datetime64[D]")],)
    def test_coercion_basic(self, dtype, scalar):
        # Coercion to a numeric dtype matches the explicit cast; item
        # assignment to an integer dtype however is rejected.
        arr = np.array(scalar, dtype=dtype)
        cast = np.array(scalar).astype(dtype)
        assert_array_equal(arr, cast)

        ass = np.ones((), dtype=dtype)
        if issubclass(dtype, np.integer):
            with pytest.raises(TypeError):
                # raises, as would np.array([scalar], dtype=dtype)
                ass[()] = scalar
        else:
            ass[()] = scalar
            assert_array_equal(ass, cast)

    @pytest.mark.parametrize("dtype", [np.int64, np.float32])
    @pytest.mark.parametrize("scalar",
            [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
             param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
    def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
        # Only "ns" and "generic" timedeltas can be converted to numbers,
        # so assignment succeeds here (unlike test_coercion_basic).
        arr = np.array(scalar, dtype=dtype)
        cast = np.array(scalar).astype(dtype)
        ass = np.ones((), dtype=dtype)
        ass[()] = scalar

        assert_array_equal(arr, cast)
        # FIX: previously compared `cast` with itself (a tautology);
        # the assigned value is what must match the cast.
        assert_array_equal(ass, cast)

    @pytest.mark.parametrize("dtype", ["S6", "U6"])
    @pytest.mark.parametrize(["val", "unit"],
            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
    def test_coercion_assignment_datetime(self, val, unit, dtype):
        # Coercion and assignment of datetimes to strings truncate the
        # string representation; the explicit cast raises instead.
        scalar = np.datetime64(val, unit)
        dtype = np.dtype(dtype)
        cut_string = dtype.type(str(scalar)[:6])

        arr = np.array(scalar, dtype=dtype)
        assert arr[()] == cut_string
        ass = np.ones((), dtype=dtype)
        ass[()] = scalar
        assert ass[()] == cut_string

        with pytest.raises(RuntimeError):
            # The explicit cast goes through a different path and fails:
            np.array(scalar).astype(dtype)

    @pytest.mark.parametrize(["val", "unit"],
            [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
    def test_coercion_assignment_timedelta(self, val, unit):
        scalar = np.timedelta64(val, unit)

        # Unlike datetime64, timedelta64 allows the (unsafe) cast, which
        # truncates like the coercion/assignment paths:
        np.array(scalar, dtype="S6")
        cast = np.array(scalar).astype("S6")
        ass = np.ones((), dtype="S6")
        ass[()] = scalar
        expected = scalar.astype("S")[:6]
        assert cast[()] == expected
        assert ass[()] == expected
    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_nested_arraylikes(self, arraylike):
        # Nesting an array-like deeper than MAXDIMS fails for a non-object
        # dtype, but succeeds (keeping the objects) for dtype=object.
        initial = arraylike(np.ones((1, 1)))

        nested = initial
        for i in range(ncu.MAXDIMS - 1):
            nested = [nested]

        with pytest.raises(ValueError, match=".*would exceed the maximum"):
            np.array(nested, dtype="float64")

        # object dtype stops dimension discovery at MAXDIMS:
        arr = np.array(nested, dtype=object)
        assert arr.dtype == np.dtype("O")
        assert arr.shape == (1,) * ncu.MAXDIMS
        assert arr.item() == np.array(initial).item()
    def test_empty_sequence(self):
        # An empty list stops dimension discovery, so mixing [] with deeper
        # entries works for object dtype...
        arr = np.array([[], [1], [[1]]], dtype=object)
        assert arr.shape == (3,)

        # ...but mixing an empty 2-D array in raises:
        with pytest.raises(ValueError):
            np.array([[], np.empty((0, 1))], dtype=object)
    def test_array_of_different_depths(self):
        # When mixed arrays have mismatching dimensions, discovery only
        # uses the dimensions up to the first mismatch.
        arr = np.zeros((3, 2))
        mismatch_first_dim = np.zeros((1, 2))
        mismatch_second_dim = np.zeros((3, 3))

        dtype, shape = ncu._discover_array_parameters(
            [arr, mismatch_second_dim], dtype=np.dtype("O"))
        assert shape == (2, 3)

        dtype, shape = ncu._discover_array_parameters(
            [arr, mismatch_first_dim], dtype=np.dtype("O"))
        assert shape == (2,)
        # The coercion stores the mismatching arrays as objects themselves:
        res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
        assert res[0] is arr
        assert res[1] is mismatch_first_dim
class TestBadSequences:
    # Sequences that mutate themselves while NumPy coerces them.  Coercion
    # must not crash; growing/replacing mid-coercion raises RuntimeError.

    def test_growing_list(self):
        # A list that grows inside __len__ during coercion is rejected.
        obj = []
        class mylist(list):
            def __len__(self):
                obj.append([1, 2])  # grows the outer sequence
                return super().__len__()

        obj.append(mylist([1, 2]))

        with pytest.raises(RuntimeError):
            np.array(obj)

    def test_mutated_list(self):
        # Replacing (not growing) an item inside __len__ is tolerated;
        # np.array() must simply not crash here.
        obj = []
        class mylist(list):
            def __len__(self):
                obj[0] = [2, 3]  # replace the first item
                return super().__len__()

        obj.append([2, 3])
        obj.append(mylist([1, 2]))
        np.array(obj)

    def test_replace_0d_array(self):
        # Replacing the content of a stored 0-D array mid-coercion is
        # detected and raises RuntimeError.
        obj = []
        class baditem:
            def __len__(self):
                obj[0][0] = 2  # mutate the sibling item
                raise ValueError("not actually a sequence!")

            def __getitem__(self):
                pass

        obj.append([np.array(2), baditem()])
        with pytest.raises(RuntimeError):
            np.array(obj)
class TestArrayLikes:
    """Coercion behaviour of objects implementing the array-like protocols."""

    @pytest.mark.parametrize("arraylike", arraylikes())
    def test_0d_object_special_case(self, arraylike):
        arr = np.array(0.)
        obj = arraylike(arr)
        # A lone array-like converts directly:
        res = np.array(obj, dtype=object)
        assert_array_equal(arr, res)
        # But nested inside a sequence, a 0-D array-like is stored as the
        # object itself:
        res = np.array([obj], dtype=object)
        assert res[0] is obj

    @pytest.mark.parametrize("arraylike", arraylikes())
    @pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)])
    def test_object_assignment_special_case(self, arraylike, arr):
        # Assigning into an object array stores the wrapper itself:
        obj = arraylike(arr)
        empty = np.arange(1, dtype=object)
        empty[:] = [obj]
        assert empty[0] is obj

    def test_0d_generic_special_case(self):
        class ArraySubclass(np.ndarray):
            def __float__(self):
                raise TypeError("e.g. quantities raise on this")

        arr = np.array(0.)
        obj = arr.view(ArraySubclass)
        res = np.array(obj)
        assert_array_equal(arr, res)
        # Nested coercion attempts the float conversion and fails:
        with pytest.raises(TypeError):
            np.array([obj])

        # A 0-D memoryview behaves the same (ValueError when nested):
        obj = memoryview(arr)
        res = np.array(obj)
        assert_array_equal(arr, res)
        with pytest.raises(ValueError):
            np.array([obj])

    def test_arraylike_classes(self):
        # Classes (not instances) are stored as objects, even NumPy's own
        # scalar types:
        arr = np.array(np.int64)
        assert arr[()] is np.int64
        arr = np.array([np.int64])
        assert arr[0] is np.int64

        # This also holds for a class whose *instances* would be
        # array-likes (the protocols live on the class as properties):
        class ArrayLike:
            @property
            def __array_interface__(self):
                pass

            @property
            def __array_struct__(self):
                pass

            def __array__(self, dtype=None, copy=None):
                pass

        arr = np.array(ArrayLike)
        assert arr[()] is ArrayLike
        arr = np.array([ArrayLike])
        assert arr[0] is ArrayLike

    @pytest.mark.skipif(
            np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
    def test_too_large_array_error_paths(self):
        """Test the error paths, including for memory leaks"""
        arr = np.array(0, dtype="uint8")
        arr = np.broadcast_to(arr, 2**62)

        # Repeat to make (reference-count) leaks visible:
        for i in range(5):
            with pytest.raises(MemoryError):
                np.array(arr)
            with pytest.raises(MemoryError):
                np.array([arr])

    @pytest.mark.parametrize("attribute",
        ["__array_interface__", "__array__", "__array_struct__"])
    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
    def test_bad_array_like_attributes(self, attribute, error):
        # FIX: the `def` line of this test was missing from the source,
        # leaving the decorators and body dangling at class level.
        # Critical errors raised during attribute lookup must propagate:
        class BadInterface:
            def __getattr__(self, attr):
                if attr == attribute:
                    raise error
                super().__getattr__(attr)

        with pytest.raises(error):
            np.array(BadInterface())

    @pytest.mark.parametrize("error", [RecursionError, MemoryError])
    def test_bad_array_like_bad_length(self, error):
        # Critical errors raised in __len__ must propagate as well:
        class BadSequence:
            def __len__(self):
                raise error

            def __getitem__(self):
                # must have getitem to be a sequence
                return 1

        with pytest.raises(error):
            np.array(BadSequence())
class TestAsArray:
    """Test expected behaviors of ``asarray``."""

    def test_dtype_identity(self):
        """Confirm the intended behavior of the *dtype* keyword argument.

        A dtype passed to ``asarray()`` is applied to the result: a unique
        ``np.dtype`` instance (e.g. one carrying metadata) forces a new
        array object, but for equivalent dtypes the underlying data (the
        ``.base`` object) is shared with the original array.

        See https://github.com/numpy/numpy/issues/1468.
        """
        int_array = np.array([1, 2, 3], dtype='i')
        assert np.asarray(int_array) is int_array

        # The character code resolves to the identical dtype, so no new
        # object is created:
        assert np.asarray(int_array, dtype='i') is int_array

        # An equal-but-distinct dtype (metadata) forces a new array object
        # that views the same data:
        unequal_type = np.dtype('i', metadata={'spam': True})
        annotated_int_array = np.asarray(int_array, dtype=unequal_type)
        assert annotated_int_array is not int_array
        assert annotated_int_array.base is int_array

        # A second, equivalent-but-distinct descriptor behaves the same:
        equivalent_requirement = np.dtype('i', metadata={'spam': True})
        annotated_int_array_alt = np.asarray(annotated_int_array,
                                             dtype=equivalent_requirement)
        assert unequal_type == equivalent_requirement
        assert unequal_type is not equivalent_requirement
        assert annotated_int_array_alt is not annotated_int_array
        assert annotated_int_array_alt.dtype is equivalent_requirement

        # Find an equal-but-not-identical pair among the C integer codes
        # (which pair is equal depends on the platform):
        integer_type_codes = ('i', 'l', 'q')
        integer_dtypes = [np.dtype(code) for code in integer_type_codes]
        typeA = None
        typeB = None
        for typeA, typeB in permutations(integer_dtypes, r=2):
            if typeA == typeB:
                assert typeA is not typeB
                break
        assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype)

        # Conversions between distinct integer codes never return the
        # original object:
        long_int_array = np.asarray(int_array, dtype='l')
        long_long_int_array = np.asarray(int_array, dtype='q')
        assert long_int_array is not int_array
        assert long_long_int_array is not int_array
        assert np.asarray(long_int_array, dtype='q') is not long_int_array
        array_a = np.asarray(int_array, dtype=typeA)
        assert typeA == typeB
        assert typeA is not typeB
        assert array_a.dtype is typeA
        assert array_a is not np.asarray(array_a, dtype=typeB)
        assert np.asarray(array_a, dtype=typeB).dtype is typeB
        assert array_a is np.asarray(array_a, dtype=typeB).base
class TestSpecialAttributeLookupFailure:
    # An error raised while *looking up* the special attributes must
    # propagate out of np.array().

    class WeirdArrayLike:
        @property
        def __array__(self, dtype=None, copy=None):
            raise RuntimeError("oops!")

    class WeirdArrayInterface:
        @property
        def __array_interface__(self):
            raise RuntimeError("oops!")

    def test_deprecated(self):
        with pytest.raises(RuntimeError):
            np.array(self.WeirdArrayLike())
        with pytest.raises(RuntimeError):
            np.array(self.WeirdArrayInterface())
def test_subarray_from_array_construction():
arr = np.array([1, 2])
res = arr.astype("2i")
assert_array_equal(res, [[1, 1], [2, 2]])
res = np.array(arr, dtype="(2,)i")
assert_array_equal(res, [[1, 1], [2, 2]])
res = np.array([[(1,), (2,)], arr], dtype="2i")
assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 1], [2, 2]]])
arr = np.arange(5 * 2).reshape(5, 2)
expected = np.broadcast_to(arr[:, :, np.newaxis, np.newaxis], (5, 2, 2, 2))
res = arr.astype("(2,2)f")
assert_array_equal(res, expected)
res = np.array(arr, dtype="(2,2)f")
assert_array_equal(res, expected)
def test_empty_string():
    """Discovery of the string length for empty strings ("S1", holding NUL)."""
    direct = np.array([""] * 10, dtype="S")
    assert_array_equal(direct, np.array("\0", "S1"))
    assert direct.dtype == "S1"

    objects = np.array([""] * 10, dtype=object)

    casted = objects.astype("S")
    assert_array_equal(casted, b"")
    assert casted.dtype == "S1"

    coerced = np.array(objects, dtype="S")
    assert_array_equal(coerced, b"")
    # Coercing from object apparently falls back to the object dtype's
    # itemsize here rather than scanning the strings (see assertion):
    assert coerced.dtype == f"S{np.dtype('O').itemsize}"

    mixed = np.array([[""] * 10, objects], dtype="S")
    assert_array_equal(mixed, b"")
    assert mixed.shape == (2, 10)
    assert mixed.dtype == "S1"
.\numpy\numpy\_core\tests\test_array_interface.py
import sys
import pytest
import numpy as np
from numpy.testing import extbuild, IS_WASM, IS_EDITABLE
@pytest.fixture
def get_module(tmp_path):
    """ Some codes to generate data and manage temporary buffers use when
    sharing with numpy via the array interface protocol.
    """
    if not sys.platform.startswith('linux'):
        pytest.skip('link fails on cygwin')
    if IS_WASM:
        pytest.skip("Can't build module inside Wasm")
    if IS_EDITABLE:
        pytest.skip("Can't build module for editable install")

    prologue = '''
        #include <Python.h>
        #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
        #include <numpy/arrayobject.h>
        #include <stdio.h>
        #include <math.h>

        NPY_NO_EXPORT
        void delete_array_struct(PyObject *cap) {

            /* get the array interface structure */
            PyArrayInterface *inter = (PyArrayInterface*)
                PyCapsule_GetPointer(cap, NULL);

            /* get the buffer by which data was shared */
            double *ptr = (double*)PyCapsule_GetContext(cap);

            /* for the purposes of the regression test set the elements
               to nan */
            for (npy_intp i = 0; i < inter->shape[0]; ++i)
                ptr[i] = nan("");

            /* free the shared buffer */
            free(ptr);

            /* free the array interface structure */
            free(inter->shape);
            free(inter);

            fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
                " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
        }
        '''

    # NOTE(fix): the C source below previously contained Python-style "#"
    # comments, which the C preprocessor would reject; they are now proper
    # C comments so the extension can actually compile.
    functions = [
        ("new_array_struct", "METH_VARARGS", """

            long long n_elem = 0;
            double value = 0.0;

            /* expect one long long (element count) and one double (fill) */
            if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
                Py_RETURN_NONE;
            }

            /* allocate and initialize the data to share with numpy */
            long long n_bytes = n_elem*sizeof(double);
            double *data = (double*)malloc(n_bytes);
            if (!data) {
                PyErr_Format(PyExc_MemoryError,
                    "Failed to malloc %lld bytes", n_bytes);
                Py_RETURN_NONE;
            }

            /* initialize the array to the requested value */
            for (long long i = 0; i < n_elem; ++i) {
                data[i] = value;
            }

            /* calculate the shape and stride */
            int nd = 1;
            npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
            npy_intp *shape = ss;
            npy_intp *stride = ss + nd;
            shape[0] = n_elem;
            stride[0] = sizeof(double);

            /* construct the array interface structure */
            PyArrayInterface *inter = (PyArrayInterface*)
                malloc(sizeof(PyArrayInterface));
            memset(inter, 0, sizeof(PyArrayInterface));
            inter->two = 2;
            inter->nd = nd;
            inter->typekind = 'f';
            inter->itemsize = sizeof(double);
            inter->shape = shape;
            inter->strides = stride;
            inter->data = data;
            inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
                           NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;

            /* package into a capsule; keep the data pointer as context so
               the destructor can free it */
            PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
            PyCapsule_SetContext(cap, data);

            fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
                " ptr = %ld\\n", (long)cap, (long)inter, (long)data);

            return cap;
        """)
    ]
    more_init = "import_array();"

    try:
        import array_interface_testing
        return array_interface_testing
    except ImportError:
        pass

    # if the module does not exist yet, build and import it
    # (fix: this comment used C++ "//" syntax before, a Python SyntaxError)
    return extbuild.build_and_import_extension('array_interface_testing',
                                               functions,
                                               prologue=prologue,
                                               include_dirs=[np.get_include()],
                                               build_dir=tmp_path,
                                               more_init=more_init)
@pytest.mark.slow
def test_cstruct(get_module):
    """
    Test case for validating the behavior of the PyCapsule destructor
    when numpy releases its reference to shared data through the array
    interface protocol.
    """

    class data_source:
        """
        This class is for testing the timing of the PyCapsule destructor
        invoked when numpy release its reference to the shared data as part of
        the numpy array interface protocol. If the PyCapsule destructor is
        called early the shared data is freed and invalid memory accesses will
        occur.
        """

        def __init__(self, size, value):
            self.size = size
            self.value = value

        @property
        def __array_struct__(self):
            """
            Method returning a new array struct using size and value from
            the instance.
            """
            return get_module.new_array_struct(self.size, self.value)

    # write diagnostics to the real stderr (matching the C side's fprintf)
    stderr = sys.__stderr__

    expected_value = -3.1415
    multiplier = -10000.0

    # create an object to share data
    stderr.write(' ---- create an object to share data ---- \n')
    buf = data_source(256, expected_value)
    stderr.write(' ---- OK!\n\n')

    # share the data via the C-level array interface protocol
    stderr.write(' ---- share data via the array interface protocol ---- \n')
    arr = np.array(buf, copy=False)
    stderr.write('arr.__array_interface___ = %s\n' % (
        str(arr.__array_interface__)))
    stderr.write('arr.base = %s\n' % (str(arr.base)))
    stderr.write(' ---- OK!\n\n')

    # drop the data source; the numpy view (via the capsule in arr.base)
    # must keep the shared buffer alive
    stderr.write(' ---- destroy the object that shared data ---- \n')
    buf = None
    stderr.write(' ---- OK!\n\n')

    # the buffer must still hold the original values (the destructor
    # overwrites it with nan when it runs)
    assert np.allclose(arr, expected_value)

    # read the shared data
    stderr.write(' ---- read shared data ---- \n')
    stderr.write('arr = %s\n' % (str(arr)))
    stderr.write(' ---- OK!\n\n')

    # modify the shared buffer through the numpy view
    stderr.write(' ---- modify shared data ---- \n')
    arr *= multiplier
    expected_value *= multiplier
    stderr.write('arr.__array_interface___ = %s\n' % (
        str(arr.__array_interface__)))
    stderr.write('arr.base = %s\n' % (str(arr.base)))
    stderr.write(' ---- OK!\n\n')

    # read back the modified data
    stderr.write(' ---- read modified shared data ---- \n')
    stderr.write('arr = %s\n' % (str(arr)))
    stderr.write(' ---- OK!\n\n')

    assert np.allclose(arr, expected_value)

    # releasing the last view triggers the capsule destructor
    stderr.write(' ---- free shared data ---- \n')
    arr = None
    stderr.write(' ---- OK!\n\n')
.\numpy\numpy\_core\tests\test_casting_floatingpoint_errors.py
import pytest
from pytest import param
from numpy.testing import IS_WASM
import numpy as np
def values_and_dtypes():
    """Yield (value, dtype) pairs whose cast triggers a floating point error.

    Casts of out-of-range floats give "overflow" warnings; inf/nan to
    integer casts give "invalid" warnings.  (The Python int/float paths do
    not strictly need testing in all the same situations, but it does no
    harm.)
    """
    # Casts to float16:
    yield param(70000, "float16", id="int-to-f2")
    yield param("70000", "float16", id="str-to-f2")
    yield param(70000.0, "float16", id="float-to-f2")
    yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2")
    yield param(np.float64(70000.), "float16", id="double-to-f2")
    yield param(np.float32(70000.), "float16", id="float-to-f2")
    # Casts to float32 (ids fixed: these previously said "-f2"):
    yield param(10**100, "float32", id="int-to-f4")
    yield param(1e100, "float32", id="float-to-f4")
    yield param(np.longdouble(1e300), "float32", id="longdouble-to-f4")
    yield param(np.float64(1e300), "float32", id="double-to-f4")

    # Casts to float64; only possible when longdouble has more range than
    # double (otherwise the max longdouble fits into a float64):
    max_ld = np.finfo(np.longdouble).max
    spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0))
    if max_ld - spacing > np.finfo("f8").max:
        yield param(np.finfo(np.longdouble).max, "float64",
                    id="longdouble-to-f8")

    # Casts to complex64 (same component width as float32):
    yield param(2e300, "complex64", id="float-to-c8")
    yield param(2e300+0j, "complex64", id="complex-to-c8")
    yield param(2e300j, "complex64", id="complex-to-c8")
    yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8")

    # Invalid float-to-integer casts (inf/nan); overflow while creating
    # the float scalar itself is irrelevant here, hence the errstate:
    with np.errstate(over="ignore"):
        for to_dt in np.typecodes["AllInteger"]:
            for value in [np.inf, np.nan]:
                for from_dt in np.typecodes["AllFloat"]:
                    from_dt = np.dtype(from_dt)
                    from_val = from_dt.type(value)

                    yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}")
def check_operations(dtype, value):
    """Yield callables exercising NumPy's dedicated casting paths.

    Each yielded zero-argument function casts/assigns `value` into an array
    of `dtype` through a different internal code path; floating point
    errors occurring during the cast should surface from each of them.
    (The duplicate ``yield copyto`` present before has been removed.)
    """
    if dtype.kind != 'i':
        # Integer assignment uses stricter setitem logic, so plain item
        # assignment and fill are only checked for non-integer dtypes.
        def assignment():
            arr = np.empty(3, dtype=dtype)
            arr[0] = value

        yield assignment

        def fill():
            arr = np.empty(3, dtype=dtype)
            arr.fill(value)

        yield fill

    def copyto_scalar():
        arr = np.empty(3, dtype=dtype)
        np.copyto(arr, value, casting="unsafe")

    yield copyto_scalar

    def copyto():
        arr = np.empty(3, dtype=dtype)
        np.copyto(arr, np.array([value, value, value]), casting="unsafe")

    yield copyto

    def copyto_scalar_masked():
        arr = np.empty(3, dtype=dtype)
        np.copyto(arr, value, casting="unsafe",
                  where=[True, False, True])

    yield copyto_scalar_masked

    def copyto_masked():
        arr = np.empty(3, dtype=dtype)
        np.copyto(arr, np.array([value, value, value]), casting="unsafe",
                  where=[True, False, True])

    yield copyto_masked

    def direct_cast():
        np.array([value, value, value]).astype(dtype)

    yield direct_cast

    def direct_cast_nd_strided():
        arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :]
        arr.astype(dtype)

    yield direct_cast_nd_strided

    def boolean_array_assignment():
        arr = np.empty(3, dtype=dtype)
        arr[[True, False, True]] = np.array([value, value])

    yield boolean_array_assignment

    def integer_array_assignment():
        arr = np.empty(3, dtype=dtype)
        values = np.array([value, value])
        arr[[0, 1]] = values

    yield integer_array_assignment

    def integer_array_assignment_with_subspace():
        arr = np.empty((5, 3), dtype=dtype)
        values = np.array([value, value, value])
        arr[[0, 2]] = values

    yield integer_array_assignment_with_subspace

    def flat_assignment():
        arr = np.empty((3,), dtype=dtype)
        values = np.array([value, value, value])
        arr.flat[:] = values

    yield flat_assignment
@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes())
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
def test_floatingpoint_errors_casting(dtype, value):
    """Each casting path warns by default and raises under errstate."""
    # Hoisted out of the loop: the dtype conversion and the expected
    # warning category are loop-invariant.
    dtype = np.dtype(dtype)
    match = "invalid" if dtype.kind in 'iu' else "overflow"
    for operation in check_operations(dtype, value):
        with pytest.warns(RuntimeWarning, match=match):
            operation()

        with np.errstate(all="raise"):
            with pytest.raises(FloatingPointError, match=match):
                operation()
.\numpy\numpy\_core\tests\test_casting_unittests.py
"""
The tests exercise the casting machinery in a more low-level manner.
The reason is mostly to test a new implementation of the casting machinery.
Unlike most tests in NumPy, these are closer to unit-tests rather
than integration tests.
"""
import pytest
import textwrap
import enum
import random
import numpy as np
from numpy.lib.stride_tricks import as_strided
from numpy.testing import assert_array_equal
from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl
# Dtype characters of the simple builtin types used below.
simple_dtypes = "?bhilqBHILQefdFD"
if np.dtype("l").itemsize != np.dtype("q").itemsize:
    # The expectation tables assume long == long long (64-bit Linux);
    # drop "l"/"L" on platforms where they differ.
    simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
# Work with the DType classes, not dtype instances:
simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
def simple_dtype_instances():
    """Yield pytest params for each simple dtype, plus its byte-swapped form."""
    for dtype_class in simple_dtypes:
        native = dtype_class()
        variants = [native]
        if native.byteorder != "|":
            # Byte-order-sensitive dtypes also get a swapped variant.
            variants.append(native.newbyteorder())
        for dt in variants:
            yield pytest.param(dt, id=str(dt))
def get_expected_stringlength(dtype):
    """Returns the string length when casting the basic dtypes to strings.
    """
    if dtype == np.bool:
        return 5  # "False" needs five characters
    if dtype.kind in "iu":
        # Digits needed to print the extreme value of each integer size.
        digits = {1: 3, 2: 5, 4: 10, 8: 20}.get(dtype.itemsize)
        if digits is None:
            raise AssertionError(f"did not find expected length for {dtype}")
        # Signed integers reserve one extra character for the sign.
        return digits + 1 if dtype.kind == "i" else digits
    if dtype.char == "g":
        return 48
    if dtype.char == "G":
        return 48 * 2  # two long-double fields
    if dtype.kind == "f":
        return 32
    if dtype.kind == "c":
        return 32 * 2  # two float fields
    raise AssertionError(f"did not find expected length for {dtype}")
class Casting(enum.IntEnum):
# Python mirror of NumPy's C-level casting levels (NPY_NO_CASTING ...
# NPY_UNSAFE_CASTING); IntEnum so levels compare by strictness, e.g.
# `casting <= Casting.safe`.
no = 0
equiv = 1
safe = 2
same_kind = 3
unsafe = 4
# NOTE(review): the body of this function is garbled in this extraction:
# the cast table is truncated after the "B" row and pasted three times
# (once with a stray Markdown fence), and the lines that split `table`
# into rows and build the `dtypes` list used by the loop below are
# missing entirely.  Restore the full 24-dtype table and the
# `dtypes`/`table` setup from the upstream NumPy test suite before use.
def _get_cancast_table():
table = textwrap.dedent("""
X ? b h i l q B H I L Q e f d g F D G S U V O M m
? # = = = = = = = = = = = = = = = = = = = = = . =
b . # = = = = . . . . . = = = = = = = = = = = . =
h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
B . ~ = = = = # = = = =```
# Multi-line string containing the table defining the casts between dtypes
table = textwrap.dedent("""
X ? b h i l q B H I L Q e f d g F D G S U V O M m
?
b .
h . ~
i . ~ ~
l . ~ ~ ~
q . ~ ~ ~
B . ~ = = = =
table = textwrap.dedent("""
X ? b h i l q B H I L Q e f d g F D G S U V O M m
? # = = = = = = = = = = = = = = = = = = = = = . =
b . # = = = = . . . . . = = = = = = = = = = = . =
h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
B . ~ = = = = # = = = =
# Map each table character to the corresponding casting level
convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
"=": Casting.safe, "#": Casting.equiv,
" ": -1}
# Empty dict that will hold the dtype-class to dtype-class casting levels
cancast = {}
# Fill the dict from the dtype list and the table rows
for from_dt, row in zip(dtypes, table[1:]):
cancast[from_dt] = {}
for to_dt, c in zip(dtypes, row[2::2]):
cancast[from_dt][to_dt] = convert_cast[c]
# Return the filled cast table
return cancast
# Build the module-level cast table once
CAST_TABLE = _get_cancast_table()
# Test class exercising a few behaviour changes of the casting machinery.
class TestChanges:
"""
These test cases exercise some behaviour changes
"""
# Casting any floating point type to a string must be possible:
@pytest.mark.parametrize("string", ["S", "U"])
@pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
def test_float_to_string(self, floating, string):
# Unsized string target is allowed...
assert np.can_cast(floating, string)
# ...and so is an explicit, sufficiently large, length.
# 100 is long enough to hold any formatted floating
assert np.can_cast(floating, f"{string}100")
# Check the cast safety of casts to (unstructured) void:
def test_to_void(self):
# double -> unsized void is allowed
assert np.can_cast("d", "V")
# "S20" -> unsized void is allowed
assert np.can_cast("S20", "V")
# If the void dtype is too small, the cast is rejected:
assert not np.can_cast("d", "V1")
assert not np.can_cast("S20", "V1")
assert not np.can_cast("U1", "V1")
# Structured source to unsized void counts as "same_kind":
assert np.can_cast("d,i", "V", casting="same_kind")
# Unstructured void to unsized void needs no cast at all ("no"):
assert np.can_cast("V3", "V", casting="no")
assert np.can_cast("V0", "V", casting="no")
# Test class for the low-level casting machinery (via get_castingimpl).
class TestCasting:
# Array byte size used by get_data; chosen so buffered paths are hit.
size = 1500 # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
# Build matching test data for a cast from dtype1 to dtype2.
def get_data(self, dtype1, dtype2):
# Choose the length so the larger itemsize fills self.size bytes.
if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
length = self.size // dtype1.itemsize
else:
length = self.size // dtype2.itemsize
# Source array; must start out C-contiguous and aligned.
arr1 = np.empty(length, dtype=dtype1)
assert arr1.flags.c_contiguous
assert arr1.flags.aligned
# Random small integers; representable (after wrapping) in every dtype.
values = [random.randrange(-128, 128) for _ in range(length)]
# Fill arr1 by item assignment.
for i, value in enumerate(values):
# Wrap negative values manually for unsigned dtypes.
if value < 0 and dtype1.kind == "u":
value = value + np.iinfo(dtype1).max + 1
arr1[i] = value
# Without a second dtype return only the source array and values.
if dtype2 is None:
if dtype1.char == "?":
values = [bool(v) for v in values]
return arr1, values
# For a boolean target the expected values collapse to bools.
if dtype2.char == "?":
values = [bool(v) for v in values]
# Destination array; must also start out C-contiguous and aligned.
arr2 = np.empty(length, dtype=dtype2)
assert arr2.flags.c_contiguous
assert arr2.flags.aligned
# Fill arr2 the same way; it holds the expected result of the cast.
for i, value in enumerate(values):
# Again wrap negative values manually for unsigned dtypes.
if value < 0 and dtype2.kind == "u":
value = value + np.iinfo(dtype2).max + 1
arr2[i] = value
# Return the source array, expected destination, and raw values.
return arr1, arr2, values
# Produce strided and/or unaligned variations of the given arrays.
def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
"""
Returns a copy of arr1 that may be non-contiguous or unaligned, and a
matching array for arr2 (although not a copy).
"""
# Contiguous: stride is exactly one itemsize.
if contig:
stride1 = arr1.dtype.itemsize
stride2 = arr2.dtype.itemsize
# Aligned but not contiguous: every other element.
elif aligned:
stride1 = 2 * arr1.dtype.itemsize
stride2 = 2 * arr2.dtype.itemsize
# Unaligned: itemsize + 1 breaks both alignment and contiguity.
else:
stride1 = arr1.dtype.itemsize + 1
stride2 = arr2.dtype.itemsize + 1
# Maximum number of bytes needed for the strided buffers.
max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
# Raw byte buffers backing the new views (zeroed for the check below).
from_bytes = np.zeros(max_size1, dtype=np.uint8)
to_bytes = np.zeros(max_size2, dtype=np.uint8)
# The allocations above must be large enough:
assert stride1 * len(arr1) <= from_bytes.nbytes
assert stride2 * len(arr2) <= to_bytes.nbytes
# Aligned case: view from the start of the buffer.
if aligned:
new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
arr2.shape, (stride2,))
# Unaligned case: offset the views by one byte.
else:
new1 = as_strided(from_bytes[1:].view(arr1.dtype),
arr1.shape, (stride1,))
new2 = as_strided(to_bytes[1:].view(arr2.dtype),
arr2.shape, (stride2,))
# Copy the source data into the strided view.
new1[...] = arr1
# For non-contiguous data, double check that nothing was written to
# bytes that should not have been touched (they must still be zero).
if not contig:
offset = arr1.dtype.itemsize if aligned else 0
buf = from_bytes[offset::stride1].tobytes()
assert buf.count(b"\0") == len(buf)
# Sanity check the requested contiguity...
if contig:
assert new1.flags.c_contiguous
assert new2.flags.c_contiguous
else:
assert not new1.flags.c_contiguous
assert not new2.flags.c_contiguous
# ...and the requested alignment (trivially-aligned dtypes excepted).
if aligned:
assert new1.flags.aligned
assert new2.flags.aligned
else:
assert not new1.flags.aligned or new1.dtype.alignment == 1
assert not new2.flags.aligned or new2.dtype.alignment == 1
# Return the (possibly strided/unaligned) source and destination views.
return new1, new2
# Parametrized over every simple dtype class as the cast source.
@pytest.mark.parametrize("from_Dt", simple_dtypes)
# Check descriptor resolution for casts between the simple dtypes.
def test_simple_cancast(self, from_Dt):
# Try casting from from_Dt to every simple target dtype class.
for to_Dt in simple_dtypes:
# Fetch the cast implementation for this dtype-class pair.
cast = get_castingimpl(from_Dt, to_Dt)
# Both byte orders of the source instance must resolve...
for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
# ...with no target given, to the default (native) target instance.
default = cast._resolve_descriptors((from_dt, None))[1][1]
assert default == to_Dt()
del default
# Now check both byte orders of the target instance:
for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
# Resolve cast level, resolved descriptors, and view offset.
casting, (from_res, to_res), view_off = (
cast._resolve_descriptors((from_dt, to_dt)))
assert(type(from_res) == from_Dt)
assert(type(to_res) == to_Dt)
# A non-None view offset means a view is acceptable, so no
# cast is necessary and the byte orders must match.
if view_off is not None:
assert casting == Casting.no
assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
assert from_res.isnative == to_res.isnative
else:
# Same dtype class but no view: byte orders must differ.
if from_Dt == to_Dt:
assert from_res.isnative != to_res.isnative
# Otherwise the cast level must match the reference table.
assert casting == CAST_TABLE[from_Dt][to_Dt]
# Identical classes must round-trip the exact instances.
if from_Dt is to_Dt:
assert(from_dt is from_res)
assert(to_dt is to_res)
# Ignore the ComplexWarning raised by complex -> real casts below.
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
# Parametrized over all simple dtype instances (both byte orders).
@pytest.mark.parametrize("from_dt", simple_dtype_instances())
# Check that direct (strided) casts produce the expected values.
def test_simple_direct_casts(self, from_dt):
"""
This test checks numeric direct casts for dtypes supported also by the
struct module (plus complex). It tries to be test a wide range of
inputs, but skips over possibly undefined behaviour (e.g. int rollover).
Longdouble and CLongdouble are tested, but only using double precision.
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
# Loop over every simple dtype instance as the cast target.
for to_dt in simple_dtype_instances():
# Unwrap the pytest.param to the actual dtype instance.
to_dt = to_dt.values[0]
cast = get_castingimpl(type(from_dt), type(to_dt))
casting, (from_res, to_res), view_off = cast._resolve_descriptors((from_dt, to_dt))
if from_res is not from_dt or to_res is not to_dt:
# Do not test this case, it is handled in multiple steps,
# each of which should is tested individually.
# NOTE(review): this `return` ends the whole parametrized case and
# skips all remaining `to_dt` values; `continue` may have been
# intended -- verify against upstream NumPy.
return
# NOTE(review): `safe` is computed here but never used below.
safe = casting <= Casting.safe
del from_res, to_res, casting
# Build matching source/destination data for this cast.
arr1, arr2, values = self.get_data(from_dt, to_dt)
cast._simple_strided_call((arr1, arr2))
# The full value list must match exactly.
assert arr2.tolist() == values
# The aligned but non-contiguous variant must give the same result.
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
# Also compare the exact bytes (stricter than value equality).
assert arr2_o.tobytes() == arr2.tobytes()
# Skip the unaligned variants when alignment cannot matter or the
# cast implementation does not support unaligned data.
if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
not cast._supports_unaligned):
return
# Unaligned but contiguous:
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
# Unaligned and non-contiguous:
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
# Drop the helpers before the next parameter set.
del arr1_o, arr2_o, cast
# Parametrized over every simple dtype class as the cast source.
@pytest.mark.parametrize("from_Dt", simple_dtypes)
# Check casts from numeric dtypes to datetime/timedelta dtypes.
def test_numeric_to_times(self, from_Dt):
# Only contiguous loops are currently implemented, so only those
# need testing here.
from_dt = from_Dt()
# Datetime and timedelta targets: generic, ms, and 4-day units.
time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
for time_dt in time_dtypes:
# Resolve the cast implementation for this pair.
cast = get_castingimpl(type(from_dt), type(time_dt))
casting, (from_res, to_res), view_off = cast._resolve_descriptors(
(from_dt, time_dt))
# The resolved descriptors must be the originals...
assert from_res is from_dt
assert to_res is time_dt
del from_res, to_res
# ...and the cast level must be compatible with the reference table
# (note: a bitwise `&` check, not strict equality).
assert casting & CAST_TABLE[from_Dt][type(time_dt)]
# Numeric -> time casts are never viewable.
assert view_off is None
# Generate the data as int64 and view it as the time dtype.
int64_dt = np.dtype(np.int64)
arr1, arr2, values = self.get_data(from_dt, int64_dt)
arr2 = arr2.view(time_dt)
# Pre-fill the destination with NaT.
arr2[...] = np.datetime64("NaT")
# Results of generic-unit datetime ("M8") cannot be printed:
if time_dt == np.dtype("M8"):
arr1[-1] = 0 # ensure at least one value is not NaT
# The cast itself succeeds; stringifying the result fails.
# NOTE(review): this `return` ends the test after the first (generic
# "M8") target, skipping the remaining dtypes -- `continue` may have
# been intended; verify against upstream NumPy.
cast._simple_strided_call((arr1, arr2))
with pytest.raises(ValueError):
str(arr2[-1]) # e.g. conversion to string fails
return
cast._simple_strided_call((arr1, arr2))
# The integer values must be preserved exactly.
assert [int(v) for v in arr2.tolist()] == values
# Check that the strided (non-contiguous) loop gives the same result.
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
@pytest.mark.parametrize(
    ["from_dt", "to_dt", "expected_casting", "expected_view_off",
     "nom", "denom"],
    [("M8[ns]", None, Casting.no, 0, 1, 1),
     (str(np.dtype("M8[ns]").newbyteorder()), None,
      Casting.equiv, None, 1, 1),
     ("M8", "M8[ms]", Casting.safe, 0, 1, 1),
     # should be invalid cast:
     ("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
     ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
     ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
     ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
     ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
     ("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
      # give full values based on NumPy 1.19.x
      [-2**63, 0, -1, 1314, -1315, 564442610]),
     ("m8[ns]", None, Casting.no, 0, 1, 1),
     (str(np.dtype("m8[ns]").newbyteorder()), None,
      Casting.equiv, None, 1, 1),
     ("m8", "m8[ms]", Casting.safe, 0, 1, 1),
     # should be invalid cast:
     ("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
     ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
     ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
     ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
     ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
     ("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
      # give full values based on NumPy 1.19.x
      [-2**63, 0, 0, 1314, -1315, 564442610])])
def test_time_to_time(self, from_dt, to_dt,
                      expected_casting, expected_view_off,
                      nom, denom):
    """Check datetime/timedelta-to-datetime/timedelta casts: the resolved
    cast level and view offset, and the converted values themselves
    (`nom`/`denom` give the unit conversion factor; reference values were
    generated with NumPy 1.19.x).
    """
    from_dt = np.dtype(from_dt)
    if to_dt is not None:
        to_dt = np.dtype(to_dt)
    # Test a few values for casting (results generated with NumPy 1.19).
    values = np.array([-2**63, 1, 2**63 - 1, 10000, -10000, 2**32])
    values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
    assert values.dtype.byteorder == from_dt.byteorder
    # The smallest int64 value is NaT when viewed as a time dtype.
    assert np.isnat(values.view(from_dt)[0])
    DType = type(from_dt)
    cast = get_castingimpl(DType, DType)
    casting, (from_res, to_res), view_off = cast._resolve_descriptors(
        (from_dt, to_dt))
    assert from_res is from_dt
    assert to_res is to_dt or to_dt is None
    assert casting == expected_casting
    assert view_off == expected_view_off
    if nom is not None:
        # Expected output is a simple integer rescale of the raw values.
        expected_out = (values * nom // denom).view(to_res)
        expected_out[0] = "NaT"
    else:
        # No conversion factor: `denom` holds the full expected values.
        expected_out = np.empty_like(values)
        expected_out[...] = denom
        expected_out = expected_out.view(to_dt)
    orig_arr = values.view(from_dt)
    orig_out = np.empty_like(expected_out)
    if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
        # Casting from a specific unit to a generic unit is an error:
        with pytest.raises(ValueError):
            cast._simple_strided_call((orig_arr, orig_out))
        return
    # BUG FIX: these loops previously iterated over [True, True], which
    # ran the aligned/contiguous variant twice and never exercised the
    # unaligned or strided code paths.
    for aligned in [True, False]:
        for contig in [True, False]:
            arr, out = self.get_data_variation(
                orig_arr, orig_out, aligned, contig)
            out[...] = 0
            cast._simple_strided_call((arr, out))
            assert_array_equal(out.view("int64"), expected_out.view("int64"))
def string_with_modified_length(self, dtype, change_length):
    """Return a dtype like `dtype` (an "S" or "U" dtype) whose length in
    characters differs by `change_length`, keeping the byte order.
    """
    # "U" dtypes store four bytes per character, "S" dtypes one.
    bytes_per_char = 1 if dtype.char == "S" else 4
    new_length = dtype.itemsize // bytes_per_char + change_length
    return np.dtype(f"{dtype.byteorder}{dtype.char}{new_length}")
# Parametrized over all simple dtype classes and both string kinds.
@pytest.mark.parametrize("other_DT", simple_dtypes)
@pytest.mark.parametrize("string_char", ["S", "U"])
# Check cast safety and resolved lengths for casts to and from strings.
def test_string_cancast(self, other_DT, string_char):
# Bytes per character: 1 for "S", 4 for "U".
fact = 1 if string_char == "S" else 4
# The string dtype class for this character.
string_DT = type(np.dtype(string_char))
# Cast implementation from the simple dtype to the string dtype.
cast = get_castingimpl(other_DT, string_DT)
# Instance of the source dtype.
other_dt = other_DT()
# The string length the cast is expected to reserve.
expected_length = get_expected_stringlength(other_dt)
string_dt = np.dtype(f"{string_char}{expected_length}")
# Resolve with no target instance given:
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
(other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert safety == Casting.safe # we consider string casts "safe"
assert view_off is None
assert isinstance(res_dt, string_DT)
# Check cast safety against shorter/equal/longer string targets:
for change_length in [-1, 0, 1]:
if change_length >= 0:
expected_safety = Casting.safe
else:
expected_safety = Casting.same_kind
# Target string dtype with modified length.
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(other_dt, to_dt))
assert res_dt is to_dt
assert safety == expected_safety
assert view_off is None
# The reverse direction is always considered unsafe:
cast = get_castingimpl(string_DT, other_DT)
safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
assert safety == Casting.unsafe
assert view_off is None
# Resolving without a target instance behaves the same way:
cast = get_castingimpl(string_DT, other_DT)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(string_dt, None))
assert safety == Casting.unsafe
assert view_off is None
assert other_dt is res_dt # returns the singleton simple dtype instance
# BUG FIX: the `parametrize` decorators supplying `other_dt` and
# `string_char` were missing in this copy, which would make pytest fail
# with unknown fixtures; restored to match the sibling string tests.
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
@pytest.mark.parametrize("string_char", ["S", "U"])
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
    """
    Tests casts from and to string by checking the roundtripping property.

    The test also covers some string to string casts (but not all).

    If this test creates issues, it should possibly just be simplified
    or even removed (checking whether unaligned/non-contiguous casts give
    the same results is useful, though).
    """
    string_DT = type(np.dtype(string_char))
    cast = get_castingimpl(type(other_dt), string_DT)
    cast_back = get_castingimpl(string_DT, type(other_dt))
    _, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
        (other_dt, None))
    if res_other_dt is not other_dt:
        # Casting from a non-native byte order is not supported directly;
        # the resolved descriptor then differs (only) in byte order.
        assert other_dt.byteorder != res_other_dt.byteorder
        return
    orig_arr, values = self.get_data(other_dt, None)
    str_arr = np.zeros(len(orig_arr), dtype=string_dt)
    # Also prepare targets one character shorter and one longer:
    string_dt_short = self.string_with_modified_length(string_dt, -1)
    str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
    string_dt_long = self.string_with_modified_length(string_dt, 1)
    str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
    # These casts do not support unaligned data:
    assert not cast._supports_unaligned
    assert not cast_back._supports_unaligned
    for contig in [True, False]:
        other_arr, str_arr = self.get_data_variation(
            orig_arr, str_arr, True, contig)
        _, str_arr_short = self.get_data_variation(
            orig_arr, str_arr_short.copy(), True, contig)
        _, str_arr_long = self.get_data_variation(
            orig_arr, str_arr_long, True, contig)
        cast._simple_strided_call((other_arr, str_arr))
        # Casting into the shorter string must equal the truncation of
        # the full-length result:
        cast._simple_strided_call((other_arr, str_arr_short))
        assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
        # Casting into the longer string must compare equal:
        cast._simple_strided_call((other_arr, str_arr_long))
        assert_array_equal(str_arr, str_arr_long)
        if other_dt.kind == "b":
            # Booleans do not roundtrip through strings.
            continue
        other_arr[...] = 0
        cast_back._simple_strided_call((str_arr, other_arr))
        assert_array_equal(orig_arr, other_arr)
        other_arr[...] = 0
        cast_back._simple_strided_call((str_arr_long, other_arr))
        assert_array_equal(orig_arr, other_arr)
# Check cast safety and view offsets for string-to-string casts.
# NOTE(review): this method takes `other_dt` and `string_char` arguments
# but no `pytest.mark.parametrize` decorators survive in this copy;
# pytest would report missing fixtures.  Restore the decorators from the
# upstream NumPy test suite.
def test_string_to_string_cancast(self, other_dt, string_char):
other_dt = np.dtype(other_dt)
# Bytes per character of the target ("S": 1, "U": 4)...
fact = 1 if string_char == "S" else 4
# ...and of the source dtype.
div = 1 if other_dt.char == "S" else 4
# The target string dtype class.
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
# Expected resolved length in characters.
expected_length = other_dt.itemsize // div
string_dt = np.dtype(f"{string_char}{expected_length}")
# Resolve with no target instance given:
safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
(other_dt, None))
assert res_dt.itemsize == expected_length * fact
assert isinstance(res_dt, string_DT)
# Work out the expected safety and view offset for this pair:
expected_view_off = None
if other_dt.char == string_char:
if other_dt.isnative:
expected_safety = Casting.no
expected_view_off = 0
else:
expected_safety = Casting.equiv
elif string_char == "U":
expected_safety = Casting.safe
else:
expected_safety = Casting.unsafe
assert view_off == expected_view_off
assert expected_safety == safety
# Now check against shorter/equal/longer explicit targets:
for change_length in [-1, 0, 1]:
to_dt = self.string_with_modified_length(string_dt, change_length)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(other_dt, to_dt))
assert res_dt is to_dt
# Growing the string invalidates the view:
if change_length <= 0:
assert view_off == expected_view_off
else:
assert view_off is None
# Safety degrades to same_kind when the target shrinks:
if expected_safety == Casting.unsafe:
assert safety == expected_safety
elif change_length < 0:
assert safety == Casting.same_kind
elif change_length == 0:
assert safety == expected_safety
elif change_length > 0:
assert safety == Casting.safe
@pytest.mark.parametrize("order1", [">", "<"])
@pytest.mark.parametrize("order2", [">", "<"])
def test_unicode_byteswapped_cast(self, order1, order2):
# Very specific test for unicode byte-swapping, including unaligned
# array data.
dtype1 = np.dtype(f"{order1}U30")
dtype2 = np.dtype(f"{order2}U30")
# Two deliberately unaligned buffers, viewed as dtype1 and dtype2.
data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
# When the dtype has alignment requirements, the views are unaligned.
if dtype1.alignment != 1:
assert not data1.flags.aligned
assert not data2.flags.aligned
# A unicode element containing non-ASCII characters.
element = "this is a ünicode string‽"
data1[()] = element
# Test both the unaligned data1 and an (aligned) copy of it.
# NOTE(review): the loop variable `data` is never used in the body --
# `data2[...] = data` was probably intended instead of `data1`;
# verify against upstream NumPy.
for data in [data1, data1.copy()]:
data2[...] = data1
assert data2[()] == element
assert data2.copy()[()] == element
# 测试空类型到字符串的特殊情况转换
def test_void_to_string_special_case(self):
# 测试空类型到字符串的特殊情况转换,这种情况可能可以转换为错误(与下面的 `test_object_to_parametric_internal_error` 进行比较)。
assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
# Object -> parametric dtype casts must reject resolution without an
# explicit target instance.
def test_object_to_parametric_internal_error(self):
# Casting from object to a parametric type is rejected; the correct
# target instance has to be determined first.
object_dtype = type(np.dtype(object))
other_dtype = type(np.dtype(str))
cast = get_castingimpl(object_dtype, other_dtype)
with pytest.raises(TypeError,
match="casting from object to the parametric DType"):
cast._resolve_descriptors((np.dtype("O"), None))
# Object -> simple dtype resolution, with and without a target instance.
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_object_and_simple_resolution(self, dtype):
# Simple test for resolving the cast when an instance is (or is not)
# specified.
object_dtype = type(np.dtype(object))
cast = get_castingimpl(object_dtype, type(dtype))
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(np.dtype("O"), dtype))
assert safety == Casting.unsafe
assert view_off is None
assert res_dt is dtype
# Without an instance, the native-byte-order default is resolved:
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(np.dtype("O"), None))
assert safety == Casting.unsafe
assert view_off is None
assert res_dt == dtype.newbyteorder("=")
# Simple dtype -> object resolution without a target instance.
@pytest.mark.parametrize("dtype", simple_dtype_instances())
def test_simple_to_object_resolution(self, dtype):
# Casting to object is considered "safe" and is never viewable.
object_dtype = type(np.dtype(object))
cast = get_castingimpl(type(dtype), object_dtype)
safety, (_, res_dt), view_off = cast._resolve_descriptors(
(dtype, None))
assert safety == Casting.safe
assert view_off is None
assert res_dt is np.dtype("O")
# Void <-> structured-with-subarray casts under "no" vs "unsafe" casting.
@pytest.mark.parametrize("casting", ["no", "unsafe"])
def test_void_and_structured_with_subarray(self, casting):
# Test case corresponding to gh-19325
dtype = np.dtype([("foo", "<f4", (3, 2))])
# Only "unsafe" casting allows this in either direction.
expected = casting == "unsafe"
assert np.can_cast("V4", dtype, casting=casting) == expected
assert np.can_cast(dtype, "V4", casting=casting) == expected
# Cast safety and view offsets for structured dtypes with moved fields.
@pytest.mark.parametrize(["to_dt", "expected_off"],
[ # Same as `from_dt` but with both fields shifted:
(np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
"offsets": [0, 4]}), 2),
# Additional change of the names
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
"offsets": [0, 4]}), 2),
# Incompatible field offset change
(np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
"offsets": [0, 6]}), None)])
def test_structured_field_offsets(self, to_dt, expected_off):
# This checks the cast-safety and view offset for swapped and "shifted"
# fields which are viewable
# Define the original structured data type with field names "a" and "b",
# integer and float formats, and specified offsets.
from_dt = np.dtype({"names": ["a", "b"],
"formats": ["i4", "f4"],
"offsets": [2, 6]})
# Obtain the casting implementation for converting from `from_dt` to `to_dt`.
cast = get_castingimpl(type(from_dt), type(to_dt))
# Resolve the casting descriptors and retrieve safety, ignored flags, and view offset.
safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
# Assert the safety of the cast operation based on the equality of field names.
if from_dt.names == to_dt.names:
assert safety == Casting.equiv
else:
assert safety == Casting.safe
# Assert the expected view offset after shifting the original data pointer by -2 bytes.
# This ensures alignment by effectively adding 2 bytes of spacing before `from_dt`.
assert view_off == expected_off
@pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
# Parametrized cases: source dtype, target dtype, expected view offset.
# Subarray cases:
("i", "(1,1)i", 0), # subarray cases have view offset 0
("(1,1)i", "i", 0), # subarray cases have view offset 0
("(2,1)i", "(2,1)i", 0), # subarray cases have view offset 0
# field cases (field to field is tested explicitly also):
# Considering field-to-field cases (those are also tested explicitly):
# A negative offset could let a structured dtype indirectly access
# invalid memory, so it is not considered viewable:
("i", dict(names=["a"], formats=["i"], offsets=[2]), None),
(dict(names=["a"], formats=["i"], offsets=[2]), "i", 2), # field-to-scalar case, offset 2
# Currently not viewable due to multiple fields, even though they
# overlap (maybe we should not allow that?):
("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]), None),
# A mismatching number of fields can never work and is never viewable:
("i,i", "i,i,i", None),
# Unstructured void cases:
("i4", "V3", 0), # void is smaller or equal, offset 0
("i4", "V4", 0), # void is smaller or equal, offset 0
("i4", "V10", None), # void is larger (cannot view)
("O", "V4", None), # currently reject objects for view here
("O", "V8", None), # currently reject objects for view here
("V4", "V3", 0), # void is smaller or equal, offset 0
("V4", "V4", 0), # void is smaller or equal, offset 0
("V3", "V4", None), # void is larger (cannot view)
# Note that currently void-to-other casts go via byte strings and are
# not "view" based, unlike the reverse direction:
("V4", "i4", None), # completely invalid/impossible cast
("i,i", "i,i,i", None), # completely invalid/impossible cast
])
def test_structured_view_offsets_paramteric(
self, from_dt, to_dt, expected_off):
# TODO: While this test is fairly thorough, it does not currently
# exercise some paths that could have nonzero offsets (they do not
# actually exist yet).
# Parametrized check of the view offsets for structured dtypes.
from_dt = np.dtype(from_dt)
to_dt = np.dtype(to_dt)
# Fetch the cast implementation for this pair.
cast = get_castingimpl(type(from_dt), type(to_dt))
# Resolve only the view offset.
_, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
# It must match the expected offset.
assert view_off == expected_off
@pytest.mark.parametrize("dtype", np.typecodes["All"])
# NULL-filled object arrays must cast exactly like None-filled ones.
def test_object_casts_NULL_None_equivalence(self, dtype):
# None to <other> casts may succeed or fail, but a NULL'ed array must
# behave the same as one filled with None's.
arr_normal = np.array([None] * 5) # array of None objects
arr_NULLs = np.empty_like(arr_normal) # same shape, data left uninitialized
ctypes.memset(arr_NULLs.ctypes.data, 0, arr_NULLs.nbytes) # zero out the object pointers (NULL)
# If the check below fails (maybe it should), the test loses its purpose:
assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
try:
expected = arr_normal.astype(dtype) # may raise TypeError for some dtypes
except TypeError:
with pytest.raises(TypeError):
arr_NULLs.astype(dtype), # then the NULL'ed array must raise too
else:
assert_array_equal(expected, arr_NULLs.astype(dtype)) # otherwise results must match
@pytest.mark.parametrize("dtype",
np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
# Non-0/1 bool bit patterns must cast to numbers as if they were True.
def test_nonstandard_bool_to_other(self, dtype):
# Simple test for casting bool_ to numeric types, which should not
# expose the detail that NumPy bools can sometimes take values other
# than 0 and 1.  See also gh-19514.
nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool)
res = nonstandard_bools.astype(dtype)
expected = [0, 1, 1]
assert_array_equal(res, expected)
File: .\numpy\numpy\_core\tests\test_conversion_utils.py
"""
Tests for numpy/_core/src/multiarray/conversion_utils.c
"""
import re
import sys
import pytest
import numpy as np
import numpy._core._multiarray_tests as mt
from numpy._core.multiarray import CLIP, WRAP, RAISE
from numpy.testing import assert_warns, IS_PYPY
class StringConverterTestCase:
"""Shared checks for the PyArray_*Converter string-argument converters.

Subclasses set `conv` and override the class flags below to describe
how their converter treats bytes input, case, abbreviations, and
deprecation warnings.
"""
# Converter accepts ASCII bytes as well as str.
allow_bytes = True
# Case variants convert (with a DeprecationWarning) instead of erroring.
case_insensitive = True
# Converter requires the full spelling (no one-letter abbreviation).
exact_match = False
# Deprecated spellings emit a DeprecationWarning.
warn = True
def _check_value_error(self, val):
# The ValueError message must contain the offending repr.
pattern = r'\(got {}\)'.format(re.escape(repr(val)))
with pytest.raises(ValueError, match=pattern) as exc:
self.conv(val)
def _check_conv_assert_warn(self, val, expected):
# Deprecated spellings must still convert, warning when `warn` is set.
if self.warn:
with assert_warns(DeprecationWarning) as exc:
assert self.conv(val) == expected
else:
assert self.conv(val) == expected
def _check(self, val, expected):
"""Takes valid non-deprecated inputs for converters,
runs converters on inputs, checks correctness of outputs,
warnings and errors"""
assert self.conv(val) == expected
# Bytes input behaves like str, or raises TypeError when disallowed.
if self.allow_bytes:
assert self.conv(val.encode('ascii')) == expected
else:
with pytest.raises(TypeError):
self.conv(val.encode('ascii'))
# Abbreviations: exact-match converters reject them, others warn.
if len(val) != 1:
if self.exact_match:
self._check_value_error(val[:1])
self._check_value_error(val + '\0')
else:
self._check_conv_assert_warn(val[:1], expected)
# Case variants: warn when case-insensitive, error otherwise.
if self.case_insensitive:
if val != val.lower():
self._check_conv_assert_warn(val.lower(), expected)
if val != val.upper():
self._check_conv_assert_warn(val.upper(), expected)
else:
if val != val.lower():
self._check_value_error(val.lower())
if val != val.upper():
self._check_value_error(val.upper())
def test_wrong_type(self):
# Non-string inputs are rejected with TypeError.
with pytest.raises(TypeError):
self.conv({})
with pytest.raises(TypeError):
self.conv([])
def test_wrong_value(self):
# Empty and non-ASCII values are rejected with ValueError.
self._check_value_error('')
self._check_value_error('\N{greek small letter pi}')
if self.allow_bytes:
self._check_value_error(b'')
# Not ASCII at all:
self._check_value_error(b"\xFF")
if self.exact_match:
self._check_value_error("there's no way this is supported")
class TestByteorderConverter(StringConverterTestCase):
    """ Tests of PyArray_ByteorderConverter """
    conv = mt.run_byteorder_converter
    warn = False

    def test_valid(self):
        # Every accepted spelling, mapped to the NPY_* constant it selects.
        spellings = {
            'big': 'NPY_BIG', '>': 'NPY_BIG',
            'little': 'NPY_LITTLE', '<': 'NPY_LITTLE',
            'native': 'NPY_NATIVE', '=': 'NPY_NATIVE',
            'ignore': 'NPY_IGNORE', '|': 'NPY_IGNORE',
            'swap': 'NPY_SWAP',
        }
        for spelling, expected in spellings.items():
            self._check(spelling, expected)
class TestSortkindConverter(StringConverterTestCase):
    """ Tests of PyArray_SortkindConverter """
    conv = mt.run_sortkind_converter
    warn = False

    def test_valid(self):
        # "mergesort" and "stable" both select the stable sort.
        for kind, expected in [('quicksort', 'NPY_QUICKSORT'),
                               ('heapsort', 'NPY_HEAPSORT'),
                               ('mergesort', 'NPY_STABLESORT'),
                               ('stable', 'NPY_STABLESORT')]:
            self._check(kind, expected)
class TestSelectkindConverter(StringConverterTestCase):
    """ Tests of PyArray_SelectkindConverter """
    conv = mt.run_selectkind_converter
    # this converter is strict: exact, case-sensitive matches only
    case_insensitive = False
    exact_match = True

    def test_valid(self):
        """'introselect' is the only accepted selection kind."""
        self._check('introselect', 'NPY_INTROSELECT')
class TestSearchsideConverter(StringConverterTestCase):
    """ Tests of PyArray_SearchsideConverter """
    conv = mt.run_searchside_converter

    def test_valid(self):
        """Both search sides resolve to their NPY_* constants."""
        for side, expected in (('left', 'NPY_SEARCHLEFT'),
                               ('right', 'NPY_SEARCHRIGHT')):
            self._check(side, expected)
class TestOrderConverter(StringConverterTestCase):
    """ Tests of PyArray_OrderConverter """
    conv = mt.run_order_converter
    warn = False

    def test_valid(self):
        """Single-letter order flags map onto the NPY_*ORDER constants."""
        for letter, expected in (('c', 'NPY_CORDER'),
                                 ('f', 'NPY_FORTRANORDER'),
                                 ('a', 'NPY_ANYORDER'),
                                 ('k', 'NPY_KEEPORDER')):
            self._check(letter, expected)

    def test_flatten_invalid_order(self):
        """Unknown letters raise ValueError; non-strings raise TypeError."""
        with pytest.raises(ValueError):
            self.conv('Z')
        for bad_order in (False, True, 0, 8):
            with pytest.raises(TypeError):
                self.conv(bad_order)
class TestClipmodeConverter(StringConverterTestCase):
    """ Tests of PyArray_ClipmodeConverter """
    conv = mt.run_clipmode_converter

    def test_valid(self):
        """String names and the CLIP/WRAP/RAISE constants both resolve to
        the matching NPY_* clip modes."""
        for name, expected in (('clip', 'NPY_CLIP'),
                               ('wrap', 'NPY_WRAP'),
                               ('raise', 'NPY_RAISE')):
            self._check(name, expected)
        # the integer constants bypass string parsing entirely
        for constant, expected in ((CLIP, 'NPY_CLIP'),
                                   (WRAP, 'NPY_WRAP'),
                                   (RAISE, 'NPY_RAISE')):
            assert self.conv(constant) == expected
class TestCastingConverter(StringConverterTestCase):
    """ Tests of PyArray_CastingConverter """
    conv = mt.run_casting_converter
    # casting rules are matched exactly and case-sensitively
    case_insensitive = False
    exact_match = True

    def test_valid(self):
        """Every casting rule name maps onto its NPY_*_CASTING constant."""
        for name, expected in [("no", "NPY_NO_CASTING"),
                               ("equiv", "NPY_EQUIV_CASTING"),
                               ("safe", "NPY_SAFE_CASTING"),
                               ("same_kind", "NPY_SAME_KIND_CASTING"),
                               ("unsafe", "NPY_UNSAFE_CASTING")]:
            self._check(name, expected)
class TestIntpConverter:
    """ Tests of PyArray_IntpConverter """
    conv = mt.run_intp_converter

    def test_basic(self):
        """Int scalars and int sequences convert to tuples of intp."""
        for given, expected in ((1, (1,)),
                                ((1, 2), (1, 2)),
                                ([1, 2], (1, 2)),
                                ((), ())):
            assert self.conv(given) == expected

    def test_none(self):
        # None as a shape is deprecated; it still converts to ().
        with pytest.warns(DeprecationWarning):
            assert self.conv(None) == ()

    @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
                        reason="PyPy bug in error formatting")
    def test_float(self):
        """Floats are rejected, both as scalars and inside sequences."""
        for bad in (1.0, [1, 1.0]):
            with pytest.raises(TypeError):
                self.conv(bad)

    def test_too_large(self):
        # values that overflow intp must raise ValueError
        with pytest.raises(ValueError):
            self.conv(2**64)

    def test_too_many_dims(self):
        # 64 dimensions is accepted; 65 is beyond the limit
        assert self.conv([1]*64) == (1,)*64
        with pytest.raises(ValueError):
            self.conv([1]*65)
.\numpy\numpy\_core\tests\test_cpu_dispatcher.py
from numpy._core._multiarray_umath import (
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
)
from numpy._core import _umath_tests
from numpy.testing import assert_equal
def test_dispatcher():
"""
Testing the utilities of the CPU dispatcher
"""
targets = (
"SSE2", "SSE41", "AVX2",
"VSX", "VSX2", "VSX3",
"NEON", "ASIMD", "ASIMDHP",
"VX", "VXE"
)
highest_sfx = ""
all_sfx = []
for feature in reversed(targets):
if feature in __cpu_baseline__:
continue
if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
continue
if not highest_sfx:
highest_sfx = "_" + feature
all_sfx.append("func" + "_" + feature)
test = _umath_tests.test_dispatch()
assert_equal(test["func"], "func" + highest_sfx)
assert_equal(test["var"], "var" + highest_sfx)
if highest_sfx:
assert_equal(test["func_xb"], "func" + highest_sfx)
assert_equal(test["var_xb"], "var" + highest_sfx)
else:
assert_equal(test["func_xb"], "nobase")
assert_equal(test["var_xb"], "nobase")
all_sfx.append("func")
assert_equal(test["all"], all_sfx)
.\numpy\numpy\_core\tests\test_cpu_features.py
import sys, platform, re, pytest
from numpy._core._multiarray_umath import (
__cpu_features__,
__cpu_baseline__,
__cpu_dispatch__,
)
import numpy as np
import subprocess
import pathlib
import os
def assert_features_equal(actual, desired, fname):
__tracebackhide__ = True
actual, desired = str(actual), str(desired)
if actual == desired:
return
detected = str(__cpu_features__).replace("'", "")
try:
with open("/proc/cpuinfo") as fd:
cpuinfo = fd.read(2048)
except Exception as err:
cpuinfo = str(err)
try:
import subprocess
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
auxv = auxv.decode()
except Exception as err:
auxv = str(err)
import textwrap
error_report = textwrap.indent(
"""
###########################################
### Extra debugging information
###########################################
-------------------------------------------
--- NumPy Detections
-------------------------------------------
%s
-------------------------------------------
--- SYS / CPUINFO
-------------------------------------------
%s....
-------------------------------------------
--- SYS / AUXV
-------------------------------------------
%s
""" % (detected, cpuinfo, auxv), prefix='\r')
raise AssertionError((
"Failure Detection\n"
" NAME: '%s'\n"
" ACTUAL: %s\n"
" DESIRED: %s\n"
"%s"
) % (fname, actual, desired, error_report))
def _text_to_list(txt):
out = txt.strip("][\n").replace("'", "").split(', ')
return None if out[0] == "" else out
class AbstractTest:
    """Shared machinery for the per-architecture CPU-feature tests.

    Subclasses define the feature tables and implement ``load_flags`` to
    populate ``features_flags`` from the operating system.
    """
    features = []           # feature names verified one by one
    features_groups = {}    # group name -> list of member feature names
    features_map = {}       # NumPy feature name -> OS flag spelling(s)
    features_flags = set()  # flags reported by the OS

    def load_flags(self):
        # overridden by subclasses to fill ``features_flags``
        pass

    def test_features(self):
        self.load_flags()
        # a feature group is available only when all of its members are
        for gname, features in self.features_groups.items():
            group_have = all(self.cpu_have(f) for f in features)
            assert_features_equal(__cpu_features__.get(gname), group_have,
                                  gname)
        for feature_name in self.features:
            assert_features_equal(__cpu_features__.get(feature_name),
                                  self.cpu_have(feature_name), feature_name)

    def cpu_have(self, feature_name):
        """Return True when the OS reports any flag mapped to the feature."""
        mapped = self.features_map.get(feature_name, feature_name)
        if isinstance(mapped, str):
            return mapped in self.features_flags
        return any(flag in self.features_flags for flag in mapped)

    def load_flags_cpuinfo(self, magic_key):
        self.features_flags = self.get_cpuinfo_item(magic_key)

    def get_cpuinfo_item(self, magic_key):
        """Collect the upper-cased, whitespace-split values of every
        ``/proc/cpuinfo`` line starting with ``magic_key``."""
        values = set()
        with open('/proc/cpuinfo') as fd:
            for line in fd:
                if not line.startswith(magic_key):
                    continue
                parts = [s.strip() for s in line.split(':', 1)]
                if len(parts) == 2:
                    values = values.union(parts[1].upper().split())
        return values

    def load_flags_auxv(self):
        """Extend ``features_flags`` with AT_HWCAP* entries from the ELF
        auxiliary vector, dumped by the loader via ``LD_SHOW_AUXV``."""
        auxv = subprocess.check_output(['/bin/true'],
                                       env=dict(LD_SHOW_AUXV="1"))
        for at in auxv.split(b'\n'):
            if not at.startswith(b"AT_HWCAP"):
                continue
            hwcap = [s.strip() for s in at.split(b':', 1)]
            if len(hwcap) == 2:
                self.features_flags = self.features_flags.union(
                    hwcap[1].upper().decode().split()
                )
@pytest.mark.skipif(
    sys.platform == 'emscripten',
    reason=(
        "The subprocess module is not available on WASM platforms and"
        " therefore this test class cannot be properly executed."
    ),
)
class TestEnvPrivation:
    # Tests for the NPY_ENABLE_CPU_FEATURES / NPY_DISABLE_CPU_FEATURES
    # environment variables, exercised by importing numpy inside a
    # subprocess started with a modified environment.
    cwd = pathlib.Path(__file__).parent.resolve()
    env = os.environ.copy()
    # Strip any pre-existing settings so they cannot leak into the tests.
    _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None)
    _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None)
    SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True)
    # Dispatchable features the current machine does NOT support.
    unavailable_feats = [
        feat for feat in __cpu_dispatch__ if not __cpu_features__[feat]
    ]
    UNAVAILABLE_FEAT = (
        None if len(unavailable_feats) == 0
        else unavailable_feats[0]
    )
    BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0]
    # Probe script run in the subprocess: prints the dispatched features.
    SCRIPT = """
def main():
    from numpy._core._multiarray_umath import (
        __cpu_features__,
        __cpu_dispatch__
    )

    detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]]
    print(detected)

if __name__ == "__main__":
    main()
"""

    @pytest.fixture(autouse=True)
    def setup_class(self, tmp_path_factory):
        # Write the probe script into a fresh temp directory.
        file = tmp_path_factory.mktemp("runtime_test_script")
        file /= "_runtime_detect.py"
        file.write_text(self.SCRIPT)
        self.file = file
        return

    def _run(self):
        # Run the probe script with the (possibly modified) environment.
        return subprocess.run(
            [sys.executable, self.file],
            env=self.env,
            **self.SUBPROCESS_ARGS,
        )

    def _expect_error(
        self,
        msg,
        err_type,
        no_error_msg="Failed to generate error"
    ):
        """Run the probe script and assert it fails, with stderr matching
        both ``msg`` and ``err_type`` (regex searches)."""
        try:
            self._run()
        except subprocess.CalledProcessError as e:
            assertion_message = f"Expected: {msg}\nGot: {e.stderr}"
            assert re.search(msg, e.stderr), assertion_message

            assertion_message = (
                f"Expected error of type: {err_type}; see full "
                f"error:\n{e.stderr}"
            )
            assert re.search(err_type, e.stderr), assertion_message
        else:
            assert False, no_error_msg

    def setup_method(self):
        """Ensure the environment variables are reset before each test."""
        self.env = os.environ.copy()
        return

    def test_runtime_feature_selection(self):
        """
        Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the
        features exactly specified are dispatched.
        """
        # baseline run: which features are dispatched with no env vars set?
        out = self._run()
        non_baseline_features = _text_to_list(out.stdout)

        if non_baseline_features is None:
            pytest.skip(
                "No dispatchable features outside of baseline detected."
            )
        feature = non_baseline_features[0]

        # enabling exactly one feature must dispatch only that feature
        self.env['NPY_ENABLE_CPU_FEATURES'] = feature
        out = self._run()
        enabled_features = _text_to_list(out.stdout)

        assert set(enabled_features) == {feature}

        if len(non_baseline_features) < 2:
            pytest.skip("Only one non-baseline feature detected.")
        # enabling all detected features must reproduce the baseline run
        self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features)
        out = self._run()
        enabled_features = _text_to_list(out.stdout)

        assert set(enabled_features) == set(non_baseline_features)
        return

    @pytest.mark.parametrize("enabled, disabled",
    [
        ("feature", "feature"),
        ("feature", "same"),
    ])
    def test_both_enable_disable_set(self, enabled, disabled):
        """
        Ensure that when both environment variables are set then an
        ImportError is thrown
        """
        self.env['NPY_ENABLE_CPU_FEATURES'] = enabled
        self.env['NPY_DISABLE_CPU_FEATURES'] = disabled
        msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES"
        err_type = "ImportError"
        self._expect_error(msg, err_type)

    @pytest.mark.skipif(
        not __cpu_dispatch__,
        reason=(
            "NPY_*_CPU_FEATURES only parsed if "
            "`__cpu_dispatch__` is non-empty"
        )
    )
    @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"])
    def test_variable_too_long(self, action):
        """
        Test that an error is thrown if the environment variables are too long
        to be processed. Current limit is 1024, but this may change later.
        """
        MAX_VAR_LENGTH = 1024
        # Actual length is MAX_VAR_LENGTH + 1 due to null-termination
        self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH
        msg = (
            f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is "
            f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted"
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

    @pytest.mark.skipif(
        not __cpu_dispatch__,
        reason=(
            "NPY_*_CPU_FEATURES only parsed if "
            "`__cpu_dispatch__` is non-empty"
        )
    )
    def test_impossible_feature_disable(self):
        """
        Test that a RuntimeError is thrown if an impossible feature-disabling
        request is made. This includes disabling a baseline feature.
        """
        if self.BASELINE_FEAT is None:
            pytest.skip("There are no unavailable features to test with")
        bad_feature = self.BASELINE_FEAT
        self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature
        msg = (
            f"You cannot disable CPU feature '{bad_feature}', since it is "
            "part of the baseline optimizations"
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

    def test_impossible_feature_enable(self):
        """
        Test that a RuntimeError is thrown if an impossible feature-enabling
        request is made. This includes enabling a feature not supported by the
        machine, or disabling a baseline optimization.
        """
        if self.UNAVAILABLE_FEAT is None:
            pytest.skip("There are no unavailable features to test with")
        bad_feature = self.UNAVAILABLE_FEAT
        self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature
        msg = (
            f"You cannot enable CPU features \\({bad_feature}\\), since "
            "they are not supported by your machine."
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

        # features not known to numpy at all are ignored in the message;
        # only the unsupported-but-known feature is reported
        feats = f"{bad_feature}, Foobar"
        self.env['NPY_ENABLE_CPU_FEATURES'] = feats
        msg = (
            f"You cannot enable CPU features \\({bad_feature}\\), since they "
            "are not supported by your machine."
        )
        self._expect_error(msg, err_type)

        if self.BASELINE_FEAT is not None:
            # mixing in a baseline feature does not change the error
            feats = f"{bad_feature}, {self.BASELINE_FEAT}"
            self.env['NPY_ENABLE_CPU_FEATURES'] = feats
            msg = (
                f"You cannot enable CPU features \\({bad_feature}\\), since "
                "they are not supported by your machine."
            )
            self._expect_error(msg, err_type)
# Platform detection gating the per-architecture test classes below.
is_linux = sys.platform.startswith('linux')
is_cygwin = sys.platform.startswith('cygwin')
machine = platform.machine()
# matches common x86 machine names (amd64 / x86* / i386 / i686)
is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
@pytest.mark.skipif(
    not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
)
class Test_X86_Features(AbstractTest):
    # Individual x86 features, verified against the /proc/cpuinfo "flags"
    # line (see load_flags below).
    features = [
        "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
        "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
        "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
        "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
        "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16",
    ]
    # AVX-512 feature groups: a group is present only when all members are.
    features_groups = dict(
        AVX512_KNL=["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
        AVX512_KNM=["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
                    "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
        AVX512_SKX=["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
        AVX512_CLX=["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
        AVX512_CNL=["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
                    "AVX512VBMI"],
        AVX512_ICL=["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
                    "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
        AVX512_SPR=["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ",
                    "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI",
                    "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ",
                    "AVX512FP16"],
    )
    # NumPy feature name -> /proc/cpuinfo flag spelling, where they differ.
    features_map = dict(
        SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
        AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
        AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
        AVX512FP16="AVX512_FP16",
    )

    def load_flags(self):
        # x86 exposes its feature set on the "flags" line of /proc/cpuinfo
        self.load_flags_cpuinfo("flags")
# POWER (ppc64/ppc64le) detection and features.
is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
class Test_POWER_Features(AbstractTest):
    features = ["VSX", "VSX2", "VSX3", "VSX4"]
    # VSX revisions are reported by the kernel as ISA architecture levels
    features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")

    def load_flags(self):
        # POWER flags come from the ELF auxiliary vector (AT_HWCAP*)
        self.load_flags_auxv()
# IBM Z (s390x) detection and features.
is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_zarch,
                    reason="Only for Linux and IBM Z")
class Test_ZARCH_Features(AbstractTest):
    features = ["VX", "VXE", "VXE2"]

    def load_flags(self):
        # z/Arch flags come from the ELF auxiliary vector (AT_HWCAP*)
        self.load_flags_auxv()
# ARM (32-bit arm and aarch64) detection and features.
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
class Test_ARM_Features(AbstractTest):
    features = [
        "SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
    ]
    features_groups = dict(
        NEON_FP16 = ["NEON", "HALF"],
        NEON_VFPV4 = ["NEON", "VFPV4"],
    )

    def load_flags(self):
        # ARM exposes its feature set on the "Features" line of /proc/cpuinfo
        self.load_flags_cpuinfo("Features")
        arch = self.get_cpuinfo_item("CPU architecture")
        # treat as ARMv8 when the reported "CPU architecture" is > 7
        # (covers an aarch64 kernel running a 32-bit userspace)
        is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
        if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
            # on ARMv8 these legacy flags are all implied by ASIMD
            self.features_map = dict(
                NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
            )
        else:
            # NOTE(review): on 32-bit ARM, ASIMD presence is inferred from
            # these crypto/CRC flags — assumption from the mapping; confirm.
            self.features_map = dict(
                ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
            )
.\numpy\numpy\_core\tests\test_custom_dtypes.py
import sys
from tempfile import NamedTemporaryFile

import pytest

import numpy as np
from numpy.testing import assert_array_equal
from numpy._core._multiarray_umath import (
    _discover_array_parameters as discover_array_params, _get_sfloat_dtype)

# The scaled-float test DType class, implemented in C to exercise the
# custom-dtype API.
SF = _get_sfloat_dtype()
class TestSFloat:
    """Tests for the scaled-float custom DType ``SF``: float64 storage
    paired with a per-dtype scaling factor."""

    def _get_array(self, scaling, aligned=True):
        # Build a 3-element SF(scaling) array such that
        # ``scaling * arr.view(np.float64) == [1., 2., 3.]``.
        if not aligned:
            # offset the byte buffer by one to force unaligned storage
            a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
            a = a.view(np.float64)
            a[:] = [1., 2., 3.]
        else:
            a = np.array([1., 2., 3.])

        # store the values pre-divided by the factor
        a *= 1./scaling
        return a.view(SF(scaling))

    def test_sfloat_rescaled(self):
        # scaled_by multiplies the existing scaling factor
        sf = SF(1.)
        sf2 = sf.scaled_by(2.)
        assert sf2.get_scaling() == 2.
        sf6 = sf2.scaled_by(3.)
        assert sf6.get_scaling() == 6.

    def test_class_discovery(self):
        # passing the DType class (not an instance) discovers the default
        # instance with scaling 1
        dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
        assert dt == SF(1.)

    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_scaled_float_from_floats(self, scaling):
        a = np.array([1., 2., 3.], dtype=SF(scaling))

        assert a.dtype.get_scaling() == scaling
        assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])

    def test_repr(self):
        assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"

    def test_dtype_name(self):
        assert SF(1.).name == "_ScaledFloatTestDType64"

    def test_sfloat_structured_dtype_printing(self):
        # the DType's name is used inside structured dtype reprs
        dt = np.dtype([("id", int), ("value", SF(0.5))])
        assert "('value', '_ScaledFloatTestDType64')" in repr(dt)

    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_sfloat_from_float(self, scaling):
        a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))

        assert a.dtype.get_scaling() == scaling
        assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])

    @pytest.mark.parametrize("aligned", [True, False])
    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_sfloat_getitem(self, aligned, scaling):
        # element access converts back to the unscaled logical value
        a = self._get_array(1., aligned)
        assert a.tolist() == [1., 2., 3.]

    @pytest.mark.parametrize("aligned", [True, False])
    def test_sfloat_casts(self, aligned):
        a = self._get_array(1., aligned)

        # casting between factors of equal magnitude is "equiv", not "no"
        assert np.can_cast(a, SF(-1.), casting="equiv")
        assert not np.can_cast(a, SF(-1.), casting="no")
        na = a.astype(SF(-1.))
        assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))

        # casting to a different magnitude is only "same_kind"
        assert np.can_cast(a, SF(2.), casting="same_kind")
        assert not np.can_cast(a, SF(2.), casting="safe")
        a2 = a.astype(SF(2.))
        assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))

    @pytest.mark.parametrize("aligned", [True, False])
    def test_sfloat_cast_internal_errors(self, aligned):
        # the rescale factor 2e300 / 2e-300 overflows to a non-finite
        # value, which the cast loop reports as an error
        a = self._get_array(2e300, aligned)

        with pytest.raises(TypeError,
                match="error raised inside the core-loop: non-finite factor!"):
            a.astype(SF(2e-300))

    def test_sfloat_promotion(self):
        # promotion keeps the larger scaling factor
        assert np.result_type(SF(2.), SF(3.)) == SF(3.)
        assert np.result_type(SF(3.), SF(2.)) == SF(3.)
        # plain float64 promotes like SF(1.)
        assert np.result_type(SF(3.), np.float64) == SF(3.)
        assert np.result_type(np.float64, SF(0.5)) == SF(1.)

        # integer promotion is not defined for the scaled float
        with pytest.raises(TypeError):
            np.result_type(SF(1.), np.int64)

    def test_basic_multiply(self):
        a = self._get_array(2.)
        b = self._get_array(4.)

        res = a * b
        # multiplication multiplies the scaling factors...
        assert res.dtype.get_scaling() == 8.
        # ...and the raw storage values
        expected_view = a.view(np.float64) * b.view(np.float64)
        assert_array_equal(res.view(np.float64), expected_view)

    def test_possible_and_impossible_reduce(self):
        # addition reduces fine (with an explicit initial value) ...
        a = self._get_array(2.)
        res = np.add.reduce(a, initial=0.)
        assert res == a.astype(np.float64).sum()

        # ... but multiplication cannot: the result dtype would have to
        # change with every step of the reduction
        with pytest.raises(TypeError,
                match="the resolved dtypes are not compatible"):
            np.multiply.reduce(a)

    def test_basic_ufunc_at(self):
        float_a = np.array([1., 2., 3.])
        b = self._get_array(2.)

        # ufunc.at on the SF array must match the float64 equivalent
        float_b = b.view(np.float64).copy()
        np.multiply.at(float_b, [1, 1, 1], float_a)
        np.multiply.at(b, [1, 1, 1], float_a)

        assert_array_equal(b.view(np.float64), float_b)

    def test_basic_multiply_promotion(self):
        float_a = np.array([1., 2., 3.])
        b = self._get_array(2.)

        res1 = float_a * b
        res2 = b * float_a
        # plain float64 does not change the result's scaling
        assert res1.dtype == res2.dtype == b.dtype
        expected_view = float_a * b.view(np.float64)
        assert_array_equal(res1.view(np.float64), expected_view)
        assert_array_equal(res2.view(np.float64), expected_view)

        # a matching out= is accepted; a mismatching integer out= is not
        np.multiply(b, float_a, out=res2)
        with pytest.raises(TypeError):
            np.multiply(b, float_a, out=np.arange(3))

    def test_basic_addition(self):
        a = self._get_array(2.)
        b = self._get_array(4.)

        res = a + b
        # addition uses the promoted dtype and adds in that common dtype
        assert res.dtype == np.result_type(a.dtype, b.dtype)
        expected_view = (a.astype(res.dtype).view(np.float64) +
                         b.astype(res.dtype).view(np.float64))
        assert_array_equal(res.view(np.float64), expected_view)

    def test_addition_cast_safety(self):
        """The addition method is special for the scaled float, because it
        includes the "cast" between different factors, thus cast-safety
        is influenced by the implementation.
        """
        a = self._get_array(2.)
        b = self._get_array(-2.)
        c = self._get_array(3.)

        # same-magnitude factors count as an "equiv" cast, but not "no"
        np.add(a, b, casting="equiv")
        with pytest.raises(TypeError):
            np.add(a, b, casting="no")

        # differing magnitudes are never "safe"
        with pytest.raises(TypeError):
            np.add(a, c, casting="safe")

        # the out= cast participates in the safety check as well
        with pytest.raises(TypeError):
            np.add(a, a, out=c, casting="safe")

    @pytest.mark.parametrize("ufunc",
            [np.logical_and, np.logical_or, np.logical_xor])
    def test_logical_ufuncs_casts_to_bool(self, ufunc):
        a = self._get_array(2.)
        a[0] = 0.  # make sure first element is considered False

        float_equiv = a.astype(float)
        expected = ufunc(float_equiv, float_equiv)
        res = ufunc(a, a)
        assert_array_equal(res, expected)

        # also check the reduce path
        expected = ufunc.reduce(float_equiv)
        res = ufunc.reduce(a)
        assert_array_equal(res, expected)

        # a non-bool out= must be rejected even with relaxed casting
        with pytest.raises(TypeError):
            ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")

    def test_wrapped_and_wrapped_reductions(self):
        a = self._get_array(2.)
        float_equiv = a.astype(float)

        expected = np.hypot(float_equiv, float_equiv)
        res = np.hypot(a, a)
        assert res.dtype == a.dtype
        # the result stores raw values at scaling 2, hence the * 2
        res_float = res.view(np.float64) * 2
        assert_array_equal(res_float, expected)

        # also check the keepdims reduce path
        res = np.hypot.reduce(a, keepdims=True)
        assert res.dtype == a.dtype
        expected = np.hypot.reduce(float_equiv, keepdims=True)
        assert res.view(np.float64) * 2 == expected

    def test_astype_class(self):
        # passing the DType class casts using the default instance SF(1.)
        arr = np.array([1., 2., 3.], dtype=object)

        res = arr.astype(SF)  # passing the class class
        expected = arr.astype(SF(1.))  # above will have discovered 1. scaling
        assert_array_equal(res.view(np.float64), expected.view(np.float64))

    def test_creation_class(self):
        # passing the DType class to creation functions uses SF(1.)
        arr1 = np.array([1., 2., 3.], dtype=SF)
        assert arr1.dtype == SF(1.)
        arr2 = np.array([1., 2., 3.], dtype=SF(1.))
        assert_array_equal(arr1.view(np.float64), arr2.view(np.float64))
        assert arr1.dtype == arr2.dtype

        assert np.empty(3, dtype=SF).dtype == SF(1.)
        assert np.empty_like(arr1, dtype=SF).dtype == SF(1.)
        assert np.zeros(3, dtype=SF).dtype == SF(1.)
        assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.)

    def test_np_save_load(self):
        # expose the class under numpy so pickling can locate it by path
        np._ScaledFloatTestDType = SF

        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))

        # adding the pickled dtype to the array triggers a UserWarning
        with NamedTemporaryFile("wb", delete=False, suffix=".npz") as f:
            with pytest.warns(UserWarning) as record:
                np.savez(f.name, arr)

        assert len(record) == 1

        with np.load(f.name, allow_pickle=True) as data:
            larr = data["arr_0"]
        assert_array_equal(arr.view(np.float64), larr.view(np.float64))
        assert larr.dtype == arr.dtype == SF(1.0)

        del np._ScaledFloatTestDType

    def test_flatiter(self):
        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))

        for i, val in enumerate(arr.flat):
            assert arr[i] == val

    @pytest.mark.parametrize(
        "index", [
            [1, 2], ..., slice(None, 2, None),
            np.array([True, True, False]), np.array([0, 1])
        ], ids=["int_list", "ellipsis", "slice", "bool_array", "int_array"])
    def test_flatiter_index(self, index):
        # flat indexing (get and set) must agree with regular indexing
        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))
        np.testing.assert_array_equal(
            arr[index].view(np.float64), arr.flat[index].view(np.float64))

        arr2 = arr.copy()
        arr[index] = 5.0
        arr2.flat[index] = 5.0
        np.testing.assert_array_equal(
            arr.view(np.float64), arr2.view(np.float64))
def test_type_pickle():
    """The scaled-float DType class round-trips through pickle by
    reference: the unpickled object is the very same class object."""
    import pickle

    # expose the class under numpy so pickle can find it by module path
    np._ScaledFloatTestDType = SF

    serialized = pickle.dumps(SF)
    restored = pickle.loads(serialized)
    assert restored is SF

    del np._ScaledFloatTestDType
def test_is_numeric():
    # the scaled float advertises itself as a numeric dtype
    numeric_flag = SF._is_numeric
    assert numeric_flag
.\numpy\numpy\_core\tests\test_cython.py
from datetime import datetime
import os
import shutil
import subprocess
import sys
import time
import pytest
import numpy as np
from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE
# Cython is optional: skip this whole module unless a sufficiently new
# Cython is importable.
try:
    import cython
    from Cython.Compiler.Version import version as cython_version
except ImportError:
    cython = None
else:
    from numpy._utils import _pep440

    required_version = "3.0.6"
    if _pep440.parse(cython_version) < _pep440.Version(required_version):
        # too-old Cython counts the same as no Cython at all
        cython = None

pytestmark = pytest.mark.skipif(cython is None, reason="requires cython")

# Compile-step tests cannot run against an editable install.
if IS_EDITABLE:
    pytest.skip(
        "Editable install doesn't support tests with a compile step",
        allow_module_level=True
    )
@pytest.fixture(scope='module')
def install_temp(tmpdir_factory):
    """Build the Cython example extensions with meson in a temp dir and
    put the build directory on sys.path so tests can ``import checks``.

    Skips when subprocesses (WASM) or the ``meson`` tool are unavailable.
    """
    if IS_WASM:
        pytest.skip("No subprocess")

    srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython')
    build_dir = tmpdir_factory.mktemp("cython_test") / "build"
    os.makedirs(build_dir, exist_ok=True)
    # Pin the interpreter meson uses to the one running the tests.
    native_file = str(build_dir / 'interpreter-native-file.ini')
    with open(native_file, 'w') as f:
        f.write("[binaries]\n")
        f.write(f"python = '{sys.executable}'")

    try:
        subprocess.check_call(["meson", "--version"])
    except FileNotFoundError:
        pytest.skip("No usable 'meson' found")
    if sys.platform == "win32":
        # --vsenv activates the MSVC environment on Windows
        subprocess.check_call(["meson", "setup",
                               "--buildtype=release",
                               "--vsenv", "--native-file", native_file,
                               str(srcdir)],
                              cwd=build_dir,
                              )
    else:
        subprocess.check_call(["meson", "setup",
                               "--native-file", native_file, str(srcdir)],
                              cwd=build_dir
                              )
    try:
        subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir)
    except subprocess.CalledProcessError:
        print("----------------")
        print("meson build failed when doing")
        print(f"'meson setup --native-file {native_file} {srcdir}'")
        # fix: was an f-string with no placeholders (lint F541)
        print("'meson compile -vv'")
        print(f"in {build_dir}")
        print("----------------")
        raise

    # make the freshly built extension modules importable
    sys.path.append(str(build_dir))
def test_is_timedelta64_object(install_temp):
    """checks.is_td64 accepts only genuine np.timedelta64 scalars."""
    import checks

    for td in (np.timedelta64(1234),
               np.timedelta64(1234, "ns"),
               np.timedelta64("NaT", "ns")):
        assert checks.is_td64(td)

    for other in (1, None, "foo", np.datetime64("now", "s")):
        assert not checks.is_td64(other)
def test_is_datetime64_object(install_temp):
    """checks.is_dt64 accepts only genuine np.datetime64 scalars."""
    import checks

    for dt in (np.datetime64(1234, "ns"),
               np.datetime64("NaT", "ns")):
        assert checks.is_dt64(dt)

    for other in (1, None, "foo", np.timedelta64(1234)):
        assert not checks.is_dt64(other)
def test_get_datetime64_value(install_temp):
    """The C-level value of a datetime64 equals its i8 view."""
    import checks

    dt64 = np.datetime64("2016-01-01", "ns")
    assert checks.get_dt64_value(dt64) == dt64.view("i8")
def test_get_timedelta64_value(install_temp):
    """The C-level value of a timedelta64 equals its i8 view."""
    import checks

    td64 = np.timedelta64(12345, "h")
    assert checks.get_td64_value(td64) == td64.view("i8")
def test_get_datetime64_unit(install_temp):
    """Unit codes returned by the C API: 'ns' -> 10, 'h' -> 5.

    (Presumably these are NPY_DATETIMEUNIT enum values — see the C
    headers for the mapping.)
    """
    import checks

    nanosecond_stamp = np.datetime64("2016-01-01", "ns")
    assert checks.get_dt64_unit(nanosecond_stamp) == 10

    hour_delta = np.timedelta64(12345, "h")
    assert checks.get_dt64_unit(hour_delta) == 5
def test_abstract_scalars(install_temp):
    """Python ints and NumPy integer scalars all register as integers."""
    import checks

    for value in (1, np.int8(1), np.uint64(1)):
        assert checks.is_integer(value)
def test_default_int(install_temp):
    import checks

    # the default integer dtype must be the np.dtype(int) singleton
    default_integer = checks.get_default_integer()
    assert default_integer is np.dtype(int)
def test_convert_datetime64_to_datetimestruct(install_temp):
    """The C conversion produces the expected broken-down datetime
    fields for the fixed timestamp compiled into the helper."""
    import checks

    expected = {
        "year": 2022,
        "month": 3,
        "day": 15,
        "hour": 20,
        "min": 1,
        "sec": 55,
        "us": 260292,
        "ps": 0,
        "as": 0,
    }
    assert checks.convert_datetime64_to_datetimestruct() == expected
class TestDatetimeStrings:
    def test_make_iso_8601_datetime(self, install_temp):
        """A datetime formats to its ISO-8601 byte string."""
        import checks

        stamp = datetime(2016, 6, 2, 10, 45, 19)
        assert checks.make_iso_8601_datetime(stamp) == b"2016-06-02T10:45:19"

    def test_get_datetime_iso_8601_strlen(self, install_temp):
        import checks

        # buffer length the C API reports for ISO-8601 formatting
        assert checks.get_datetime_iso_8601_strlen() == 48
@pytest.mark.parametrize(
    "arrays",
    [
        [np.random.rand(2)],
        [np.random.rand(2), np.random.rand(3, 1)],
        [np.random.rand(2), np.random.rand(2, 3, 2), np.random.rand(1, 3, 2)],
        [np.random.rand(2, 1)] * 4 + [np.random.rand(1, 1, 1)],
    ]
)
def test_multiiter_fields(install_temp, arrays):
    """The C multi-iterator accessors agree with np.broadcast's
    Python-level attributes."""
    import checks

    bcast = np.broadcast(*arrays)

    assert checks.get_multiiter_number_of_dims(bcast) == bcast.ndim
    assert checks.get_multiiter_size(bcast) == bcast.size
    assert checks.get_multiiter_num_of_iterators(bcast) == bcast.numiter
    assert checks.get_multiiter_shape(bcast) == bcast.shape
    assert checks.get_multiiter_current_index(bcast) == bcast.index
    # each per-operand iterator must wrap the same base array
    for ours, theirs in zip(bcast.iters, checks.get_multiiter_iters(bcast)):
        assert ours.base is theirs.base
def test_dtype_flags(install_temp):
    import checks

    # structured dtype containing an object field, so several flags are set
    dtype = np.dtype("i,O")
    assert checks.get_dtype_flags(dtype) == dtype.flags
def test_conv_intp(install_temp):
    """conv_intp accepts floats and any object implementing __int__."""
    import checks

    class HasInt:
        def __int__(self):
            return 3

    assert checks.conv_intp(3.) == 3
    assert checks.conv_intp(HasInt()) == 3
def test_npyiter_api(install_temp):
    """Check the NpyIter C-API accessors against the Python-level
    attributes of equivalently constructed np.nditer objects."""
    import checks
    arr = np.random.rand(3, 2)

    it = np.nditer(arr)
    assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape)
    # without multi_index the iteration space is flattened to 1-D
    assert checks.get_npyiter_ndim(it) == it.ndim == 1
    assert checks.npyiter_has_index(it) == it.has_index == False

    it = np.nditer(arr, flags=["c_index"])
    assert checks.npyiter_has_index(it) == it.has_index == True
    assert (
        checks.npyiter_has_delayed_bufalloc(it)
        == it.has_delayed_bufalloc
        == False
    )

    # buffering with delayed allocation flips has_delayed_bufalloc
    it = np.nditer(arr, flags=["buffered", "delay_bufalloc"])
    assert (
        checks.npyiter_has_delayed_bufalloc(it)
        == it.has_delayed_bufalloc
        == True
    )

    # multi_index keeps the original dimensionality
    it = np.nditer(arr, flags=["multi_index"])
    assert checks.get_npyiter_size(it) == it.itersize == np.prod(arr.shape)
    assert checks.npyiter_has_multi_index(it) == it.has_multi_index == True
    assert checks.get_npyiter_ndim(it) == it.ndim == 2

    # multi-operand iterator: broadcasting (3, 2) with (2, 1, 2)
    arr2 = np.random.rand(2, 1, 2)
    it = np.nditer([arr, arr2])
    assert checks.get_npyiter_nop(it) == it.nop == 2
    assert checks.get_npyiter_size(it) == it.itersize == 12
    assert checks.get_npyiter_ndim(it) == it.ndim == 3
    assert all(
        x is y for x, y in zip(checks.get_npyiter_operands(it), it.operands)
    )
    assert all(
        [
            np.allclose(x, y)
            for x, y in zip(checks.get_npyiter_itviews(it), it.itviews)
        ]
    )
def test_fillwithbytes(install_temp):
    import checks

    # the Cython helper allocates a (1, 2) array filled with byte value 1
    filled = checks.compile_fillwithbyte()
    expected = np.ones((1, 2))
    assert_array_equal(filled, expected)
def test_complex(install_temp):
    """inc2_cfloat_struct mutates a complex64 array in place."""
    from checks import inc2_cfloat_struct

    arr = np.array([0, 10+10j], dtype="F")
    inc2_cfloat_struct(arr)
    # the helper added 2 to both real and imaginary parts of arr[1]
    assert arr[1] == (12 + 12j)
.\numpy\numpy\_core\tests\test_datetime.py
import datetime
import pickle
import pytest
import numpy
import numpy as np
from numpy.testing import (
IS_WASM,
assert_,
assert_equal,
assert_raises,
assert_warns,
suppress_warnings,
assert_raises_regex,
assert_array_equal,
)
# pytz is optional; tests needing real timezones check _has_pytz.
try:
    from pytz import timezone as tz
    _has_pytz = True
except ImportError:
    _has_pytz = False

# Legacy shim: RecursionError only exists since Python 3.5, where it is a
# subclass of RuntimeError; this aliasing is dead on supported Pythons.
try:
    RecursionError
except NameError:
    RecursionError = RuntimeError
class TestDateTime:
def test_string(self):
msg = "no explicit representation of timezones available for np.datetime64"
with pytest.warns(UserWarning, match=msg):
np.datetime64('2000-01-01T00+01')
    def test_datetime(self):
        # A trailing 'Z' (UTC designator) triggers the timezone warning...
        msg = "no explicit representation of timezones available for np.datetime64"
        with pytest.warns(UserWarning, match=msg):
            t0 = np.datetime64('2023-06-09T12:18:40Z', 'ns')

        # ...while the same timestamp without a timezone parses silently.
        # (t0 is intentionally unused; only the warning behavior matters.)
        t0 = np.datetime64('2023-06-09T12:18:40', 'ns')
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'μs',
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
dt2 = np.dtype('m8[%s]' % unit)
assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
assert_equal(str(np.dtype("M8")), "datetime64")
assert_equal(np.dtype("=M8"), np.dtype("M8"))
assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
assert_(np.dtype(">M8") == np.dtype("M8") or
np.dtype("<M8") == np.dtype("M8"))
assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
np.dtype("<M8[D]") == np.dtype("M8[D]"))
assert_(np.dtype(">M8") != np.dtype("<M8"))
assert_equal(np.dtype("=m8"), np.dtype("m8"))
assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
assert_(np.dtype(">m8") == np.dtype("m8") or
np.dtype("<m8") == np.dtype("m8"))
assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
np.dtype("<m8[D]") == np.dtype("m8[D]"))
assert_(np.dtype(">m8") != np.dtype("<m8"))
assert_raises(TypeError, np.dtype, 'M8[badunit]')
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
assert_raises(TypeError, np.dtype, 'm4')
assert_raises(TypeError, np.dtype, 'M7')
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
assert_raises(TypeError, np.dtype, 'M8[3000000000ps]')
def test_datetime_prefix_conversions(self):
    """Equal durations expressed with a multiplier of a smaller unit and
    with a larger unit (e.g. 7000ms vs 7s) cast safely both ways."""
    # Each entry pairs with the same index in larger_units below.
    smaller_units = ['M8[7000ms]',
                     'M8[2000us]',
                     'M8[1000ns]',
                     'M8[5000ns]',
                     'M8[2000ps]',
                     'M8[9000fs]',
                     'M8[1000as]',
                     'M8[2000000ps]',
                     'M8[1000000as]',
                     'M8[2000000000ps]',
                     'M8[1000000000as]']
    larger_units = ['M8[7s]',
                    'M8[2ms]',
                    'M8[us]',
                    'M8[5us]',
                    'M8[2ns]',
                    'M8[9ps]',
                    'M8[1fs]',
                    'M8[2us]',
                    'M8[1ps]',
                    'M8[2ms]',
                    'M8[1ns]']
    for larger_unit, smaller_unit in zip(larger_units, smaller_units):
        # Same total duration => safe cast in both directions.
        assert np.can_cast(larger_unit, smaller_unit, casting='safe')
        assert np.can_cast(smaller_unit, larger_unit, casting='safe')
@pytest.mark.parametrize("unit", [
    "s", "ms", "us", "ns", "ps", "fs", "as"])
def test_prohibit_negative_datetime(self, unit):
    """A negative multiplier inside a datetime unit must be rejected."""
    bad_dtype = f"M8[-1{unit}]"
    with assert_raises(TypeError):
        np.array([1], dtype=bad_dtype)
def test_compare_generic_nat(self):
assert_(np.datetime64('NaT') !=
np.datetime64('2000') + np.timedelta64('NaT'))
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
@pytest.mark.parametrize("size", [
    3, 21, 217, 1000])
def test_datetime_nat_argsort_stability(self, size):
    """Mergesort argsort of an all-NaT datetime array is stable, so it
    must return the identity permutation."""
    all_nat = np.tile(np.datetime64('NaT'), size)
    assert_equal(np.argsort(all_nat, kind='mergesort'), np.arange(size))
@pytest.mark.parametrize("size", [
    3, 21, 217, 1000])
def test_timedelta_nat_argsort_stability(self, size):
    """Mergesort argsort of an all-NaT timedelta array is stable, so it
    must return the identity permutation."""
    all_nat = np.tile(np.timedelta64('NaT'), size)
    assert_equal(np.argsort(all_nat, kind='mergesort'), np.arange(size))
@pytest.mark.parametrize("arr, expected", [
    # NaT sorts to the end; the rest sort ascending.
    (['NaT', 1, 2, 3],
     [1, 2, 3, 'NaT']),
    (['NaT', 9, 'NaT', -707],
     [-707, 9, 'NaT', 'NaT']),
    ([1, -2, 3, 'NaT'],
     [-2, 1, 3, 'NaT']),
    # 2-d input: sorting happens along the last axis.
    ([[51, -220, 'NaT'],
      [-17, 'NaT', -90]],
     [[-220, 51, 'NaT'],
      [-90, -17, 'NaT']]),
])
@pytest.mark.parametrize("dtype", [
    'M8[ns]', 'M8[us]',
    'm8[ns]', 'm8[us]'])
def test_datetime_timedelta_sort_nat(self, arr, expected, dtype):
    """In-place sort places NaT last for both datetimes and timedeltas."""
    arr = np.array(arr, dtype=dtype)
    expected = np.array(expected, dtype=dtype)
    arr.sort()
    assert_equal(arr, expected)
def test_datetime_scalar_construction_timezone(self):
    """Explicit timezone suffixes warn and are folded into UTC."""
    msg = "no explicit representation of timezones available for " \
          "np.datetime64"
    # 'Z' (UTC) is equivalent to no suffix.
    with pytest.warns(UserWarning, match=msg):
        assert_equal(np.datetime64('2000-01-01T00Z'),
                     np.datetime64('2000-01-01T00'))
    # A '-08' offset shifts the stored value to the UTC wall time.
    with pytest.warns(UserWarning, match=msg):
        assert_equal(np.datetime64('2000-01-01T00-08'),
                     np.datetime64('2000-01-01T08'))
def test_datetime_array_find_type(self):
    """dtype inference for arrays built from datetime-like objects."""
    # A datetime64 scalar keeps its unit.
    dt = np.datetime64('1970-01-01', 'M')
    arr = np.array([dt])
    assert_equal(arr.dtype, np.dtype('M8[M]'))
    # Python date/datetime objects infer object dtype, not datetime64.
    dt = datetime.date(1970, 1, 1)
    arr = np.array([dt])
    assert_equal(arr.dtype, np.dtype('O'))
    dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
    arr = np.array([dt])
    assert_equal(arr.dtype, np.dtype('O'))
    # Mixing bool with any datetime-like also falls back to object.
    b = np.bool(True)
    dm = np.datetime64('1970-01-01', 'M')
    d = datetime.date(1970, 1, 1)
    dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
    arr = np.array([b, dm])
    assert_equal(arr.dtype, np.dtype('O'))
    arr = np.array([b, d])
    assert_equal(arr.dtype, np.dtype('O'))
    arr = np.array([b, dt])
    assert_equal(arr.dtype, np.dtype('O'))
    # Explicit astype('datetime64') picks D for dates, us for datetimes.
    arr = np.array([d, d]).astype('datetime64')
    assert_equal(arr.dtype, np.dtype('M8[D]'))
    arr = np.array([dt, dt]).astype('datetime64')
    assert_equal(arr.dtype, np.dtype('M8[us]'))
@pytest.mark.parametrize("unit", [
    ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
    ("s"), ("ms"), ("us"), ("ns"), ("ps"),
    ("fs"), ("as"), ("generic") ])
def test_timedelta_np_int_construction(self, unit):
    """np.int64 and Python int must build identical timedelta64 scalars,
    for every unit and for the generic (unit-less) form."""
    if unit == "generic":
        assert_equal(np.timedelta64(np.int64(123)),
                     np.timedelta64(123))
    else:
        assert_equal(np.timedelta64(np.int64(123), unit),
                     np.timedelta64(123, unit))
def test_timedelta_object_array_conversion(self):
inputs = [datetime.timedelta(28),
datetime.timedelta(30),
datetime.timedelta(31)]
expected = np.array([28, 30, 31], dtype='timedelta64[D]')
actual = np.array(inputs, dtype='timedelta64[D]')
assert_equal(expected, actual)
def test_timedelta_0_dim_object_array_conversion(self):
test = np.array(datetime.timedelta(seconds=20))
actual = test.astype(np.timedelta64)
expected = np.array(datetime.timedelta(seconds=20),
np.timedelta64)
assert_equal(actual, expected)
def test_timedelta_nat_format(self):
assert_equal('NaT', '{0}'.format(np.timedelta64('nat')))
def test_datetime_nat_casting(self):
    """NaT survives unit casts; float NaN casts to NaT; and year strings
    convert to the correct day offset from the 1970-01-01 epoch.

    NOTE(review): the NaN-casting and days-since-epoch sections may
    originally have been separate test methods whose ``def`` lines were
    lost in extraction -- confirm against upstream before relying on the
    grouping.
    """
    a = np.array('NaT', dtype='M8[D]')
    b = np.datetime64('NaT', '[D]')

    # Arrays: NaT is preserved across every unit cast.
    assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
    assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
    assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
    assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
    assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))

    # Scalar -> scalar conversions.
    assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
    assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
    assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
    assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
    assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))

    # 0-d array -> scalar conversions.
    assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
    assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
    assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
    assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
    assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))

    # NaN -> NaT for every float/complex width (last entry 0 -> epoch).
    nan = np.array([np.nan] * 8 + [0])
    fnan = nan.astype('f')
    lnan = nan.astype('g')
    cnan = nan.astype('D')
    cfnan = nan.astype('F')
    clnan = nan.astype('G')
    hnan = nan.astype(np.half)

    nat = np.array([np.datetime64('NaT')] * 8 + [np.datetime64(0, 'D')])
    assert_equal(nan.astype('M8[ns]'), nat)
    assert_equal(fnan.astype('M8[ns]'), nat)
    assert_equal(lnan.astype('M8[ns]'), nat)
    assert_equal(cnan.astype('M8[ns]'), nat)
    assert_equal(cfnan.astype('M8[ns]'), nat)
    assert_equal(clnan.astype('M8[ns]'), nat)
    assert_equal(hnan.astype('M8[ns]'), nat)

    nat = np.array([np.timedelta64('NaT')] * 8 + [np.timedelta64(0)])
    assert_equal(nan.astype('timedelta64[ns]'), nat)
    assert_equal(fnan.astype('timedelta64[ns]'), nat)
    assert_equal(lnan.astype('timedelta64[ns]'), nat)
    assert_equal(cnan.astype('timedelta64[ns]'), nat)
    assert_equal(cfnan.astype('timedelta64[ns]'), nat)
    assert_equal(clnan.astype('timedelta64[ns]'), nat)
    assert_equal(hnan.astype('timedelta64[ns]'), nat)

    # Year string -> signed day count from the 1970 epoch, accounting
    # for the Gregorian leap-year rules (including the 1600/2000
    # century-leap-year and the 1900 non-leap-year cases).
    assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
                 (1600-1970)*365 - (1972-1600)/4 + 3 - 365)
    assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
                 (1600-1970)*365 - (1972-1600)/4 + 3)
    assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
                 (1600-1970)*365 - (1972-1600)/4 + 3 + 366)
    assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
                 (1900-1970)*365 - (1970-1900)//4)
    assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
                 (1900-1970)*365 - (1970-1900)//4 + 365)
    assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
    assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
    assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
    assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
    assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
    assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
    assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
    assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
    assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
                 (2000 - 1970)*365 + (2000 - 1972)//4)
    assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
                 (2000 - 1970)*365 + (2000 - 1972)//4 + 366)
    assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
                 (2400 - 1970)*365 + (2400 - 1972)//4 - 3)
    assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
                 (2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
    # Leap-day boundaries.
    assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
                 (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
    assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
                 (1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
    assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
                 (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
    assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
                 (2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
    assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
                 (2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
def test_days_to_pydate(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
datetime.date(1599, 1, 1))
assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
datetime.date(1600, 1, 1))
assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
datetime.date(1601, 1, 1))
assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
datetime.date(1900, 1, 1))
assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
datetime.date(1901, 1, 1))
assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
datetime.date(2000, 1, 1))
assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
datetime.date(2001, 1, 1))
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
datetime.date(1600, 2, 29))
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
datetime.date(1600, 3, 1))
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
datetime.date(2001, 3, 22))
def test_dtype_comparison(self):
assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
def test_pydatetime_creation(self):
a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['today', datetime.date.today()], dtype='M8[D]')
assert_equal(a[0], a[1])
assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
np.array(np.datetime64('1960-03-12T00:00:00')))
def test_datetime_string_conversion(self):
    """Casting and assignment between datetime64 and bytes/unicode."""
    a = ['2011-03-16', '1920-01-01', '2013-05-19']
    str_a = np.array(a, dtype='S')
    uni_a = np.array(a, dtype='U')
    dt_a = np.array(a, dtype='M')
    # Bytes -> datetime, via astype and via item assignment.
    assert_equal(dt_a, str_a.astype('M'))
    assert_equal(dt_a.dtype, str_a.astype('M').dtype)
    dt_b = np.empty_like(dt_a)
    dt_b[...] = str_a
    assert_equal(dt_a, dt_b)
    # Datetime -> bytes ('S0' lets NumPy pick the width).
    assert_equal(str_a, dt_a.astype('S0'))
    str_b = np.empty_like(str_a)
    str_b[...] = dt_a
    assert_equal(str_a, str_b)
    # Unicode -> datetime.
    assert_equal(dt_a, uni_a.astype('M'))
    assert_equal(dt_a.dtype, uni_a.astype('M').dtype)
    dt_b = np.empty_like(dt_a)
    dt_b[...] = uni_a
    assert_equal(dt_a, dt_b)
    # Datetime -> unicode.
    assert_equal(uni_a, dt_a.astype('U'))
    uni_b = np.empty_like(uni_a)
    uni_b[...] = dt_a
    assert_equal(uni_a, uni_b)
    # Datetime -> an oversized (128-byte) string buffer.
    assert_equal(str_a, dt_a.astype((np.bytes_, 128)))
    str_b = np.empty(str_a.shape, dtype=(np.bytes_, 128))
    str_b[...] = dt_a
    assert_equal(str_a, str_b)
@pytest.mark.parametrize("time_dtype", ["m8[D]", "M8[Y]"])
@pytest.mark.parametrize("str_dtype", ["S", "U"])
def test_datetime_conversions_byteorders(self, str_dtype, time_dtype):
    """Datetime/timedelta <-> string casts must be byte-order insensitive.

    Fix: the ``@pytest.mark.parametrize`` decorators supplying
    ``str_dtype`` and ``time_dtype`` were missing, so pytest would error
    with unknown fixtures.  Restored here: bytes and unicode string
    dtypes crossed with one timedelta and one datetime dtype ("2017"
    parses under both).
    """
    times = np.array(["2017", "NaT"], dtype=time_dtype)
    from_strings = np.array(["2017", "NaT"], dtype=str_dtype)
    # Native-order reference result for the time -> string direction.
    to_strings = times.astype(str_dtype)

    # Swapped time source must produce the same strings.
    times_swapped = times.astype(times.dtype.newbyteorder())
    res = times_swapped.astype(str_dtype)
    assert_array_equal(res, to_strings)
    # Swapped string destination as well.
    res = times_swapped.astype(to_strings.dtype.newbyteorder())
    assert_array_equal(res, to_strings)
    res = times.astype(to_strings.dtype.newbyteorder())
    assert_array_equal(res, to_strings)

    # Reverse direction: strings -> times, again with swapped variants.
    from_strings_swapped = from_strings.astype(from_strings.dtype.newbyteorder())
    res = from_strings_swapped.astype(time_dtype)
    assert_array_equal(res, times)
    res = from_strings_swapped.astype(times.dtype.newbyteorder())
    assert_array_equal(res, times)
    res = from_strings.astype(times.dtype.newbyteorder())
    assert_array_equal(res, times)
def test_datetime_array_str(self):
    """str() and array2string output for datetime arrays, incl. NaT."""
    a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
    assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
    a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
    # A custom 'datetime' entry in `formatter` is honored by array2string.
    assert_equal(np.array2string(a, separator=', ',
                 formatter={'datetime': lambda x:
                            "'%s'" % np.datetime_as_string(x, timezone='UTC')}),
                 "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
    # A NaT entry must not corrupt the neighbouring values' formatting.
    a = np.array(['2010', 'NaT', '2030']).astype('M')
    assert_equal(str(a), "['2010' 'NaT' '2030']")
def test_timedelta_array_str(self):
a = np.array([-1, 0, 100], dtype='m')
assert_equal(str(a), "[ -1 0 100]")
a = np.array(['NaT', 'NaT'], dtype='m')
assert_equal(str(a), "['NaT' 'NaT']")
a = np.array([-1, 'NaT', 0], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 0]")
a = np.array([-1, 'NaT', 1234567], dtype='m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
a = np.array([-1, 'NaT', 1234567], dtype='>m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
a = np.array([-1, 'NaT', 1234567], dtype='<m')
assert_equal(str(a), "[ -1 'NaT' 1234567]")
def test_pickle(self):
    """Datetime dtypes and scalars round-trip through every pickle
    protocol, and historical protocol-0 dtype pickles still load."""
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        dt = np.dtype('M8[7D]')
        assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
        dt = np.dtype('M8[W]')
        assert_equal(pickle.loads(pickle.dumps(dt, protocol=proto)), dt)
        scalar = np.datetime64('2016-01-01T00:00:00.000000000')
        assert_equal(pickle.loads(pickle.dumps(scalar, protocol=proto)),
                     scalar)
        delta = scalar - np.datetime64('2015-01-01T00:00:00.000000000')
        assert_equal(pickle.loads(pickle.dumps(delta, protocol=proto)),
                     delta)
    # Byte streams captured from an older NumPy: loading them must keep
    # working so old pickles stay readable (note the embedded unit tuples
    # like (S'D' I7 ...) => '<M8[7D]').
    pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
          b"I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(pkl), np.dtype('<M8[7D]'))
    pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          b"(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
          b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(pkl), np.dtype('<M8[W]'))
    pkl = b"cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          b"(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
          b"I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(pkl), np.dtype('>M8[us]'))
def test_setstate(self):
    """__setstate__ with a malformed state tuple must raise, and the
    failed call must leave the dtype's reduce state unchanged."""
    dt = np.dtype('>M8[us]')
    # Last element should be a (metadata, unit) tuple, not an int.
    assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
    assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
    # A tuple of the wrong content type must also be rejected.
    assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
    assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
    """promote_types picks the largest common divisor unit for matching
    datetime/timedelta pairs; incompatible or overflowing pairs raise."""
    # Same-kind promotion: result unit is the gcd-equivalent of the two
    # operand units (e.g. 12Y & 15Y -> 3Y, 1W & 2D -> 1D).
    for mM in ['m', 'M']:
        assert_equal(
            np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
            np.dtype(mM+'8[2Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
            np.dtype(mM+'8[3Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
            np.dtype(mM+'8[2M]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
            np.dtype(mM+'8[1D]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
            np.dtype(mM+'8[s]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
            np.dtype(mM+'8[7s]'))
    # Year/month units have no fixed ratio to day-based units.
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[Y]'), np.dtype('m8[D]'))
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[M]'), np.dtype('m8[W]'))
    # Timedeltas never promote with floats or unsigned ints.
    assert_raises(TypeError, np.promote_types, "float32", "m8")
    assert_raises(TypeError, np.promote_types, "m8", "float32")
    assert_raises(TypeError, np.promote_types, "uint64", "m8")
    assert_raises(TypeError, np.promote_types, "m8", "uint64")
    # A common unit whose multiplier cannot be represented overflows.
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[W]'), np.dtype('m8[fs]'))
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[s]'), np.dtype('m8[as]'))
def test_cast_overflow(self):
def cast():
numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
assert_raises(OverflowError, cast2)
def test_pyobject_roundtrip(self):
    """datetime64 -> object -> datetime64 round-trips exactly for both
    date-based and time-based units, extreme years, and NaT."""
    # Raw int64 payloads; indices 0-8 are overwritten below, the rest
    # keep their arbitrary integer tick counts.
    a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
                  -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
                 dtype=np.int64)
    # Date-based units.
    for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
        b = a.copy().view(dtype=unit)
        b[0] = '-0001-01-01'
        b[1] = '-0001-12-31'
        b[2] = '0000-01-01'
        b[3] = '0001-01-01'
        b[4] = '1969-12-31'
        b[5] = '1970-01-01'
        b[6] = '9999-12-31'
        b[7] = '10000-01-01'
        b[8] = 'NaT'
        assert_equal(b.astype(object).astype(unit), b,
                     "Error roundtripping unit %s" % unit)
    # Time-based units, including multiples like 16fs and 20us.
    for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
                 'M8[300as]', 'M8[20us]']:
        b = a.copy().view(dtype=unit)
        b[0] = '-0001-01-01T00'
        b[1] = '-0001-12-31T00'
        b[2] = '0000-01-01T00'
        b[3] = '0001-01-01T00'
        b[4] = '1969-12-31T23:59:59.999999'
        b[5] = '1970-01-01T00'
        b[6] = '9999-12-31T23:59:59.999999'
        b[7] = '10000-01-01T00'
        b[8] = 'NaT'
        assert_equal(b.astype(object).astype(unit), b,
                     "Error roundtripping unit %s" % unit)
def test_month_truncation(self):
    """Any timestamp within a month truncates to that month, both when
    parsing directly into M8[M] and when downcasting from finer units."""
    assert_equal(np.array('1945-03-01', dtype='M8[M]'),
                 np.array('1945-03-31', dtype='M8[M]'))
    # Sub-second timestamps at the very end of a month still truncate
    # to the month start (no rounding up across the boundary).
    assert_equal(np.array('1969-11-01', dtype='M8[M]'),
         np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
    assert_equal(np.array('1969-12-01', dtype='M8[M]'),
         np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
    assert_equal(np.array('1970-01-01', dtype='M8[M]'),
         np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
    assert_equal(np.array('1980-02-01', dtype='M8[M]'),
         np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
def test_datetime_like(self):
a = np.array([3], dtype='m8[4D]')
b = np.array(['2012-12-21'], dtype='M8[D]')
assert_equal(np.ones_like(a).dtype, a.dtype)
assert_equal(np.zeros_like(a).dtype, a.dtype)
assert_equal(np.empty_like(a).dtype, a.dtype)
assert_equal(np.ones_like(b).dtype, b.dtype)
assert_equal(np.zeros_like(b).dtype, b.dtype)
assert_equal(np.empty_like(b).dtype, b.dtype)
def test_datetime_unary(self):
for tda, tdb, tdzero, tdone, tdmone in \
[
(np.array([3], dtype='m8[D]'),
np.array([-3], dtype='m8[D]'),
np.array([0], dtype='m8[D]'),
np.array([1], dtype='m8[D]'),
np.array([-1], dtype='m8[D]')),
(np.timedelta64(3, '[D]'),
np.timedelta64(-3, '[D]'),
np.timedelta64(0, '[D]'),
np.timedelta64(1, '[D]'),
np.timedelta64(-1, '[D]'))]:
assert_equal(-tdb, tda)
assert_equal((-tdb).dtype, tda.dtype)
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
assert_equal(np.positive(tda), tda)
assert_equal(np.positive(tda).dtype, tda.dtype)
assert_equal(np.positive(tdb), tdb)
assert_equal(np.positive(tdb).dtype, tdb.dtype)
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
assert_equal(np.sign(tda), tdone)
assert_equal(np.sign(tdb), tdmone)
assert_equal(np.sign(tdzero), tdzero)
assert_equal(np.sign(tda).dtype, tda.dtype)
assert_
def test_datetime_multiply(self):
    """Multiplication rules: timedelta * int/float works; any product
    involving a datetime, or timedelta * timedelta, is a TypeError;
    NaT/inf/nan operands propagate NaT."""
    for dta, tda, tdb, tdc in \
            [
             # One-dimensional arrays
             (np.array(['2012-12-21'], dtype='M8[D]'),
              np.array([6], dtype='m8[h]'),
              np.array([9], dtype='m8[h]'),
              np.array([12], dtype='m8[h]')),
             # NumPy scalars
             (np.datetime64('2012-12-21', '[D]'),
              np.timedelta64(6, '[h]'),
              np.timedelta64(9, '[h]'),
              np.timedelta64(12, '[h]'))]:
        # m8 * int and int * m8
        assert_equal(tda * 2, tdc)
        assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
        assert_equal(2 * tda, tdc)
        assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
        # m8 * float and float * m8 (6h * 1.5 == 9h)
        assert_equal(tda * 1.5, tdb)
        assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
        assert_equal(1.5 * tda, tdb)
        assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))
        # m8 * m8 has no meaningful unit
        assert_raises(TypeError, np.multiply, tda, tdb)
        # every combination involving a datetime is invalid
        assert_raises(TypeError, np.multiply, dta, tda)
        assert_raises(TypeError, np.multiply, tda, dta)
        assert_raises(TypeError, np.multiply, dta, 2)
        assert_raises(TypeError, np.multiply, 2, dta)
        assert_raises(TypeError, np.multiply, dta, 1.5)
        assert_raises(TypeError, np.multiply, 1.5, dta)
    # NaT propagation (the RuntimeWarning from nan products is expected).
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
        nat = np.timedelta64('NaT')

        def check(a, b, res):
            # multiplication must be commutative here
            assert_equal(a * b, res)
            assert_equal(b * a, res)
        for tp in (int, float):
            check(nat, tp(2), nat)
            check(nat, tp(0), nat)
        for f in (float('inf'), float('nan')):
            check(np.timedelta64(1), f, nat)
            check(np.timedelta64(0), f, nat)
            check(nat, f, nat)
@pytest.mark.parametrize("op1, op2, exp", [
    # same units, positive operands
    (np.timedelta64(7, 's'),
     np.timedelta64(4, 's'),
     1),
    # negative divisor: result rounds toward -inf
    (np.timedelta64(7, 's'),
     np.timedelta64(-4, 's'),
     -2),
    (np.timedelta64(8, 's'),
     np.timedelta64(-4, 's'),
     -2),
    # different (compatible) units
    (np.timedelta64(1, 'm'),
     np.timedelta64(31, 's'),
     1),
    # generic (unit-less) operands
    (np.timedelta64(1890),
     np.timedelta64(31),
     60),
    # years divided by months is well-defined (2Y == 24M)
    (np.timedelta64(2, 'Y'),
     np.timedelta64('13', 'M'),
     1),
    # arrays broadcast and yield an int64 array
    (np.array([1, 2, 3], dtype='m8'),
     np.array([2], dtype='m8'),
     np.array([0, 1, 1], dtype=np.int64)),
])
def test_timedelta_floor_divide(self, op1, op2, exp):
    """timedelta // timedelta floor-divides to a plain integer result."""
    assert_equal(op1 // op2, exp)
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
@pytest.mark.parametrize("op1, op2", [
    # division by zero
    (np.timedelta64(10, 'us'),
     np.timedelta64(0, 'us')),
    # NaT dividend
    (np.timedelta64('NaT'),
     np.timedelta64(50, 'us')),
    # int64-min // -1 would overflow
    (np.timedelta64(np.iinfo(np.int64).min),
     np.timedelta64(-1)),
])
def test_timedelta_floor_div_warnings(self, op1, op2):
    """Degenerate floor divisions emit a RuntimeWarning and yield 0."""
    with assert_warns(RuntimeWarning):
        actual = op1 // op2
        assert_equal(actual, 0)
        assert_equal(actual.dtype, np.int64)
@pytest.mark.parametrize("val1, val2", [
    # 2**53 + 1: the first integer a float64 cannot represent exactly
    (9007199254740993, 1),
    # a negative divisor near the same magnitude
    (9007199254740999, -2),
])
def test_timedelta_floor_div_precision(self, val1, val2):
    """Floor division of timedeltas must stay exact for operands beyond
    float64's 53-bit mantissa (i.e. not go through float division)."""
    dividend = np.timedelta64(val1)
    divisor = np.timedelta64(val2)
    assert_equal(dividend // divisor, val1 // val2)
@pytest.mark.parametrize("val1, val2", [
    # years/months cannot be converted exactly to day-based units
    (np.timedelta64(7, 'Y'),
     np.timedelta64(3, 's')),
    (np.timedelta64(7, 'M'),
     np.timedelta64(1, 'D')),
])
def test_timedelta_floor_div_error(self, val1, val2):
    """Floor division across incommensurable units raises TypeError."""
    with assert_raises_regex(TypeError, "common metadata divisor"):
        val1 // val2
@pytest.mark.parametrize("op1, op2", [
    # same units
    (np.timedelta64(7, 's'),
     np.timedelta64(4, 's')),
    # negative divisors (floor semantics)
    (np.timedelta64(7, 's'),
     np.timedelta64(-4, 's')),
    (np.timedelta64(8, 's'),
     np.timedelta64(-4, 's')),
    # different (compatible) units
    (np.timedelta64(1, 'm'),
     np.timedelta64(31, 's')),
    # generic (unit-less) operands
    (np.timedelta64(1890),
     np.timedelta64(31)),
    # years divided by months
    (np.timedelta64(2, 'Y'),
     np.timedelta64('13', 'M')),
    # broadcasting arrays
    (np.array([1, 2, 3], dtype='m8'),
     np.array([2], dtype='m8')),
])
def test_timedelta_divmod(self, op1, op2):
    """divmod on timedeltas must agree with the (//, %) pair."""
    expected = (op1 // op2, op1 % op2)
    assert_equal(divmod(op1, op2), expected)
@pytest.mark.skipif(IS_WASM, reason="does not work in wasm")
@pytest.mark.parametrize("op1, op2", [
    # division by zero
    (np.timedelta64(10, 'us'),
     np.timedelta64(0, 'us')),
    # NaT dividend
    (np.timedelta64('NaT'),
     np.timedelta64(50, 'us')),
    # int64-min // -1 would overflow
    (np.timedelta64(np.iinfo(np.int64).min),
     np.timedelta64(-1)),
])
def test_timedelta_divmod_warnings(self, op1, op2):
    """divmod on degenerate operands warns and still matches (//, %)."""
    with assert_warns(RuntimeWarning):
        expected = (op1 // op2, op1 % op2)
    with assert_warns(RuntimeWarning):
        actual = divmod(op1, op2)
    assert_equal(actual, expected)
def test_datetime_divide(self):
    """True division: timedelta / number keeps the timedelta dtype,
    timedelta / timedelta yields f8; datetime division is a TypeError;
    NaT, zero, inf, and nan divisors have defined results."""
    for dta, tda, tdb, tdc, tdd in \
            [
             # One-dimensional arrays
             (np.array(['2012-12-21'], dtype='M8[D]'),
              np.array([6], dtype='m8[h]'),
              np.array([9], dtype='m8[h]'),
              np.array([12], dtype='m8[h]'),
              np.array([6], dtype='m8[m]')),
             # NumPy scalars
             (np.datetime64('2012-12-21', '[D]'),
              np.timedelta64(6, '[h]'),
              np.timedelta64(9, '[h]'),
              np.timedelta64(12, '[h]'),
              np.timedelta64(6, '[m]'))]:
        # m8 / int
        assert_equal(tdc / 2, tda)
        assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
        # m8 / float
        assert_equal(tda / 0.5, tdc)
        assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
        # m8 / m8 -> dimensionless float64
        assert_equal(tda / tdb, 6 / 9)
        assert_equal(np.divide(tda, tdb), 6 / 9)
        assert_equal(np.true_divide(tda, tdb), 6 / 9)
        assert_equal(tdb / tda, 9 / 6)
        assert_equal((tda / tdb).dtype, np.dtype('f8'))
        # Units are normalized before dividing: 6h / 6m == 60.
        assert_equal(tda / tdd, 60)
        assert_equal(tdd / tda, 1 / 60)
        # int / m8, float / m8, and anything involving a datetime fail.
        assert_raises(TypeError, np.divide, 2, tdb)
        assert_raises(TypeError, np.divide, 0.5, tdb)
        assert_raises(TypeError, np.divide, dta, tda)
        assert_raises(TypeError, np.divide, tda, dta)
        assert_raises(TypeError, np.divide, dta, 2)
        assert_raises(TypeError, np.divide, 2, dta)
        assert_raises(TypeError, np.divide, dta, 1.5)
        assert_raises(TypeError, np.divide, 1.5, dta)
    # NaT and nonfinite divisors (warnings are expected and filtered).
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, r".*encountered in divide")
        nat = np.timedelta64('NaT')
        for tp in (int, float):
            # division by zero or with a NaT operand yields NaT
            assert_equal(np.timedelta64(1) / tp(0), nat)
            assert_equal(np.timedelta64(0) / tp(0), nat)
            assert_equal(nat / tp(0), nat)
            assert_equal(nat / tp(2), nat)
        # division by inf collapses to zero; by nan gives NaT
        assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
        assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
        assert_equal(nat / float('inf'), nat)
        assert_equal(np.timedelta64(1) / float('nan'), nat)
        assert_equal(np.timedelta64(0) / float('nan'), nat)
        assert_equal(nat / float('nan'), nat)
def test_datetime_compare(self):
a = np.datetime64('2000-03-12T18:00:00.000000')
b = np.array(['2000-03-12T18:00:00.000000',
'2000-03-12T17:59:59.999999',
'2000-03-12T18:00:00.000001',
'1970-01-11T12:00:00.909090',
'2016-01-11T12:00:00.909090'],
dtype='datetime64[us]')
assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
def test_datetime_compare_nat(self):
dt_nat = np.datetime64('NaT', 'D')
dt_other = np.datetime64('2000-01-01')
td_nat = np.timedelta64('NaT', 'h')
td_other = np.timedelta64(1, 'h')
for op in [np.equal, np.less, np.less_equal,
np.greater, np.greater_equal]:
assert_(not op(dt_nat, dt_nat))
assert_(not op(dt_nat, dt_other))
assert_(not op(dt_other, dt_nat))
assert_(not op(td_nat, td_nat))
assert_(not op(td_nat, td_other))
assert_(not op(td_other, td_nat))
assert_(np.not_equal(dt_nat, dt_nat))
assert_(np.not_equal(dt_nat, dt_other))
assert_(np.not_equal(dt_other, dt_nat))
assert_(np.not_equal(td_nat, td_nat))
assert_(np.not_equal(td_nat, td_other))
assert_(np.not_equal(td_other, td_nat))
def test_datetime_minmax(self):
    """minimum/maximum/fmin/fmax on datetimes and timedeltas: results
    take the finer unit; NaT propagates through minimum/maximum but is
    ignored by fmin/fmax; mixed m8/M8 operands are rejected."""
    a = np.array('1999-03-12T13', dtype='M8[2m]')
    b = np.array('1999-03-12T12', dtype='M8[s]')
    assert_equal(np.minimum(a, b), b)
    assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.fmin(a, b), b)
    assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.maximum(a, b), a)
    assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.fmax(a, b), a)
    assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
    # Viewed as raw int64 the unit metadata is gone, so the comparison
    # flips: a's raw tick count (in 2-minute units) is the smaller int.
    assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))

    # Interaction with NaT.
    a = np.array('1999-03-12T13', dtype='M8[2m]')
    dtnat = np.array('NaT', dtype='M8[h]')
    assert_equal(np.minimum(a, dtnat), dtnat)
    assert_equal(np.minimum(dtnat, a), dtnat)
    assert_equal(np.maximum(a, dtnat), dtnat)
    assert_equal(np.maximum(dtnat, a), dtnat)
    assert_equal(np.fmin(dtnat, a), a)
    assert_equal(np.fmin(a, dtnat), a)
    assert_equal(np.fmax(dtnat, a), a)
    assert_equal(np.fmax(a, dtnat), a)

    # Same checks for timedeltas (3h vs 3h - 3s).
    a = np.array(3, dtype='m8[h]')
    b = np.array(3*3600 - 3, dtype='m8[s]')
    assert_equal(np.minimum(a, b), b)
    assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.fmin(a, b), b)
    assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.maximum(a, b), a)
    assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.fmax(a, b), a)
    assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))

    # Mixing timedelta and datetime must not cast implicitly.
    a = np.array(3, dtype='m8[h]')
    b = np.array('1999-03-12T12', dtype='M8[s]')
    assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
    assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
    assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
    assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
def test_hours(self):
t = np.ones(3, dtype='M8[s]')
t[0] = 60*60*24 + 60*60*10
assert_(t[0].item().hour == 10)
def test_divisor_conversion_year(self):
assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
def test_divisor_conversion_month(self):
assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
def test_divisor_conversion_week(self):
assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
def test_divisor_conversion_day(self):
assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
def test_divisor_conversion_hour(self):
assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
def test_divisor_conversion_minute(self):
assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
def test_divisor_conversion_second(self):
assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
assert_raises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
assert_raises(ValueError, lambda: np.dtype('M8[as/10]'))
def test_string_parser_variants(self):
    """Parser tolerance: space vs 'T' separators, signed years, and
    timezone offsets (which warn and fold into the stored UTC value)."""
    msg = "no explicit representation of timezones available for " \
          "np.datetime64"
    # A space may replace the 'T' separator.
    assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
                 np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
    # Explicitly signed years are accepted.
    assert_equal(np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
                 np.array(['+1980-02-29 01:02:03'], np.dtype('M8[s]')))
    assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
                 np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
    # The 'Z' UTC suffix warns but changes nothing.
    with pytest.warns(UserWarning, match=msg):
        assert_equal(
            np.array(['+1980-02-29T01:02:03'], np.dtype('M8[s]')),
            np.array(['+1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
    with pytest.warns(UserWarning, match=msg):
        assert_equal(
            np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
            np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
    # Numeric offsets ('-0130', '+01:30', '-02') shift to UTC.
    with pytest.warns(UserWarning, match=msg):
        assert_equal(
            np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
            np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
    with pytest.warns(UserWarning, match=msg):
        assert_equal(
            np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
            np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
    with pytest.warns(UserWarning, match=msg):
        assert_equal(
            np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
            np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
    with pytest.warns(UserWarning, match=msg):
        assert_equal(np.datetime64('1977-03-02T12:30-0230'),
                     np.datetime64('1977-03-02T15:00'))
def test_creation_overflow(self):
    """Tick counts scale by 1000 per finer unit without losing value;
    construction far outside the representable range raises."""
    date = '1980-03-23 20:00:00'
    # Seconds-resolution tick count of the reference date.
    timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
    for unit in ['ms', 'us', 'ns']:
        timesteps *= 1000
        x = np.array([date], dtype='datetime64[%s]' % unit)
        assert_equal(timesteps, x[0].astype(np.int64),
                     err_msg='Datetime conversion error for unit %s' % unit)
    # x holds the ns-resolution value from the final loop iteration.
    assert_equal(x[0].astype(np.int64), 322689600000000000)
    # Values beyond the int64 tick range must raise OverflowError.
    with pytest.raises(OverflowError):
        np.datetime64(2**64, 'D')
    with pytest.raises(OverflowError):
        np.timedelta64(2**64, 'D')
@pytest.mark.skipif(not _has_pytz, reason="The pytz module is not available.")
def test_datetime_as_string_timezone(self):
    """datetime_as_string with naive/UTC/local and pytz timezones."""
    # 'naive' matches the default; 'UTC' appends a 'Z' suffix.
    a = np.datetime64('2010-03-15T06:30', 'm')
    assert_equal(np.datetime_as_string(a),
                 '2010-03-15T06:30')
    assert_equal(np.datetime_as_string(a, timezone='naive'),
                 '2010-03-15T06:30')
    assert_equal(np.datetime_as_string(a, timezone='UTC'),
                 '2010-03-15T06:30Z')
    # 'local' output depends on the machine's timezone; only assert it
    # differs from the naive rendering.
    assert_(np.datetime_as_string(a, timezone='local') !=
            '2010-03-15T06:30')
    # 'b' is a winter date, so the US offsets below are standard time;
    # 'a' falls after the 2010 DST switch.
    b = np.datetime64('2010-02-15T06:30', 'm')
    assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),
                 '2010-03-15T01:30-0500')
    assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),
                 '2010-03-15T02:30-0400')
    assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),
                 '2010-03-14T23:30-0700')
    assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),
                 '2010-02-15T00:30-0600')
    assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),
                 '2010-02-15T01:30-0500')
    assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),
                 '2010-02-14T22:30-0800')
    # Rendering at day resolution with a timezone needs casting='unsafe'.
    assert_raises(TypeError, np.datetime_as_string, a, unit='D',
                  timezone=tz('US/Pacific'))
    assert_equal(np.datetime_as_string(a, unit='D',
                 timezone=tz('US/Pacific'), casting='unsafe'),
                 '2010-03-14')
    assert_equal(np.datetime_as_string(b, unit='D',
                 timezone=tz('US/Central'), casting='unsafe'),
                 '2010-02-15')
def test_datetime_arange(self):
    # Forward range with an explicit day unit; stop is exclusive.
    a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
    assert_equal(a.dtype, np.dtype('M8[D]'))
    assert_equal(a,
                 np.array(['2010-01-05', '2010-01-06', '2010-01-07',
                           '2010-01-08', '2010-01-09'], dtype='M8[D]'))

    # A negative step counts backwards, again excluding the stop date.
    a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
    assert_equal(a.dtype, np.dtype('M8[D]'))
    assert_equal(a,
                 np.array(['1950-02-10', '1950-02-09', '1950-02-08',
                           '1950-02-07'], dtype='M8[D]'))

    # With the generic 'M8' dtype the unit is deduced from the inputs
    # (month precision here).
    a = np.arange('1969-05', '1970-05', 2, dtype='M8')
    assert_equal(a.dtype, np.dtype('M8[M]'))
    assert_equal(a,
                 np.datetime64('1969-05') + np.arange(12, step=2))

    # An integer stop acts as an offset (in deduced units) from start.
    a = np.arange('1969', 18, 3, dtype='M8')
    assert_equal(a.dtype, np.dtype('M8[Y]'))
    assert_equal(a,
                 np.datetime64('1969') + np.arange(18, step=3))

    # A unit-less timedelta step works with a deduced day unit.
    a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
    assert_equal(a.dtype, np.dtype('M8[D]'))
    assert_equal(a,
                 np.datetime64('1969-12-19') + np.arange(22, step=2))

    # A zero step is invalid.
    assert_raises(ValueError, np.arange, np.datetime64('today'),
                  np.datetime64('today') + 3, 0)
    # Month/year steps cannot be combined with day/second-based datetimes
    # (no fixed conversion between the units).
    assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
                  np.timedelta64(5, 'M'))
    assert_raises(TypeError, np.arange,
                  np.datetime64('2012-02-03T14', 's'),
                  np.timedelta64(5, 'Y'))
def test_datetime_arange_no_dtype(self):
d = np.array('2010-01-04', dtype="M8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
def test_timedelta_arange(self):
a = np.arange(3, 10, dtype='m8')
assert_equal(a.dtype, np.dtype('m8'))
assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
assert_equal(a.dtype, np.dtype('m8[s]'))
assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
assert_raises(ValueError, np.arange, np.timedelta64(0),
np.timedelta64(5), 0)
assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
@pytest.mark.parametrize("val1, val2, expected", [
    # Same-unit cases covering sign combinations; the expected values show
    # Python modulo semantics (the result takes the sign of the divisor).
    (np.timedelta64(7, 's'),
     np.timedelta64(3, 's'),
     np.timedelta64(1, 's')),
    (np.timedelta64(3, 's'),
     np.timedelta64(-2, 's'),
     np.timedelta64(-1, 's')),
    (np.timedelta64(-3, 's'),
     np.timedelta64(2, 's'),
     np.timedelta64(1, 's')),
    # Dividend smaller than divisor: returned unchanged.
    (np.timedelta64(17, 's'),
     np.timedelta64(22, 's'),
     np.timedelta64(17, 's')),
    (np.timedelta64(22, 's'),
     np.timedelta64(17, 's'),
     np.timedelta64(5, 's')),
    # Mixed but compatible units are promoted to the finer unit
    # (1 minute = 60 s -> 60 % 57 = 3 s; 1 us = 1000 ns -> 1000 % 727).
    (np.timedelta64(1, 'm'),
     np.timedelta64(57, 's'),
     np.timedelta64(3, 's')),
    (np.timedelta64(1, 'us'),
     np.timedelta64(727, 'ns'),
     np.timedelta64(273, 'ns')),
    # NaT propagates through the operation.
    (np.timedelta64('NaT'),
     np.timedelta64(50, 'ns'),
     np.timedelta64('NaT')),
    # Year and month are compatible with each other (2 Y = 24 M).
    (np.timedelta64(2, 'Y'),
     np.timedelta64(22, 'M'),
     np.timedelta64(2, 'M')),
])
def test_timedelta_modulus(self, val1, val2, expected):
    # The % operator on timedelta64 scalars.
    assert_equal(val1 % val2, expected)
@pytest.mark.parametrize("val1, val2", [
    # Year/month units have no fixed length in days/seconds, so these
    # pairs share no common unit to compute a remainder in.
    (np.timedelta64(7, 'Y'),
     np.timedelta64(3, 's')),
    (np.timedelta64(7, 'M'),
     np.timedelta64(1, 'D')),
])
def test_timedelta_modulus_error(self, val1, val2):
    # Incompatible unit pairs must raise rather than silently convert.
    with assert_raises_regex(TypeError, "common metadata divisor"):
        val1 % val2
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
def test_timedelta_modulus_div_by_zero(self):
    # Modulo by a zero timedelta emits a RuntimeWarning and yields NaT
    # instead of raising.
    with assert_warns(RuntimeWarning):
        actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
        assert_equal(actual, np.timedelta64('NaT'))
@pytest.mark.parametrize("val1, val2", [
    # Mixing a timedelta with a plain int or float has no defined
    # remainder, whichever operand comes first.
    (np.timedelta64(7, 'Y'),
     15,),
    (7.5,
     np.timedelta64(1, 'D')),
])
def test_timedelta_modulus_type_resolution(self, val1, val2):
    # NOTE: can change on the fly
    with assert_raises_regex(TypeError,
                             "'remainder' cannot use operands with types"):
        val1 % val2
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_equal(np.arange(d), np.arange(0, d))
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
assert_equal(np.maximum.reduce(a),
np.datetime64('2010-01-02'))
a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
def test_timedelta_correct_mean(self):
a = np.arange(1000, dtype="m8[s]")
assert_array_equal(a.mean(), a.sum() / len(a))
def test_datetime_no_subtract_reducelike(self):
    # Subtracting two datetimes resolves to a timedelta, which is not the
    # input dtype, so subtract cannot be used in any reduce-like operation
    # on a datetime array; all three variants must raise.
    arr = np.array(["2021-12-02", "2019-05-12"], dtype="M8[ms]")
    msg = r"the resolved dtypes are not compatible"

    with pytest.raises(TypeError, match=msg):
        np.subtract.reduce(arr)

    with pytest.raises(TypeError, match=msg):
        np.subtract.accumulate(arr)

    with pytest.raises(TypeError, match=msg):
        np.subtract.reduceat(arr, [0])
def test_datetime_busdaycalendar(self):
bdd = np.busdaycalendar(
holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
'2011-12-26', '2011-05-30', '2011-01-17'])
assert_equal(bdd.holidays,
np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))
bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))
bdd = np.busdaycalendar(weekmask="0011001")
assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))
bdd = np.busdaycalendar(weekmask="Mon Tue")
assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))
assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
assert_raises(ValueError, np.busdaycalendar, weekmask="")
assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
def test_datetime_busday_holidays_count(self):
    holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
                '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
                '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
                '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
    bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)

    # Offsetting forward by n business days and counting back must give n;
    # with the endpoints swapped the count is -n - 1 (the interval is
    # half-open, so the direction matters by one day).
    dates = np.busday_offset('2011-01-01', np.arange(366),
                             roll='forward', busdaycal=bdd)
    assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
                 np.arange(366))
    assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
                 -np.arange(366) - 1)

    # Same check counting backwards from the end of the year
    # (2011-12-31 itself is a Saturday, hence the adjusted first entries).
    dates = np.busday_offset('2011-12-31', -np.arange(366),
                             roll='forward', busdaycal=bdd)
    expected = np.arange(366)
    expected[0] = -1
    assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
                 expected)
    expected = -np.arange(366) + 1
    expected[0] = 0
    assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
                 expected)

    # weekmask/holidays cannot be combined with an explicit busdaycal.
    assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                  weekmask='1111100', busdaycal=bdd)
    assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                  holidays=holidays, busdaycal=bdd)

    # Number of Mondays in March 2011; negative when reversed.
    assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
    assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)

    # Half-open interval semantics across a weekend boundary.
    sunday = np.datetime64('2023-03-05')
    monday = sunday + 1
    friday = sunday + 5
    saturday = sunday + 6
    assert_equal(np.busday_count(sunday, monday), 0)
    assert_equal(np.busday_count(monday, sunday), -1)
    assert_equal(np.busday_count(friday, saturday), 1)
    assert_equal(np.busday_count(saturday, friday), 0)
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
'NaT']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
assert_equal(np.is_busday('2011-01-01'), False)
assert_equal(np.is_busday('2011-01-02'), False)
assert_equal(np.is_busday('2011-01-03'), True)
assert_equal(np.is_busday(holidays, busdaycal=bdd),
np.zeros(len(holidays), dtype='?'))
def test_datetime_y2038(self):
    msg = "no explicit representation of timezones available for " \
          "np.datetime64"

    # Parsing on either side of the signed-32-bit Unix-time rollover
    # (2**31 seconds after the epoch) must not wrap.
    a = np.datetime64('2038-01-19T03:14:07')
    assert_equal(a.view(np.int64), 2**31 - 1)
    a = np.datetime64('2038-01-19T03:14:08')
    assert_equal(a.view(np.int64), 2**31)

    # The same boundary expressed with an explicit UTC offset; offsets
    # always warn because datetime64 stores no timezone.
    with pytest.warns(UserWarning, match=msg):
        a = np.datetime64('2038-01-19T04:14:07+0100')
        assert_equal(a.view(np.int64), 2**31 - 1)
    with pytest.warns(UserWarning, match=msg):
        a = np.datetime64('2038-01-19T04:14:08+0100')
        assert_equal(a.view(np.int64), 2**31)

    # A post-rollover datetime round-trips through str().
    a = np.datetime64('2038-01-20T13:21:14')
    assert_equal(str(a), '2038-01-20T13:21:14')
def test_isnat(self):
assert_(np.isnat(np.datetime64('NaT', 'ms')))
assert_(np.isnat(np.datetime64('NaT', 'ns')))
assert_(not np.isnat(np.datetime64('2038-01-19T03:14:07')))
assert_(np.isnat(np.timedelta64('NaT', "ms")))
assert_(not np.isnat(np.timedelta64(34, "ms")))
res = np.array([False, False, True])
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
assert_equal(np.isnat(arr), res)
def test_isnat_error(self):
for t in np.typecodes["All"]:
if t in np.typecodes["Datetime"]:
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
@pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
                                  'us', 'ns', 'ps', 'fs', 'as'])
@pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
                                  '<timedelta64[%s]', '>timedelta64[%s]'])
def test_isfinite_isinf_isnan_units(self, unit, dstr):
    '''
    Check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
    '''
    arr_val = [123, -321, "NaT"]
    arr = np.array(arr_val, dtype=dstr % unit)
    # Ordinary values are finite; NaT behaves like NaN and never like inf.
    pos = np.array([True, True,  False])
    neg = np.array([False, False, True])
    false = np.array([False, False, False])
    assert_equal(np.isfinite(arr), pos)
    assert_equal(np.isinf(arr), false)
    assert_equal(np.isnan(arr), neg)
def test_assert_equal(self):
assert_raises(AssertionError, assert_equal,
np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
a, b = [], []
a.append(b)
b.append(a)
obj_arr = np.array([None])
obj_arr[0] = a
assert_raises(ValueError, obj_arr.astype, 'M8')
assert_raises(ValueError, obj_arr.astype, 'm8')
@pytest.mark.parametrize("shape", [(), (1,)])
def test_discovery_from_object_array(self, shape):
    # Datetime unit discovery must behave identically for str, bytes, and
    # bytes-dtype contents of an object array, for 0-d and 1-d shapes.
    arr = np.array("2020-10-10", dtype=object).reshape(shape)
    res = np.array("2020-10-10", dtype="M8").reshape(shape)
    assert res.dtype == np.dtype("M8[D]")
    assert_equal(arr.astype("M8"), res)
    # Same value stored as a bytes scalar.
    arr[...] = np.bytes_("2020-10-10")
    assert_equal(arr.astype("M8"), res)
    # And going through a bytes ("S") dtype array.
    arr = arr.astype("S")
    assert_equal(arr.astype("S").astype("M8"), res)
@pytest.mark.parametrize("time_unit", [
    # base units
    "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
    # multiplied units
    "10D", "2M",
])
def test_limit_symmetry(self, time_unit):
    """
    Dates should have symmetric limits around the unix epoch at +/-np.int64
    """
    epoch = np.datetime64(0, time_unit)
    latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
    # NOTE(review): the limit uses -max rather than min — presumably
    # because one int64 value is reserved for NaT; confirm against the
    # datetime64 internals.
    earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)

    # The ordering must hold strictly at both extremes.
    assert earliest < epoch < latest
@pytest.mark.parametrize("time_unit", [
    "Y", "M",
    pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
    "D", "h", "m",
    "s", "ms", "us", "ns", "ps", "fs", "as",
    pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
])
@pytest.mark.parametrize("sign", [-1, 1])
def test_limit_str_roundtrip(self, time_unit, sign):
    """
    Limits should roundtrip when converted to strings.

    This tests the conversion to and from npy_datetimestruct.
    """
    limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)

    # Convert to a string and back; a mismatch indicates an overflow in
    # the string conversion at the extreme representable values.
    limit_via_str = np.datetime64(str(limit), time_unit)
    assert limit_via_str == limit
class TestDateTimeData:
    """Tests for np.datetime_data and the unit specifications it reads."""

    def test_basic(self):
        """datetime_data reports the (unit, multiplier) of a dtype."""
        arr = np.array(['1980-03-23'], dtype=np.datetime64)
        assert_equal(np.datetime_data(arr.dtype), ('D', 1))

    def test_bytes(self):
        """Byte strings are accepted wherever unit strings are."""
        # Both the (unit, multiplier) tuple form and the combined
        # "<count><unit>" form work with bytes.
        for unit_spec in [(b'ms', 5), b'5ms']:
            dt = np.datetime64('2000', unit_spec)
            assert np.datetime_data(dt.dtype) == ('ms', 5)

    def test_non_ascii(self):
        """The micro sign 'μ' is normalized to the ASCII 'u' unit."""
        for unit_spec in [('μs', 5), '5μs']:
            dt = np.datetime64('2000', unit_spec)
            assert np.datetime_data(dt.dtype) == ('us', 5)
def test_comparisons_return_not_implemented():
class custom:
__array_priority__ = 10000
obj = custom()
dt = np.datetime64('2000', 'ns')
td = dt - dt
for item in [dt, td]:
assert item.__eq__(obj) is NotImplemented
assert item.__ne__(obj) is NotImplemented
assert item.__le__(obj) is NotImplemented
assert item.__lt__(obj) is NotImplemented
assert item.__ge__(obj) is NotImplemented
assert item.__gt__(obj) is NotImplemented