Commit 4deec279 authored by thatPepe's avatar thatPepe Committed by MaYuhang
Browse files

Merge pull request #631 from InfiniTensor/issue/630

issue/630 - slightly improved unimplemented messages
parents f3a25b70 2a343a3a
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (shape, input_strides_or_None, test_elements_strides_or_None, extra_or_None)
# isin checks membership of each element in provided test_elements (tensor or list)
_TEST_CASES_DATA = [
((8, 8), None, None, None),
((8, 8), (16, 1), None, None),
((8, 8), None, (1,), None),
((2, 3, 4), None, None, None),
((1, 8), None, None, None),
((16, 64), (128, 1), None, None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 0, "rtol": 1e-2},
infinicore.float32: {"atol": 0, "rtol": 1e-3},
infinicore.int32: {"atol": 0, "rtol": 0},
}
_TENSOR_DTYPES = [infinicore.int32, infinicore.float32, infinicore.float16]
def parse_test_cases():
    """Build isin test cases for every configured dtype.

    For each (shape, input_strides, elements_strides, _extra) entry this
    generates two cases per dtype: a plain out-of-place call, and a call
    with an explicit boolean ``out`` tensor.

    Returns:
        list[TestCase]: the generated test cases.
    """
    test_cases = []
    # Unpack directly; the trailing "extra" slot is unused in all entries.
    for shape, in_strides, elements_strides, _extra in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            # Build "test elements" as a small 1-D tensor of the same dtype.
            # (Passing elements_strides straight through: the old
            # "x if x else None" was a no-op for the data we have.)
            elements_spec = TensorSpec.from_tensor((4,), elements_strides, dtype)
            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=[input_spec, elements_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="IsIn - OUT_OF_PLACE",
                )
            )
            # Explicit out: isin writes a boolean mask of the input's shape.
            out_spec = TensorSpec.from_tensor(shape, None, infinicore.bool)
            test_cases.append(
                TestCase(
                    inputs=[input_spec, elements_spec],
                    kwargs={},
                    output_spec=out_spec,
                    comparison_target="out",
                    tolerance=tol,
                    description="IsIn - INPLACE(out)",
                )
            )
    return test_cases
class OpTest(BaseOperatorTest):
    """IsIn operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "IsIn".
        super().__init__("IsIn")

    def get_test_cases(self):
        """Return the generated isin test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.isin(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.isin(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (shape, input_strides_or_None, out_strides_or_None)
# isneginf checks for -inf values
_TEST_CASES_DATA = [
((8, 8), None, None),
((8, 8), (16, 1), None),
((8, 8), None, (0, 1)),
((2, 3, 4), None, None),
((1, 8), None, None),
((16, 128), (256, 1), (256, 1)),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 0, "rtol": 1e-2},
infinicore.float32: {"atol": 0, "rtol": 1e-3},
infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.bfloat16]
def parse_test_cases():
    """Build isneginf test cases for every configured dtype.

    Each (shape, input_strides, out_strides) entry yields an out-of-place
    case and, when the out layout is not broadcast, an explicit-out case
    with a boolean output tensor.

    Returns:
        list[TestCase]: the generated test cases.
    """
    test_cases = []
    for shape, in_strides, out_strides in _TEST_CASES_DATA:
        out_supports_inplace = not is_broadcast(out_strides)
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            # isneginf produces a boolean mask of the input's shape.
            out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool)
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="IsNegInf - OUT_OF_PLACE",
                )
            )
            if out_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[input_spec],
                        # Was kwargs=None — use an empty dict like every other
                        # case so the framework can safely **-expand kwargs.
                        kwargs={},
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="IsNegInf - INPLACE(out)",
                    )
                )
    return test_cases
class OpTest(BaseOperatorTest):
    """IsNegInf operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "IsNegInf".
        super().__init__("IsNegInf")

    def get_test_cases(self):
        """Return the generated isneginf test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.isneginf(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.isneginf(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (shape, input_strides_or_None, out_strides_or_None)
# isposinf checks for +inf values
_TEST_CASES_DATA = [
((8, 8), None, None),
((8, 8), (16, 1), None),
((8, 8), None, (0, 1)),
((2, 3, 4), None, None),
((1, 8), None, None),
((16, 128), (256, 1), (256, 1)),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 0, "rtol": 1e-2},
infinicore.float32: {"atol": 0, "rtol": 1e-3},
infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32, infinicore.bfloat16]
def parse_test_cases():
    """Build isposinf test cases for every configured dtype.

    Each (shape, input_strides, out_strides) entry yields an out-of-place
    case and, when the out layout is not broadcast, an explicit-out case
    with a boolean output tensor.

    Returns:
        list[TestCase]: the generated test cases.
    """
    test_cases = []
    for shape, in_strides, out_strides in _TEST_CASES_DATA:
        out_supports_inplace = not is_broadcast(out_strides)
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            # isposinf produces a boolean mask of the input's shape.
            out_spec = TensorSpec.from_tensor(shape, out_strides, infinicore.bool)
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="IsPosInf - OUT_OF_PLACE",
                )
            )
            if out_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[input_spec],
                        # Was kwargs=None — use an empty dict like every other
                        # case so the framework can safely **-expand kwargs.
                        kwargs={},
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="IsPosInf - INPLACE(out)",
                    )
                )
    return test_cases
class OpTest(BaseOperatorTest):
    """IsPosInf operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "IsPosInf".
        super().__init__("IsPosInf")

    def get_test_cases(self):
        """Return the generated isposinf test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.isposinf(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.isposinf(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
# Test cases format: (input_shape, input_strides_or_None, reduction_or_None, log_target_bool_or_None)
# infinicore.nn.functional.kl_div(input, target, reduction='mean', log_target=False)
_TEST_CASES_DATA = [
((4, 5), None, "batchmean", None),
((8, 8), (512, 64), "sum", False),
((1, 10), None, "batchmean", True),
((16, 100), None, "batchmean", False),
((3, 7), None, "batchmean", None),
((2, 2), None, "sum", None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-2, "rtol": 1e-1},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Generate kl_div out-of-place test cases for every configured dtype.

    ``reduction`` / ``log_target`` are only forwarded when the data entry
    provides them, so the operator's own defaults stay in effect otherwise.
    """
    cases = []
    for shape, strides, reduction, log_target in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)
            target_spec = TensorSpec.from_tensor(shape, None, dtype)
            extra = {}
            if reduction is not None:
                extra["reduction"] = reduction
            if log_target is not None:
                extra["log_target"] = log_target
            cases.append(
                TestCase(
                    inputs=[input_spec, target_spec],
                    kwargs=extra,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="kl_div - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """kl_div operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "kl_div".
        super().__init__("kl_div")

    def get_test_cases(self):
        """Return the generated kl_div test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.nn.functional.kl_div(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.kl_div(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
# Test cases format: (a_shape, b_shape, a_strides_or_None, b_strides_or_None)
# infinicore.kron(a, b)
_TEST_CASES_DATA = [
((2, 3), (4, 1), None, None),
((1,), (3,), None, None),
((4, 4), (2, 2), (64, 16), (8, 1)),
((6,), (6,), None, None),
((3, 2), (2, 3), None, (12, 1)),
((8, 1), (1, 8), None, None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Generate kron out-of-place test cases for every configured dtype."""
    cases = []
    for lhs_shape, rhs_shape, lhs_strides, rhs_strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            lhs = TensorSpec.from_tensor(lhs_shape, lhs_strides, dtype)
            rhs = TensorSpec.from_tensor(rhs_shape, rhs_strides, dtype)
            cases.append(
                TestCase(
                    inputs=[lhs, rhs],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="kron - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """kron operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "kron".
        super().__init__("kron")

    def get_test_cases(self):
        """Return the generated kron test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.kron(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.kron(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (shape, input_strides, k, dim, keepdim)
_TEST_CASES_DATA = [
((6, 8), None, 1, 1, False),
((8, 4), (16, 1), 2, 0, True),
((5, 5), None, 3, -1, False),
((3, 7), (14, 1), 2, 1, True),
((10, 3), None, 1, 1, False),
((2, 16), (32, 1), 5, 1, False),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Generate kthvalue out-of-place test cases for every configured dtype.

    kthvalue returns (values, indices); only the plain out-of-place call is
    exercised here, so the unused explicit-out specs and the dead
    ``out_supports_inplace`` flag from the original were removed.

    Returns:
        list[TestCase]: the generated test cases.
    """
    test_cases = []
    for shape, in_strides, k, dim, keepdim in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={"k": k, "dim": dim, "keepdim": keepdim},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="kthvalue - OUT_OF_PLACE",
                )
            )
    return test_cases
class OpTest(BaseOperatorTest):
    """KthValue operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "KthValue".
        super().__init__("KthValue")

    def get_test_cases(self):
        """Return the generated kthvalue test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.kthvalue(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.kthvalue(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
# Test cases format: (input_shape, target_shape, input_strides_or_None, reduction_or_None)
# infinicore.nn.functional.l1_loss(input, target, reduction='mean')
_TEST_CASES_DATA = [
((4, 5), (4, 5), None, None),
((8, 8), (8, 8), (512, 64), "sum"),
((1, 10), (1, 10), None, "mean"),
((16, 100), (16, 100), None, None),
((3, 7), (3, 7), None, "none"),
((2, 2), (2, 2), None, None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Generate l1_loss out-of-place test cases for every configured dtype.

    ``reduction`` is only forwarded when the data entry provides it, so
    the operator's default stays in effect otherwise.
    """
    cases = []
    for in_shape, target_shape, in_strides, reduction in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype)
            target_spec = TensorSpec.from_tensor(target_shape, None, dtype)
            kwargs = {} if reduction is None else {"reduction": reduction}
            cases.append(
                TestCase(
                    inputs=[input_spec, target_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="l1_loss - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """l1_loss operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "l1_loss".
        super().__init__("l1_loss")

    def get_test_cases(self):
        """Return the generated l1_loss test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.nn.functional.l1_loss(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.l1_loss(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
# ldexp(input, other) computes input * (2**other)
_TEST_CASES_DATA = [
((2, 3), (3,)),
((1, 4, 8), None),
((3, 2, 5, 7), (1,)),
((2, 1, 16), None),
((1, 8, 9, 11), None),
((2, 6, 10), (1,)),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Build ldexp test cases.

    For every (shape, other_shape) entry and every dtype this yields an
    out-of-place case, an explicit-out case, and an in-place case.  The
    exponent tensor is always int32; a missing other_shape falls back to
    a single-element (1,) tensor.
    """
    cases = []
    for shape, raw_exp_shape in _TEST_CASES_DATA:
        exp_shape = (1,) if raw_exp_shape is None else raw_exp_shape
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dtype]
            mantissa_spec = TensorSpec.from_tensor(shape, None, dtype)
            exponent_spec = TensorSpec.from_tensor(exp_shape, None, infinicore.int32)
            # Out-of-place call.
            cases.append(
                TestCase(
                    inputs=[mantissa_spec, exponent_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="ldexp_out",
                )
            )
            # Explicit out tensor of the input dtype.
            cases.append(
                TestCase(
                    inputs=[mantissa_spec, exponent_spec],
                    kwargs={},
                    output_spec=TensorSpec.from_tensor(shape, None, dtype),
                    comparison_target="out",
                    tolerance=tolerance,
                    description="ldexp_explicit_out",
                )
            )
            # In-place on the first input.
            cases.append(
                TestCase(
                    inputs=[mantissa_spec, exponent_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=0,
                    tolerance=tolerance,
                    description="ldexp_inplace",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """Ldexp operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "Ldexp".
        super().__init__("Ldexp")

    def get_test_cases(self):
        """Return the generated ldexp test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.ldexp(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.ldexp(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, negative_slope_or_None)
_TEST_CASES_DATA = [
((13, 4), None, None),
((13, 4), (10, 1), 0.01),
((8, 8, 8), None, 0.2),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """
    leaky_relu(input, negative_slope=0.01, inplace=False)

    Generates one out-of-place case per dtype, plus an in-place case when
    the input layout supports it (non-broadcast strides).  Fixed: the
    description strings were f-strings with no placeholders (lint F541).
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None
        slope = data[2] if len(data) > 2 else 0.01
        input_supports_inplace = not is_broadcast(in_strides)
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            kwargs = {}
            # Only forward negative_slope when the entry provides one.
            if slope is not None:
                kwargs["negative_slope"] = slope
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="LeakyReLU - OUT_OF_PLACE",
                )
            )
            if input_supports_inplace:
                inplace_kwargs = {"inplace": True}
                if slope is not None:
                    inplace_kwargs["negative_slope"] = slope
                test_cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs=inplace_kwargs,
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tolerance,
                        description="LeakyReLU - INPLACE",
                    )
                )
    return test_cases
class OpTest(BaseOperatorTest):
    """LeakyReLU operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "LeakyReLU".
        super().__init__("LeakyReLU")

    def get_test_cases(self):
        """Return the generated leaky_relu test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.nn.functional.leaky_relu(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.leaky_relu(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (start_shape, start_strides_or_None, end_shape_or_None, weight_scalar_or_None, weight_tensor_shape_or_None)
# infinicore.lerp(start, end, weight)
_TEST_CASES_DATA = [
((2, 3, 4), None, None, 0.5, None),
((1, 4, 8), (32, 8, 1), None, None, (1, 4, 8)),
((3, 2, 5, 7), None, None, 0.25, None),
((2, 1, 16), None, None, None, (2, 1, 16)),
((1, 8, 9, 11), (792, 99, 11, 1), None, 0.75, None),
((2, 6, 10), None, None, None, (2, 6, 10)),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Build lerp test cases.

    Each data entry describes either a scalar weight, a tensor weight, or
    both; for each present weight kind we generate an out-of-place case,
    an explicit-out case, and in-place cases for every operand whose
    layout is not broadcast (in-place writes need a real, writable layout).
    """
    cases = []
    for (
        start_shape,
        start_strides,
        end_shape,
        weight_scalar,
        weight_tensor_shape,
    ) in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP[dtype]
            start_spec = TensorSpec.from_tensor(start_shape, start_strides, dtype)
            # end defaults to the same shape as start when not given.
            end_spec = TensorSpec.from_tensor(
                start_shape if end_shape is None else end_shape, None, dtype
            )
            if weight_scalar is not None:
                # --- scalar-weight variants ---
                weight = weight_scalar
                cases.append(
                    TestCase(
                        inputs=[start_spec, end_spec, weight],
                        kwargs={},
                        output_spec=None,
                        comparison_target=None,
                        tolerance=tol,
                        description="lerp_scalar_weight_out",
                    )
                )
                out_spec = TensorSpec.from_tensor(start_shape, None, dtype)
                cases.append(
                    TestCase(
                        inputs=[start_spec, end_spec, weight],
                        kwargs={},
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="lerp_scalar_weight_explicit_out",
                    )
                )
                # In-place into start (input index 0) when its layout allows.
                if not is_broadcast(start_spec.strides):
                    cases.append(
                        TestCase(
                            inputs=[start_spec, end_spec, weight],
                            kwargs={"out": 0},
                            output_spec=None,
                            comparison_target=0,
                            tolerance=tol,
                            description="lerp_scalar_inplace_start",
                        )
                    )
                # In-place into end (input index 1) when its layout allows.
                if not is_broadcast(end_spec.strides):
                    cases.append(
                        TestCase(
                            inputs=[start_spec, end_spec, weight],
                            kwargs={"out": 1},
                            output_spec=None,
                            comparison_target=1,
                            tolerance=tol,
                            description="lerp_scalar_inplace_end",
                        )
                    )
            if weight_tensor_shape is not None:
                # --- tensor-weight variants ---
                weight_spec = TensorSpec.from_tensor(weight_tensor_shape, None, dtype)
                cases.append(
                    TestCase(
                        inputs=[start_spec, end_spec, weight_spec],
                        kwargs={},
                        output_spec=None,
                        comparison_target=None,
                        tolerance=tol,
                        description="lerp_tensor_weight_out",
                    )
                )
                out_spec = TensorSpec.from_tensor(start_shape, None, dtype)
                cases.append(
                    TestCase(
                        inputs=[start_spec, end_spec, weight_spec],
                        kwargs={},
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="lerp_tensor_weight_explicit_out",
                    )
                )
                # In-place into the weight tensor (input index 2).
                if not is_broadcast(weight_spec.strides):
                    cases.append(
                        TestCase(
                            inputs=[start_spec, end_spec, weight_spec],
                            kwargs={"out": 2},
                            output_spec=None,
                            comparison_target=2,
                            tolerance=tol,
                            description="lerp_inplace_weight",
                        )
                    )
    return cases
class OpTest(BaseOperatorTest):
    """Lerp operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "Lerp".
        super().__init__("Lerp")

    def get_test_cases(self):
        """Return the generated lerp test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.lerp(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.lerp(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
# Test cases format: (input_shape, input_strides_or_None, size, alpha_or_None, beta_or_None, k_or_None)
# infinicore.nn.functional.local_response_norm(input, size, alpha=1e-4, beta=0.75, k=1.0)
_TEST_CASES_DATA = [
((4, 3, 8, 8), None, 5, None, None, None),
((2, 6, 4, 4), (384, 96, 1, 1), 3, 1e-4, 0.75, 1.0),
((1, 3, 16, 16), None, 7, None, None, None),
((8, 5, 2, 2), None, 1, 1e-3, 0.5, 0.0),
((6, 4, 7, 7), None, 9, None, None, None),
((3, 2, 9, 9), None, 4, 1e-5, 0.9, 2.0),
]
_TOLERANCE_MAP = {
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float32]
def parse_test_cases():
    """Generate local_response_norm out-of-place test cases.

    ``alpha``/``beta``/``k`` are only forwarded when the data entry
    provides them, so the operator's defaults stay in effect otherwise.
    """
    cases = []
    for shape, strides, size, alpha, beta, k in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)
            kwargs = {"size": size}
            for key, value in (("alpha", alpha), ("beta", beta), ("k", k)):
                if value is not None:
                    kwargs[key] = value
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="local_response_norm - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """local_response_norm operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "local_response_norm".
        super().__init__("local_response_norm")

    def get_test_cases(self):
        """Return the generated local_response_norm test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.nn.functional.local_response_norm(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.local_response_norm(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None)
# infinicore.log(input)
_TEST_CASES_DATA = [
((2, 3), None),
((1, 4, 8), (32, 8, 1)),
((3, 2, 5, 7), None),
((2, 1, 16), None),
((1, 8, 9, 11), (792, 99, 11, 1)),
((2, 6, 10), None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Generate log test cases: out-of-place, explicit-out, and — when the
    input layout is not broadcast — in-place, for every configured dtype."""
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dtype]
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)
            # Plain out-of-place call.
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="log_out",
                )
            )
            # Explicit contiguous out tensor.
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=TensorSpec.from_tensor(shape, None, dtype),
                    comparison_target="out",
                    tolerance=tolerance,
                    description="log_out_explicit",
                )
            )
            # In-place on the input (index 0) when its layout allows it.
            if not is_broadcast(input_spec.strides):
                cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tolerance,
                        description="log_inplace",
                    )
                )
    return cases
class OpTest(BaseOperatorTest):
    """Log operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "Log".
        super().__init__("Log")

    def get_test_cases(self):
        """Return the generated log test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.log(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.log(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None)
# infinicore.log10(input)
_TEST_CASES_DATA = [
((2, 3), None),
((1, 4, 8), (32, 8, 1)),
((3, 2, 5, 7), None),
((2, 1, 16), None),
((1, 8, 9, 11), (792, 99, 11, 1)),
((2, 6, 10), None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Generate log10 test cases: out-of-place, explicit-out, and — when the
    input layout is not broadcast — in-place, for every configured dtype."""
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dtype]
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)
            # Plain out-of-place call.
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="log10_out",
                )
            )
            # Explicit contiguous out tensor.
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=TensorSpec.from_tensor(shape, None, dtype),
                    comparison_target="out",
                    tolerance=tolerance,
                    description="log10_out_explicit",
                )
            )
            # In-place on the input (index 0) when its layout allows it.
            if not is_broadcast(input_spec.strides):
                cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tolerance,
                        description="log10_inplace",
                    )
                )
    return cases
class OpTest(BaseOperatorTest):
    """Log10 operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "Log10".
        super().__init__("Log10")

    def get_test_cases(self):
        """Return the generated log10 test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.log10(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.log10(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None)
# infinicore.log1p(input)
_TEST_CASES_DATA = [
((2, 3), None),
((1, 4, 8), (32, 8, 1)),
((3, 2, 5, 7), None),
((2, 1, 16), None),
((1, 8, 9, 11), (792, 99, 11, 1)),
((2, 6, 10), None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Generate log1p test cases: out-of-place, explicit-out, and — when the
    input layout is not broadcast — in-place, for every configured dtype."""
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dtype]
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)
            # Plain out-of-place call.
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="log1p_out",
                )
            )
            # Explicit contiguous out tensor.
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=TensorSpec.from_tensor(shape, None, dtype),
                    comparison_target="out",
                    tolerance=tolerance,
                    description="log1p_out_explicit",
                )
            )
            # In-place on the input (index 0) when its layout allows it.
            if not is_broadcast(input_spec.strides):
                cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tolerance,
                        description="log1p_inplace",
                    )
                )
    return cases
class OpTest(BaseOperatorTest):
    """Log1p operator test with simplified implementation"""

    def __init__(self):
        # Registers this suite under the operator name "Log1p".
        super().__init__("Log1p")

    def get_test_cases(self):
        """Return the generated log1p test cases."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation used for comparison."""
        return torch.log1p(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.log1p(*args, **kwargs)
def main():
    """Main entry point"""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None)
# infinicore.log2(input)
_TEST_CASES_DATA = [
((2, 3), None),
((1, 4, 8), (32, 8, 1)),
((3, 2, 5, 7), None),
((2, 1, 16), None),
((1, 8, 9, 11), (792, 99, 11, 1)),
((2, 6, 10), None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Generate log2 test cases: out-of-place, explicit-out, and — when the
    input layout is not broadcast — in-place, for every configured dtype."""
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dtype]
            input_spec = TensorSpec.from_tensor(shape, strides, dtype)
            # Plain out-of-place call.
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="log2_out",
                )
            )
            # Explicit contiguous out tensor.
            cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={},
                    output_spec=TensorSpec.from_tensor(shape, None, dtype),
                    comparison_target="out",
                    tolerance=tolerance,
                    description="log2_out_explicit",
                )
            )
            # In-place on the input (index 0) when its layout allows it.
            if not is_broadcast(input_spec.strides):
                cases.append(
                    TestCase(
                        inputs=[input_spec],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tolerance,
                        description="log2_inplace",
                    )
                )
    return cases
class OpTest(BaseOperatorTest):
    """Drives the Log2 test cases against the PyTorch reference."""

    def __init__(self):
        super().__init__("Log2")

    def get_test_cases(self):
        # Cases are produced by the module-level parser.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference: element-wise base-2 logarithm."""
        return torch.log2(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.log2(*args, **kwargs)
def main():
    """Main entry point: run the suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()
if __name__ == "__main__":
    # Entry point when executed directly as a script.
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, dim_or_None)
# dim is the softmax reduction axis passed through to log_softmax.
_TEST_CASES_DATA = [
    ((4, 10), None, -1),
    ((2, 5, 8), (40, 8, 1), 1),
    ((8, 20), None, 1),
]
# Per-dtype comparison tolerances; reduced-precision dtypes get looser bounds.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
# Every data row above is instantiated once per dtype listed here.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Build out-of-place log_softmax(input, dim) cases.

    Each row of _TEST_CASES_DATA is expanded once per dtype in
    _TENSOR_DTYPES. Missing strides/dim entries fall back to
    contiguous layout and the last axis.
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape = data[0]
        in_strides = data[1] if len(data) > 1 else None
        dim = data[2] if len(data) > 2 else -1
        for dtype in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={"dim": dim},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    # Plain string: no placeholders, so no f-prefix (fixes F541).
                    description="LogSoftmax - OUT_OF_PLACE",
                )
            )
    return test_cases
class OpTest(BaseOperatorTest):
    """Drives the LogSoftmax test cases against the PyTorch reference."""

    def __init__(self):
        super().__init__("LogSoftmax")

    def get_test_cases(self):
        # Cases are produced by the module-level parser.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference: torch.nn.functional.log_softmax."""
        return torch.nn.functional.log_softmax(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.log_softmax(*args, **kwargs)
def main():
    """Main entry point: run the suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()
if __name__ == "__main__":
    # Entry point when executed directly as a script.
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (a_shape, a_strides_or_None, b_shape_or_None)
# infinicore.logaddexp(a, b)
# b_shape None means b takes a's shape with contiguous strides.
_TEST_CASES_DATA = [
    ((2, 3, 4), None, None),
    ((1, 4, 8), (32, 8, 1), None),
    ((3, 2, 5, 7), None, None),
    ((2, 1, 16), None, None),
    ((1, 8, 9, 11), (792, 99, 11, 1), None),
    ((2, 6, 10), None, None),
]
# Per-dtype comparison tolerances; fp16 gets looser bounds than fp32.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
# Every data row above is instantiated once per dtype listed here.
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Expand _TEST_CASES_DATA into out-of-place, explicit-out and in-place cases."""
    cases = []
    for a_shape, a_strides, b_shape in _TEST_CASES_DATA:
        for dt in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dt]
            lhs = TensorSpec.from_tensor(a_shape, a_strides, dt)
            # b defaults to a's shape with contiguous strides when unspecified.
            rhs = TensorSpec.from_tensor(
                b_shape if b_shape is not None else a_shape, None, dt
            )
            # Out-of-place: the operator allocates its own result tensor.
            cases.append(
                TestCase(
                    inputs=[lhs, rhs],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="logaddexp_out",
                )
            )
            # Explicit contiguous out= tensor matching a's shape.
            cases.append(
                TestCase(
                    inputs=[lhs, rhs],
                    kwargs={},
                    output_spec=TensorSpec.from_tensor(a_shape, None, dt),
                    comparison_target="out",
                    tolerance=tolerance,
                    description="logaddexp_out_explicit",
                )
            )
            # In-place into either operand; skipped when that operand's
            # strides indicate broadcasting.
            inplace_variants = (
                (0, lhs, "logaddexp_inplace_a"),
                (1, rhs, "logaddexp_inplace_b"),
            )
            for index, operand, label in inplace_variants:
                if is_broadcast(operand.strides):
                    continue
                cases.append(
                    TestCase(
                        inputs=[lhs, rhs],
                        kwargs={"out": index},
                        output_spec=None,
                        comparison_target=index,
                        tolerance=tolerance,
                        description=label,
                    )
                )
    return cases
class OpTest(BaseOperatorTest):
    """Drives the LogAddExp test cases against the PyTorch reference."""

    def __init__(self):
        super().__init__("LogAddExp")

    def get_test_cases(self):
        # Cases are produced by the module-level parser.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference: element-wise log(exp(a) + exp(b))."""
        return torch.logaddexp(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.logaddexp(*args, **kwargs)
def main():
    """Main entry point: run the suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()
if __name__ == "__main__":
    # Entry point when executed directly as a script.
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (a_shape, a_strides_or_None, b_shape_or_None)
# infinicore.logaddexp2(a, b)
# b_shape None means b takes a's shape with contiguous strides.
_TEST_CASES_DATA = [
    ((2, 3, 4), None, None),
    ((1, 4, 8), (32, 8, 1), None),
    ((3, 2, 5, 7), None, None),
    ((2, 1, 16), None, None),
    ((1, 8, 9, 11), (792, 99, 11, 1), None),
    ((2, 6, 10), None, None),
]
# Per-dtype comparison tolerances; fp16 gets looser bounds than fp32.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
# Every data row above is instantiated once per dtype listed here.
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Expand _TEST_CASES_DATA into out-of-place, explicit-out and in-place cases."""
    cases = []
    for a_shape, a_strides, b_shape in _TEST_CASES_DATA:
        for dt in _TENSOR_DTYPES:
            tolerance = _TOLERANCE_MAP[dt]
            lhs = TensorSpec.from_tensor(a_shape, a_strides, dt)
            # b defaults to a's shape with contiguous strides when unspecified.
            rhs = TensorSpec.from_tensor(
                b_shape if b_shape is not None else a_shape, None, dt
            )
            # Out-of-place: the operator allocates its own result tensor.
            cases.append(
                TestCase(
                    inputs=[lhs, rhs],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tolerance,
                    description="logaddexp2_out",
                )
            )
            # Explicit contiguous out= tensor matching a's shape.
            cases.append(
                TestCase(
                    inputs=[lhs, rhs],
                    kwargs={},
                    output_spec=TensorSpec.from_tensor(a_shape, None, dt),
                    comparison_target="out",
                    tolerance=tolerance,
                    description="logaddexp2_out_explicit",
                )
            )
            # In-place into either operand; skipped when that operand's
            # strides indicate broadcasting.
            inplace_variants = (
                (0, lhs, "logaddexp2_inplace_a"),
                (1, rhs, "logaddexp2_inplace_b"),
            )
            for index, operand, label in inplace_variants:
                if is_broadcast(operand.strides):
                    continue
                cases.append(
                    TestCase(
                        inputs=[lhs, rhs],
                        kwargs={"out": index},
                        output_spec=None,
                        comparison_target=index,
                        tolerance=tolerance,
                        description=label,
                    )
                )
    return cases
class OpTest(BaseOperatorTest):
    """Drives the LogAddExp2 test cases against the PyTorch reference."""

    def __init__(self):
        super().__init__("LogAddExp2")

    def get_test_cases(self):
        # Cases are produced by the module-level parser.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference: element-wise log2(2**a + 2**b)."""
        return torch.logaddexp2(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.logaddexp2(*args, **kwargs)
def main():
    """Main entry point: run the suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()
if __name__ == "__main__":
    # Entry point when executed directly as a script.
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (shape, dim, input_strides_or_None)
# logcumsumexp computes log of cumulative sum of exponentials along dim.
_TEST_CASES_DATA = [
    ((13, 4), 1, None),
    ((13, 4), 0, (10, 1)),
    ((8, 16), 1, None),
    ((2, 3, 5), 2, None),
    ((16, 64), 1, None),
    ((4, 5, 6), 0, None),
]
# Per-dtype comparison tolerances; reduced-precision dtypes get looser bounds.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
# Every data row above is instantiated once per dtype listed here.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Build out-of-place logcumsumexp(input, dim) cases.

    Each row of _TEST_CASES_DATA is expanded once per dtype in
    _TENSOR_DTYPES. PyTorch does not expose an explicit ``out=``
    overload for logcumsumexp, so no explicit-out or in-place
    cases are generated. (Removed dead locals: ``supports_inplace``
    and ``out_spec`` were computed but never used.)
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape, dim = data[0], data[1]
        in_strides = data[2] if len(data) > 2 else None
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs={"dim": dim},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    # Plain string: no placeholders, so no f-prefix (fixes F541).
                    description="logcumsumexp - OUT_OF_PLACE",
                )
            )
    return test_cases
class OpTest(BaseOperatorTest):
    """Drives the LogCumsumExp test cases against the PyTorch reference."""

    def __init__(self):
        super().__init__("LogCumsumExp")

    def get_test_cases(self):
        # Cases are produced by the module-level parser.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference: cumulative log-sum-exp along a dimension."""
        return torch.logcumsumexp(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.logcumsumexp(*args, **kwargs)
def main():
    """Main entry point: run the suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()
if __name__ == "__main__":
    # Entry point when executed directly as a script.
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
# Test cases format: (matrix_shape, strides_or_None)
# torch.logdet(input) returns log(det(input)) as a single tensor
# (-inf for det == 0, nan for det < 0). The (sign, logabsdet) pair
# is returned by torch.slogdet, not logdet.
_TEST_CASES_DATA = [
    ((1, 1), None),
    ((2, 2), None),
    ((3, 3), (3, 1)),
    ((4, 4), None),
    # NOTE(review): (512, 1) strides for an 8x8 matrix imply very heavy
    # row padding — confirm this layout is intentional.
    ((8, 8), (512, 1)),
    ((16, 16), None),
]
# Per-dtype comparison tolerances; fp16/bf16 entries are currently unused
# since only float32 appears in _TENSOR_DTYPES below.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
# Determinants are numerically sensitive; only float32 is exercised.
_TENSOR_DTYPES = [infinicore.float32]
def parse_test_cases():
    """One out-of-place logdet case per (shape, strides) x dtype combination."""
    return [
        TestCase(
            inputs=[TensorSpec.from_tensor(mat_shape, mat_strides, dt)],
            kwargs={},
            output_spec=None,
            comparison_target=None,
            tolerance=_TOLERANCE_MAP.get(dt, {"atol": 1e-5, "rtol": 1e-4}),
            description="logdet - OUT_OF_PLACE",
        )
        for mat_shape, mat_strides in _TEST_CASES_DATA
        for dt in _TENSOR_DTYPES
    ]
class OpTest(BaseOperatorTest):
    """Drives the logdet test cases against the PyTorch reference."""

    def __init__(self):
        super().__init__("logdet")

    def get_test_cases(self):
        # Cases are produced by the module-level parser.
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference: log-determinant of a square matrix."""
        return torch.logdet(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.logdet(*args, **kwargs)
def main():
    """Main entry point: run the suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()
if __name__ == "__main__":
    # Entry point when executed directly as a script.
    main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment