Commit 4deec279 authored by thatPepe's avatar thatPepe Committed by MaYuhang
Browse files

Merge pull request #631 from InfiniTensor/issue/630

issue/630 - slightly improved unimplemented messages
parents f3a25b70 2a343a3a
from .base import TestConfig, TestRunner, TestCase, BaseOperatorTest
from .base import TestConfig, TestRunner, BaseOperatorTest
from .test_case import TestCase, TestResult
from .benchmark import BenchmarkUtils, BenchmarkResult
from .config import (
get_args,
......@@ -32,6 +33,7 @@ __all__ = [
"TensorSpec",
"TestCase",
"TestConfig",
"TestResult",
"TestRunner",
# Core functions
"compare_results",
......
"""
Core base classes for operator testing framework.
Contains TestConfig, TestRunner, and BaseOperatorTest classes.
"""
import torch
import infinicore
import traceback
from dataclasses import dataclass
from abc import ABC, abstractmethod
from typing import List, Dict, Any, Optional, Tuple
from .test_case import TestCase, TestResult
from .datatypes import to_torch_dtype, to_infinicore_dtype
from .devices import InfiniDeviceNames, torch_device_map
from .tensor import TensorSpec, TensorInitializer
......@@ -15,163 +19,6 @@ from .utils import (
from .benchmark import BenchmarkUtils
@dataclass
class TestResult:
    """Test result data structure"""

    # Overall pass/fail flag for this test case run
    success: bool
    return_code: int  # 0: success, -1: failure, -2: skipped, -3: partial
    # Benchmark timings; default to 0.0 until benchmarking fills them in
    torch_host_time: float = 0.0
    torch_device_time: float = 0.0
    infini_host_time: float = 0.0
    infini_device_time: float = 0.0
    # Human-readable reason, populated on failure/skip/partial results
    error_message: str = ""
    # Originating TestCase and device (typed Any — presumably to avoid import cycles; TODO confirm)
    test_case: Any = None
    device: Any = None
class TestCase:
    """Test case with all configuration included"""

    def __init__(
        self,
        inputs,
        kwargs=None,
        output_spec=None,
        output_specs=None,
        comparison_target=None,
        description="",
        tolerance=None,
        output_count=1,
    ):
        """
        Initialize a test case with complete configuration

        Args:
            inputs: List of TensorSpec objects, scalars, or tuples containing multiple TensorSpecs
            kwargs: Additional keyword arguments for the operator
            output_spec: TensorSpec for output tensor (for single output operations)
            output_specs: List of TensorSpec for multiple output tensors
            comparison_target: Target for comparison ('out', index, or None for return value)
            description: Test case description
            tolerance: Tolerance settings for this test case {'atol': float, 'rtol': float}
            output_count: Number of outputs (default: 1)

        Raises:
            ValueError: if output_spec/output_specs conflict with output_count.
        """
        self.inputs = []
        # Process inputs - support both single TensorSpecs and tuples of TensorSpecs
        for i, inp in enumerate(inputs):
            if isinstance(inp, (list, tuple)):
                # Handle tuple/list of multiple TensorSpecs (e.g., for torch.cat)
                processed_tuple = []
                for j, item in enumerate(inp):
                    if isinstance(item, (list, tuple)):
                        # Nested tuple - recursively process
                        nested_processed = []
                        for k, nested_item in enumerate(item):
                            if isinstance(nested_item, TensorSpec):
                                # NOTE: fill_name mutates the spec object passed by the caller
                                nested_item.fill_name(f"in_{i}_{j}_{k}")
                                nested_processed.append(nested_item)
                            else:
                                nested_processed.append(nested_item)
                        processed_tuple.append(tuple(nested_processed))
                    elif isinstance(item, TensorSpec):
                        item.fill_name(f"in_{i}_{j}")
                        processed_tuple.append(item)
                    else:
                        # Scalars and other non-spec values pass through unchanged
                        processed_tuple.append(item)
                self.inputs.append(tuple(processed_tuple))
            elif isinstance(inp, TensorSpec):
                inp.fill_name(f"in_{i}")
                self.inputs.append(inp)
            else:
                self.inputs.append(inp)
        self.kwargs = kwargs or {}
        self.output_spec = output_spec
        self.output_specs = output_specs
        self.comparison_target = comparison_target
        self.description = description
        # Falsy tolerance (None or {}) falls back to the framework default
        self.tolerance = tolerance or {"atol": 1e-5, "rtol": 1e-3}
        self.output_count = output_count
        if self.output_count > 1 and self.output_specs is not None:
            for idx, spec in enumerate(self.output_specs):
                spec.fill_name(f"out_{idx}")
        # Validate output configuration
        if self.output_count == 1:
            if self.output_specs is not None:
                raise ValueError("output_specs cannot be used when output_count=1")
        else:
            if self.output_spec is not None:
                raise ValueError("output_spec cannot be used when output_count>1")
            if (
                self.output_specs is not None
                and len(self.output_specs) != self.output_count
            ):
                raise ValueError(
                    f"output_specs count ({len(self.output_specs)}) must match output_count ({self.output_count})"
                )

    def get_tensor_input_count(self):
        """Count the number of tensor inputs (excluding scalars)"""
        count = 0
        for inp in self.inputs:
            if isinstance(inp, TensorSpec) and not inp.is_scalar:
                count += 1
            elif isinstance(inp, (list, tuple)):
                # Count all TensorSpecs within the tuple (one level deep only)
                for item in inp:
                    if isinstance(item, TensorSpec) and not item.is_scalar:
                        count += 1
        return count

    def __str__(self):
        """One-line human-readable summary used in test reports."""
        input_strs = []
        for inp in self.inputs:
            if isinstance(inp, (list, tuple)):
                # Handle tuple inputs (e.g., for torch.cat)
                tuple_strs = []
                for item in inp:
                    if isinstance(item, (list, tuple)):
                        # Handle nested tuples
                        nested_strs = []
                        for nested_item in item:
                            nested_strs.append(str(nested_item))
                        tuple_strs.append(f"tuple({', '.join(nested_strs)})")
                    else:
                        tuple_strs.append(str(item))
                input_strs.append(f"tuple({'; '.join(tuple_strs)})")
            else:
                input_strs.append(str(inp))
        base_str = f"TestCase("
        if self.description:
            base_str += f"{self.description}"
        base_str += f" - inputs=[{'; '.join(input_strs)}]"
        if self.kwargs or self.output_spec or self.output_specs:
            kwargs_strs = []
            for key, value in self.kwargs.items():
                if key == "out" and isinstance(value, int):
                    # out=<index> aliases an input tensor; show that tensor's name
                    kwargs_strs.append(f"{key}={self.inputs[value].name}")
                else:
                    kwargs_strs.append(f"{key}={value}")
            # Handle output specifications using TensorSpec's __str__
            if self.output_count == 1 and self.output_spec:
                kwargs_strs.append(f"out={self.output_spec}")
            elif self.output_count > 1 and self.output_specs:
                for i, spec in enumerate(self.output_specs):
                    kwargs_strs.append(f"out_{i}={spec}")
            base_str += f", kwargs={{{'; '.join(kwargs_strs)}}}"
        base_str += ")"
        return base_str
class TestConfig:
"""Test configuration"""
......@@ -245,20 +92,20 @@ class TestRunner:
)
print(f"\033[92m✓\033[0m Passed")
elif test_result.return_code == -1:
fail_msg = f"{test_case} - {InfiniDeviceNames[device]} - Test terminated in verbose mode."
# Test failed - use the actual error message from test_result
fail_msg = f"{test_case} - {InfiniDeviceNames[device]} - {test_result.error_message}"
self.failed_tests.append(fail_msg)
print(f"\033[91m✗\033[0m {test_result.error_message}")
elif test_result.return_code == -2: # Skipped
skip_msg = f"{test_case} - {InfiniDeviceNames[device]} - Both operators not implemented"
# Both operators not implemented - use actual error message
skip_msg = f"{test_case} - {InfiniDeviceNames[device]} - {test_result.error_message}"
self.skipped_tests.append(skip_msg)
print(
f"\033[93m⚠\033[0m Both operators not implemented - test skipped"
)
print(f"\033[93m⚠\033[0m {test_result.error_message}")
elif test_result.return_code == -3: # Partial
partial_msg = f"{test_case} - {InfiniDeviceNames[device]} - One operator not implemented"
# One operator not implemented - use actual error message
partial_msg = f"{test_case} - {InfiniDeviceNames[device]} - {test_result.error_message}"
self.partial_tests.append(partial_msg)
print(
f"\033[93m⚠\033[0m One operator not implemented - running single operator without comparison"
)
print(f"\033[93m⚠\033[0m {test_result.error_message}")
if self.config.verbose and test_result.return_code != 0:
return False
......@@ -569,43 +416,57 @@ class BaseOperatorTest(ABC):
# Check operator implementations
torch_implemented = True
infini_implemented = True
torch_error_msg = ""
infini_error_msg = ""
try:
torch_result = self.torch_operator(*inputs, **kwargs)
if torch_result is None:
torch_implemented = False
except NotImplementedError:
except NotImplementedError as e:
if config.verbose:
traceback.print_exc()
# Return test result immediately in verbose mode
test_result.return_code = -1
test_result.error_message = "torch_operator not implemented"
return test_result
torch_implemented = False
torch_result = None
torch_error_msg = str(e)
try:
infini_result = self.infinicore_operator(*infini_inputs, **infini_kwargs)
if infini_result is None:
infini_implemented = False
except NotImplementedError:
except NotImplementedError as e:
if config.verbose:
traceback.print_exc()
# Return test result immediately in verbose mode
test_result.return_code = -1
test_result.error_message = "infinicore_operator not implemented"
return test_result
infini_implemented = False
infini_result = None
infini_error_msg = str(e)
if not torch_error_msg:
torch_error_msg = "unimplemented test function"
if not infini_error_msg:
infini_error_msg = "unimplemented test function"
# Skip if neither operator is implemented
if not torch_implemented and not infini_implemented:
test_result.return_code = -2 # Skipped
# Combine both error messages
test_result.error_message = f"Both operators failed: PyTorch - {torch_error_msg}; InfiniCore - {infini_error_msg}"
return test_result
# Single operator execution without comparison
if not torch_implemented or not infini_implemented:
test_result.return_code = -3 # Partial
# Determine which operator is missing and create appropriate message with actual error
if not torch_implemented:
test_result.error_message = (
f"PyTorch operator failed: {torch_error_msg}"
)
else:
test_result.error_message = (
f"InfiniCore operator failed: {infini_error_msg}"
)
# Run benchmarking for partial tests if enabled
if config.bench:
torch_host, torch_device, infini_host, infini_device = (
......
"""
Test case definitions and related functionality for the InfiniCore testing framework.
"""
from dataclasses import dataclass
from typing import List, Dict, Any, Optional, Tuple
from .tensor import TensorSpec
@dataclass
class TestResult:
    """Test result data structure"""

    # Overall pass/fail flag for this test case run
    success: bool
    return_code: int  # 0: success, -1: failure, -2: skipped, -3: partial
    # Benchmark timings; default to 0.0 until benchmarking fills them in
    torch_host_time: float = 0.0
    torch_device_time: float = 0.0
    infini_host_time: float = 0.0
    infini_device_time: float = 0.0
    # Human-readable reason, populated on failure/skip/partial results
    error_message: str = ""
    # Originating TestCase and device (typed Any — presumably to avoid import cycles; TODO confirm)
    test_case: Any = None
    device: Any = None
class TestCase:
    """Test case with all configuration included"""

    def __init__(
        self,
        inputs,
        kwargs=None,
        output_spec=None,
        output_specs=None,
        comparison_target=None,
        description="",
        tolerance=None,
        output_count=1,
    ):
        """
        Initialize a test case with complete configuration

        Args:
            inputs: List of TensorSpec objects, scalars, or tuples containing multiple TensorSpecs
            kwargs: Additional keyword arguments for the operator
            output_spec: TensorSpec for output tensor (for single output operations)
            output_specs: List of TensorSpec for multiple output tensors
            comparison_target: Target for comparison ('out', index, or None for return value)
            description: Test case description
            tolerance: Tolerance settings for this test case {'atol': float, 'rtol': float}
            output_count: Number of outputs (default: 1)

        Raises:
            ValueError: if output_spec/output_specs conflict with output_count.
        """
        self.inputs = []
        # Process inputs - support both single TensorSpecs and tuples of TensorSpecs
        for i, inp in enumerate(inputs):
            if isinstance(inp, (list, tuple)):
                # Handle tuple/list of multiple TensorSpecs (e.g., for torch.cat)
                processed_tuple = []
                for j, item in enumerate(inp):
                    if isinstance(item, (list, tuple)):
                        # Nested tuple - recursively process
                        nested_processed = []
                        for k, nested_item in enumerate(item):
                            if isinstance(nested_item, TensorSpec):
                                # NOTE: fill_name mutates the spec object passed by the caller
                                nested_item.fill_name(f"in_{i}_{j}_{k}")
                                nested_processed.append(nested_item)
                            else:
                                nested_processed.append(nested_item)
                        processed_tuple.append(tuple(nested_processed))
                    elif isinstance(item, TensorSpec):
                        item.fill_name(f"in_{i}_{j}")
                        processed_tuple.append(item)
                    else:
                        # Scalars and other non-spec values pass through unchanged
                        processed_tuple.append(item)
                self.inputs.append(tuple(processed_tuple))
            elif isinstance(inp, TensorSpec):
                inp.fill_name(f"in_{i}")
                self.inputs.append(inp)
            else:
                self.inputs.append(inp)
        self.kwargs = kwargs or {}
        self.output_spec = output_spec
        self.output_specs = output_specs
        self.comparison_target = comparison_target
        self.description = description
        # Falsy tolerance (None or {}) falls back to the framework default
        self.tolerance = tolerance or {"atol": 1e-5, "rtol": 1e-3}
        self.output_count = output_count
        if self.output_count > 1 and self.output_specs is not None:
            for idx, spec in enumerate(self.output_specs):
                spec.fill_name(f"out_{idx}")
        # Validate output configuration
        if self.output_count == 1:
            if self.output_specs is not None:
                raise ValueError("output_specs cannot be used when output_count=1")
        else:
            if self.output_spec is not None:
                raise ValueError("output_spec cannot be used when output_count>1")
            if (
                self.output_specs is not None
                and len(self.output_specs) != self.output_count
            ):
                raise ValueError(
                    f"output_specs count ({len(self.output_specs)}) must match output_count ({self.output_count})"
                )

    def get_tensor_input_count(self):
        """Count the number of tensor inputs (excluding scalars)"""
        count = 0
        for inp in self.inputs:
            if isinstance(inp, TensorSpec) and not inp.is_scalar:
                count += 1
            elif isinstance(inp, (list, tuple)):
                # Count all TensorSpecs within the tuple (one level deep only)
                for item in inp:
                    if isinstance(item, TensorSpec) and not item.is_scalar:
                        count += 1
        return count

    def __str__(self):
        """One-line human-readable summary used in test reports."""
        input_strs = []
        for inp in self.inputs:
            if isinstance(inp, (list, tuple)):
                # Handle tuple inputs (e.g., for torch.cat)
                tuple_strs = []
                for item in inp:
                    if isinstance(item, (list, tuple)):
                        # Handle nested tuples
                        nested_strs = []
                        for nested_item in item:
                            nested_strs.append(str(nested_item))
                        tuple_strs.append(f"tuple({', '.join(nested_strs)})")
                    else:
                        tuple_strs.append(str(item))
                input_strs.append(f"tuple({'; '.join(tuple_strs)})")
            else:
                input_strs.append(str(inp))
        base_str = f"TestCase("
        if self.description:
            base_str += f"{self.description}"
        base_str += f" - inputs=[{'; '.join(input_strs)}]"
        if self.kwargs or self.output_spec or self.output_specs:
            kwargs_strs = []
            for key, value in self.kwargs.items():
                if key == "out" and isinstance(value, int):
                    # out=<index> aliases an input tensor; show that tensor's name
                    kwargs_strs.append(f"{key}={self.inputs[value].name}")
                else:
                    kwargs_strs.append(f"{key}={value}")
            # Handle output specifications using TensorSpec's __str__
            if self.output_count == 1 and self.output_spec:
                kwargs_strs.append(f"out={self.output_spec}")
            elif self.output_count > 1 and self.output_specs:
                for i, spec in enumerate(self.output_specs):
                    kwargs_strs.append(f"out_{i}={spec}")
            base_str += f", kwargs={{{'; '.join(kwargs_strs)}}}"
        base_str += ")"
        return base_str
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
# Test cases format: (in_shape, in_strides_or_None)
_TEST_CASES_DATA = [
    ((2, 3), None),
    ((1, 4, 8), (32, 8, 1)),
    ((3, 2, 5, 7), None),
    ((2, 1, 16), None),
    ((1, 8, 9, 11), (792, 99, 11, 1)),
    ((2, 6, 10), None),
]
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 0.0, "rtol": 0.0},
    infinicore.float32: {"atol": 0.0, "rtol": 0.0},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]


def parse_test_cases():
    """Expand _TEST_CASES_DATA across dtypes into Abs TestCase objects.

    For every (shape, strides) row and every dtype, three cases are
    produced: out-of-place, explicit out= tensor, and in-place on input.
    """
    cases = []
    for shape, strides in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP[dtype]
            src = TensorSpec.from_tensor(shape, strides, dtype)
            cases.extend(
                [
                    # Out-of-place: compare the returned tensor
                    TestCase(
                        inputs=[src],
                        kwargs={},
                        output_spec=None,
                        comparison_target=None,
                        tolerance=tol,
                        description="Abs - OUT_OF_PLACE",
                    ),
                    # Explicit out= tensor (contiguous)
                    TestCase(
                        inputs=[src],
                        kwargs={},
                        output_spec=TensorSpec.from_tensor(shape, None, dtype),
                        comparison_target="out",
                        tolerance=tol,
                        description="Abs - INPLACE(out)",
                    ),
                    # In-place on the first input (out=0 aliases input 0)
                    TestCase(
                        inputs=[src],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="Abs - INPLACE(a)",
                    ),
                ]
            )
    return cases
class OpTest(BaseOperatorTest):
    """Abs operator test with simplified implementation"""

    def __init__(self):
        # Operator name used by the framework for reporting
        super().__init__("Abs")

    def get_test_cases(self):
        """Return the generated list of TestCase objects."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.abs(*args, **kwargs)

    # NOTE(review): infinicore_operator is intentionally left undefined —
    # presumably the base class reports it as unimplemented; confirm in framework.base.
    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.abs(*args, **kwargs)
def main():
    """Entry point: run the Abs operator test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# =======================================================================
# Test cases format: (shape, input_strides_or_None)
# =======================================================================
_TEST_CASES_DATA = [
    ((13, 4), None),
    ((13, 4), (10, 1)),
    ((8, 16), None),
    ((8, 16), (40, 1)),
    ((2, 3, 4), None),
    ((16, 5632), None),
]
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Expand _TEST_CASES_DATA across dtypes into acos TestCase objects.

    Each row yields an out-of-place case, an explicit out= case, and — when
    the input strides are not a broadcast view — an in-place-on-input case.
    """
    cases = []
    for entry in _TEST_CASES_DATA:
        shape = entry[0]
        strides = entry[1] if len(entry) > 1 else None
        # In-place on the input is only valid for non-broadcast layouts
        inplace_ok = not is_broadcast(strides)
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            src = TensorSpec.from_tensor(shape, strides, dtype)
            dst = TensorSpec.from_tensor(shape, None, dtype)
            cases.append(
                TestCase(
                    inputs=[src],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="acos - OUT_OF_PLACE",
                )
            )
            cases.append(
                TestCase(
                    inputs=[src],
                    kwargs={},
                    output_spec=dst,
                    comparison_target="out",
                    tolerance=tol,
                    description="acos - INPLACE(out)",
                )
            )
            if inplace_ok:
                cases.append(
                    TestCase(
                        inputs=[src],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="acos - INPLACE(input)",
                    )
                )
    return cases
class OpTest(BaseOperatorTest):
    """Acos operator test with simplified implementation"""

    def __init__(self):
        # Operator name used by the framework for reporting
        super().__init__("Acos")

    def get_test_cases(self):
        """Return the generated list of TestCase objects."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.acos(*args, **kwargs)

    # NOTE(review): infinicore_operator is intentionally left undefined —
    # presumably the base class reports it as unimplemented; confirm in framework.base.
    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.acos(*args, **kwargs)
def main():
    """Entry point: run the Acos operator test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# =======================================================================
# Test cases format: (shape, input_strides_or_None)
# Note: acosh domain is [1, inf); tests should use valid ranges when generating tensors.
# =======================================================================
_TEST_CASES_DATA = [
    ((13, 4), None),
    ((13, 4), (10, 1)),
    ((8, 16), None),
    ((8, 16), (40, 1)),
    ((2, 3, 4), None),
    ((16, 5632), None),
]
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-2, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]


def parse_test_cases():
    """Expand _TEST_CASES_DATA across dtypes into acosh TestCase objects.

    Each row yields an out-of-place case, an explicit out= case, and — when
    the input strides are not a broadcast view — an in-place-on-input case.
    """
    cases = []
    for entry in _TEST_CASES_DATA:
        shape = entry[0]
        strides = entry[1] if len(entry) > 1 else None
        # In-place on the input is only valid for non-broadcast layouts
        inplace_ok = not is_broadcast(strides)
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            src = TensorSpec.from_tensor(shape, strides, dtype)
            dst = TensorSpec.from_tensor(shape, None, dtype)
            cases.append(
                TestCase(
                    inputs=[src],
                    kwargs={},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="acosh - OUT_OF_PLACE",
                )
            )
            cases.append(
                TestCase(
                    inputs=[src],
                    kwargs={},
                    output_spec=dst,
                    comparison_target="out",
                    tolerance=tol,
                    description="acosh - INPLACE(out)",
                )
            )
            if inplace_ok:
                cases.append(
                    TestCase(
                        inputs=[src],
                        kwargs={"out": 0},
                        output_spec=None,
                        comparison_target=0,
                        tolerance=tol,
                        description="acosh - INPLACE(input)",
                    )
                )
    return cases
class OpTest(BaseOperatorTest):
    """Acosh operator test with simplified implementation"""

    def __init__(self):
        # Operator name used by the framework for reporting
        super().__init__("Acosh")

    def get_test_cases(self):
        """Return the generated list of TestCase objects."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.acosh(*args, **kwargs)

    # NOTE(review): infinicore_operator is intentionally left undefined —
    # presumably the base class reports it as unimplemented; confirm in framework.base.
    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.acosh(*args, **kwargs)
def main():
    """Entry point: run the Acosh operator test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, output_size_or_None)
_TEST_CASES_DATA = [
    ((2, 3, 16), None, 1),
    ((2, 3, 15), (45, 15, 1), 5),
    ((1, 4, 64), None, 8),
    ((4, 2, 7), (14, 7, 1), 3),
    ((3, 3, 32), None, 16),
    ((2, 8, 9), (72, 9, 1), 4),
]
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]


def parse_test_cases():
    """Build out-of-place AdaptiveAvgPool1d cases for every row × dtype."""
    cases = []
    for shape, strides, out_size in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            cases.append(
                TestCase(
                    inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
                    kwargs={"output_size": out_size},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}),
                    description="AdaptiveAvgPool1d - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """AdaptiveAvgPool1d operator test with simplified implementation"""

    def __init__(self):
        # Operator name used by the framework for reporting
        super().__init__("AdaptiveAvgPool1d")

    def get_test_cases(self):
        """Return the generated list of TestCase objects."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.adaptive_avg_pool1d(*args, **kwargs)

    # NOTE(review): infinicore_operator is intentionally left undefined —
    # presumably the base class reports it as unimplemented; confirm in framework.base.
    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.adaptive_avg_pool1d(*args, **kwargs)
def main():
    """Entry point: run the AdaptiveAvgPool1d test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, output_size_or_None)
# adaptive_avg_pool2d maps input HxW to target output size (h, w)
_TEST_CASES_DATA = [
    ((2, 3, 16, 16), None, (1, 1)),
    ((2, 4, 15, 17), (204, 51, 17, 1), (5, 6)),
    ((1, 8, 32, 32), None, (8, 8)),
    ((4, 2, 7, 9), (126, 63, 9, 1), (3, 4)),
    ((3, 3, 31, 29), None, (16, 15)),
    ((2, 8, 9, 11), (792, 99, 11, 1), (4, 5)),
]
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]


def parse_test_cases():
    """Build out-of-place AdaptiveAvgPool2d cases for every row × dtype."""
    cases = []
    for shape, strides, out_size in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            cases.append(
                TestCase(
                    inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
                    kwargs={"output_size": out_size},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}),
                    description="AdaptiveAvgPool2d - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """AdaptiveAvgPool2d operator test with simplified implementation"""

    def __init__(self):
        # Operator name used by the framework for reporting
        super().__init__("AdaptiveAvgPool2d")

    def get_test_cases(self):
        """Return the generated list of TestCase objects."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.adaptive_avg_pool2d(*args, **kwargs)

    # NOTE(review): infinicore_operator is intentionally left undefined —
    # presumably the base class reports it as unimplemented; confirm in framework.base.
    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.adaptive_avg_pool2d(*args, **kwargs)
def main():
    """Entry point: run the AdaptiveAvgPool2d test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, output_size_or_None)
# adaptive_avg_pool3d maps input D x H x W to target output size (d, h, w)
_TEST_CASES_DATA = [
    ((2, 3, 8, 8, 8), None, (1, 1, 1)),
    ((2, 4, 7, 9, 6), (2016, 504, 72, 8, 1), (3, 3, 2)),
    ((1, 8, 16, 16, 16), None, (4, 4, 4)),
    ((2, 2, 5, 7, 9), (1260, 630, 126, 18, 2), (2, 3, 4)),
    ((3, 3, 10, 9, 8), None, (5, 5, 4)),
    ((2, 6, 9, 11, 13), (5148, 858, 13, 1, 1), (3, 4, 5)),
]
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]


def parse_test_cases():
    """Build out-of-place AdaptiveAvgPool3d cases for every row × dtype."""
    cases = []
    for shape, strides, out_size in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            cases.append(
                TestCase(
                    inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
                    kwargs={"output_size": out_size},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}),
                    description="AdaptiveAvgPool3d - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """AdaptiveAvgPool3d operator test with simplified implementation"""

    def __init__(self):
        # Operator name used by the framework for reporting
        super().__init__("AdaptiveAvgPool3d")

    def get_test_cases(self):
        """Return the generated list of TestCase objects."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.adaptive_avg_pool3d(*args, **kwargs)

    # NOTE(review): infinicore_operator is intentionally left undefined —
    # presumably the base class reports it as unimplemented; confirm in framework.base.
    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.adaptive_avg_pool3d(*args, **kwargs)
def main():
    """Entry point: run the AdaptiveAvgPool3d test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, output_size_or_None)
_TEST_CASES_DATA = [
    ((2, 3, 16), None, 1),
    ((2, 3, 15), (45, 15, 1), 5),
    ((1, 4, 64), None, 8),
    ((4, 2, 7), (14, 7, 1), 3),
    ((3, 3, 32), None, 16),
    ((2, 8, 9), (72, 9, 1), 4),
]
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]


def parse_test_cases():
    """Build out-of-place AdaptiveMaxPool1d cases for every row × dtype."""
    cases = []
    for shape, strides, out_size in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            cases.append(
                TestCase(
                    inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
                    kwargs={"output_size": out_size},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}),
                    description="AdaptiveMaxPool1d - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """AdaptiveMaxPool1d operator test with simplified implementation"""

    def __init__(self):
        # Operator name used by the framework for reporting
        super().__init__("AdaptiveMaxPool1d")

    def get_test_cases(self):
        """Return the generated list of TestCase objects."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.adaptive_max_pool1d(*args, **kwargs)

    # NOTE(review): infinicore_operator is intentionally left undefined —
    # presumably the base class reports it as unimplemented; confirm in framework.base.
    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.adaptive_max_pool1d(*args, **kwargs)
def main():
    """Entry point: run the AdaptiveMaxPool1d test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, output_size_or_None)
_TEST_CASES_DATA = [
    ((2, 3, 16, 16), None, (1, 1)),
    ((2, 4, 15, 17), (204, 51, 17, 1), (5, 6)),
    ((1, 8, 32, 32), None, (8, 8)),
    ((4, 2, 7, 9), (126, 63, 9, 1), (3, 4)),
    ((3, 3, 31, 29), None, (16, 15)),
    ((2, 8, 9, 11), (792, 99, 11, 1), (4, 5)),
]
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]


def parse_test_cases():
    """Build out-of-place AdaptiveMaxPool2d cases for every row × dtype."""
    cases = []
    for shape, strides, out_size in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            cases.append(
                TestCase(
                    inputs=[TensorSpec.from_tensor(shape, strides, dtype)],
                    kwargs={"output_size": out_size},
                    output_spec=None,
                    comparison_target=None,
                    tolerance=_TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-3}),
                    description="AdaptiveMaxPool2d - OUT_OF_PLACE",
                )
            )
    return cases
class OpTest(BaseOperatorTest):
    """AdaptiveMaxPool2d operator test with simplified implementation"""

    def __init__(self):
        # Operator name used by the framework for reporting
        super().__init__("AdaptiveMaxPool2d")

    def get_test_cases(self):
        """Return the generated list of TestCase objects."""
        return parse_test_cases()

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.adaptive_max_pool2d(*args, **kwargs)

    # NOTE(review): infinicore_operator is intentionally left undefined —
    # presumably the base class reports it as unimplemented; confirm in framework.base.
    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.adaptive_max_pool2d(*args, **kwargs)
def main():
    """Entry point: run the AdaptiveMaxPool2d test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
......@@ -86,7 +86,7 @@ def parse_test_cases():
test_cases.append(
TestCase(
inputs=[a_spec, b_spec],
kwargs=None,
kwargs={},
output_spec=c_spec, # Specify the output tensor spec
comparison_target="out",
tolerance=tolerance,
......
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (input_shape, batch1_shape, batch2_shape, input_strides_or_None, batch1_strides_or_None, batch2_strides_or_None, beta_or_None, alpha_or_None)
# addbmm(input, batch1, batch2, beta=1, alpha=1, out=None)
_TEST_CASES_DATA = [
# small basic (shapes must satisfy: input (M,N), batch1 (B,M,K), batch2 (B,K,N))
((3, 5), (2, 3, 4), (2, 4, 5), None, None, None, None, None),
# larger
((8, 8), (4, 8, 8), (4, 8, 8), None, None, None, 0.5, 2.0),
# strided input
((5, 7), (2, 5, 6), (2, 6, 7), (30, 1), (0, 5, 1), None, None, None),
# batched different strides
((2, 2), (4, 2, 3), (4, 3, 2), None, (24, 6, 1), (0, 3, 1), 1.0, None),
# square
((16, 16), (2, 16, 16), (2, 16, 16), None, None, (512, 1, 1), None, 0.1),
# edge small
((1, 1), (1, 1, 1), (1, 1, 1), None, None, None, None, None),
]
_TOLERANCE_MAP = {
infinicore.float16: {"atol": 0, "rtol": 1e-2},
infinicore.float32: {"atol": 0, "rtol": 1e-3},
infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
}
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Expand _TEST_CASES_DATA over _TENSOR_DTYPES into TestCase objects."""
    test_cases = []
    for data in _TEST_CASES_DATA:
        in_shape, b1_shape, b2_shape = data[:3]
        # Optional trailing fields; rows shorter than 8 entries behave like None.
        in_strides, b1_strides, b2_strides, beta, alpha = (
            data[i] if len(data) > i else None for i in range(3, 8)
        )
        # Writing into `input` is only valid when it is not a broadcast view.
        out_supports_inplace = not is_broadcast(in_strides)
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            specs = [
                TensorSpec.from_tensor(shape, strides, dtype)
                for shape, strides in (
                    (in_shape, in_strides),
                    (b1_shape, b1_strides),
                    (b2_shape, b2_strides),
                )
            ]
            # Only forward beta/alpha when the row supplies them.
            kwargs = {
                key: val
                for key, val in (("beta", beta), ("alpha", alpha))
                if val is not None
            }
            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=list(specs),
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="addbmm - OUT_OF_PLACE",
                )
            )
            # In-place: result written into the `input` operand via out=.
            if out_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=list(specs),
                        kwargs=kwargs,
                        output_spec=specs[0],
                        comparison_target="out",
                        tolerance=tol,
                        description="addbmm - INPLACE(out)",
                    )
                )
    return test_cases
class OpTest(BaseOperatorTest):
    """Driver comparing torch.addbmm against the (pending) InfiniCore kernel."""

    def __init__(self):
        super().__init__("addbmm")

    def get_test_cases(self):
        """Return all generated test cases."""
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.addbmm(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.addbmm(*args, **kwargs)
def main():
    """Entry point: run the addbmm test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, t1_shape_or_None, t2_shape_or_None, value)
# Rows: (in_shape, in_strides, t1_shape, t2_shape, value).
# None t1/t2 shapes default to in_shape in parse_test_cases();
# None strides mean contiguous.
_TEST_CASES_DATA = [
    ((2, 3, 4), None, None, None, 1.0),
    ((1, 4, 8), (32, 8, 1), None, None, 0.5),
    ((3, 2, 5, 7), None, None, None, 2.0),
    ((2, 1, 16), None, None, None, 1.0),
    ((1, 8, 9, 11), (792, 99, 11, 1), None, None, 1.5),
    ((2, 6, 10), None, None, None, 0.25),
]
# Per-dtype comparison tolerances against the torch reference.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
# Every dtype each data row is instantiated for.
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Build addcdiv cases: out-of-place, explicit out=, and each in-place slot."""
    cases = []
    for in_shape, in_strides, t1_shape, t2_shape, value in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP[dtype]
            input_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype)
            t1_spec = TensorSpec.from_tensor(
                in_shape if t1_shape is None else t1_shape, None, dtype
            )
            t2_spec = TensorSpec.from_tensor(
                in_shape if t2_shape is None else t2_shape, None, dtype
            )
            operands = (input_spec, t1_spec, t2_spec)

            def add(kwargs, output_spec, target, label):
                # Small local helper so every variant is registered identically.
                cases.append(
                    TestCase(
                        inputs=list(operands),
                        kwargs=kwargs,
                        output_spec=output_spec,
                        comparison_target=target,
                        tolerance=tol,
                        description=label,
                    )
                )

            shared_kwargs = {"value": value}
            # Out-of-place
            add(shared_kwargs, None, None, "addcdiv - OUT_OF_PLACE")
            # Explicit out tensor
            out_spec = TensorSpec.from_tensor(in_shape, None, dtype)
            add(shared_kwargs, out_spec, "out", "addcdiv - INPLACE(out)")
            # In-place on each operand, skipped when its strides broadcast.
            for slot, spec, label in (
                (0, input_spec, "addcdiv - INPLACE(a)"),
                (1, t1_spec, "addcdiv - INPLACE(b)"),
                (2, t2_spec, "addcdiv - INPLACE(c)"),
            ):
                if not is_broadcast(spec.strides):
                    add({"value": value, "out": slot}, None, slot, label)
    return cases
class OpTest(BaseOperatorTest):
    """Driver comparing torch.addcdiv against the (pending) InfiniCore kernel."""

    def __init__(self):
        super().__init__("AddCdiv")

    def get_test_cases(self):
        """Return all generated test cases."""
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.addcdiv(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.addcdiv(*args, **kwargs)
def main():
    """Entry point: run the addcdiv test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, t1_shape_or_None, t2_shape_or_None, value)
# Rows: (in_shape, in_strides, t1_shape, t2_shape, value).
# None t1/t2 shapes default to in_shape in parse_test_cases();
# None strides mean contiguous.
_TEST_CASES_DATA = [
    ((2, 3, 4), None, None, None, 1.0),
    ((1, 4, 8), (32, 8, 1), None, None, 0.5),
    ((3, 2, 5, 7), None, None, None, 2.0),
    ((2, 1, 16), None, None, None, 1.0),
    ((1, 8, 9, 11), (792, 99, 11, 1), None, None, 1.5),
    ((2, 6, 10), None, None, None, 0.25),
]
# Per-dtype comparison tolerances against the torch reference.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-3, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
}
# Every dtype each data row is instantiated for.
_TENSOR_DTYPES = [infinicore.float16, infinicore.float32]
def parse_test_cases():
    """Build addcmul cases: out-of-place, explicit out=, and each in-place slot."""
    cases = []
    for in_shape, in_strides, t1_shape, t2_shape, value in _TEST_CASES_DATA:
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP[dtype]
            input_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype)
            t1_spec = TensorSpec.from_tensor(
                in_shape if t1_shape is None else t1_shape, None, dtype
            )
            t2_spec = TensorSpec.from_tensor(
                in_shape if t2_shape is None else t2_shape, None, dtype
            )
            operands = (input_spec, t1_spec, t2_spec)

            def add(kwargs, output_spec, target, label):
                # Small local helper so every variant is registered identically.
                cases.append(
                    TestCase(
                        inputs=list(operands),
                        kwargs=kwargs,
                        output_spec=output_spec,
                        comparison_target=target,
                        tolerance=tol,
                        description=label,
                    )
                )

            shared_kwargs = {"value": value}
            # Out-of-place
            add(shared_kwargs, None, None, "addcmul - OUT_OF_PLACE")
            # Explicit out tensor
            out_spec = TensorSpec.from_tensor(in_shape, None, dtype)
            add(shared_kwargs, out_spec, "out", "addcmul - INPLACE(out)")
            # In-place on each operand, skipped when its strides broadcast.
            for slot, spec, label in (
                (0, input_spec, "addcmul - INPLACE(a)"),
                (1, t1_spec, "addcmul - INPLACE(b)"),
                (2, t2_spec, "addcmul - INPLACE(c)"),
            ):
                if not is_broadcast(spec.strides):
                    add({"value": value, "out": slot}, None, slot, label)
    return cases
class OpTest(BaseOperatorTest):
    """Driver comparing torch.addcmul against the (pending) InfiniCore kernel."""

    def __init__(self):
        super().__init__("AddCmul")

    def get_test_cases(self):
        """Return all generated test cases."""
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.addcmul(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.addcmul(*args, **kwargs)
def main():
    """Entry point: run the addcmul test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (input_shape, mat_shape, vec_shape, input_strides_or_None, mat_strides_or_None, vec_strides_or_None, beta_or_None, alpha_or_None)
# Rows: (input_shape, mat_shape, vec_shape,
#        input_strides, mat_strides, vec_strides, beta, alpha).
# None strides mean contiguous; None beta/alpha are omitted from kwargs so
# torch's own defaults apply. addmv requires input (M,), mat (M,N), vec (N,).
_TEST_CASES_DATA = [
    ((4,), (4, 6), (6,), None, None, None, None, None),
    ((8,), (8, 8), (8,), None, None, None, 0.0, 1.0),
    ((3,), (3, 5), (5,), None, (15, 1), None, None, 0.5),
    ((16,), (16, 32), (32,), None, (512, 1), None, 1.0, None),
    ((1,), (1, 1), (1,), None, None, None, None, None),
    ((12,), (12, 12), (12,), None, None, None, None, None),
]
# Per-dtype comparison tolerances against the torch reference.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 0, "rtol": 1e-2},
    infinicore.float32: {"atol": 0, "rtol": 1e-3},
    infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
}
# Every dtype each data row is instantiated for.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Expand the addmv data table into concrete TestCase objects."""
    test_cases = []
    for (
        in_shape,
        mat_shape,
        vec_shape,
        in_strides,
        mat_strides,
        vec_strides,
        beta,
        alpha,
    ) in _TEST_CASES_DATA:
        # `input` can only be an in-place destination when not a broadcast view.
        out_supports_inplace = not is_broadcast(in_strides)
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype)
            mat_spec = TensorSpec.from_tensor(mat_shape, mat_strides, dtype)
            vec_spec = TensorSpec.from_tensor(vec_shape, vec_strides, dtype)
            # Only forward beta/alpha when the row supplies them.
            kwargs = {
                key: val
                for key, val in (("beta", beta), ("alpha", alpha))
                if val is not None
            }
            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=[in_spec, mat_spec, vec_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="addmv - OUT_OF_PLACE",
                )
            )
            # In-place: result written into the `input` operand via out=.
            if out_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[in_spec, mat_spec, vec_spec],
                        kwargs=kwargs,
                        output_spec=in_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="addmv - INPLACE(out)",
                    )
                )
    return test_cases
class OpTest(BaseOperatorTest):
    """Driver comparing torch.addmv against the (pending) InfiniCore kernel."""

    def __init__(self):
        super().__init__("addmv")

    def get_test_cases(self):
        """Return all generated test cases."""
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.addmv(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.addmv(*args, **kwargs)
def main():
    """Entry point: run the addmv test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (input_shape, vec1_shape, vec2_shape, input_strides_or_None, vec1_strides_or_None, vec2_strides_or_None, beta_or_None, alpha_or_None)
# Rows: (input_shape, vec1_shape, vec2_shape,
#        input_strides, vec1_strides, vec2_strides, beta, alpha).
# None strides mean contiguous; None beta/alpha are omitted from kwargs so
# torch's own defaults apply. addr (outer product) needs input (N,M),
# vec1 (N,), vec2 (M,).
_TEST_CASES_DATA = [
    ((3, 4), (3,), (4,), None, None, None, None, None),
    ((8, 8), (8,), (8,), None, None, None, 0.5, 2.0),
    ((5, 6), (5,), (6,), (30, 1), None, None, None, None),
    ((1, 1), (1,), (1,), None, None, None, None, None),
    ((16, 4), (16,), (4,), None, None, None, 1.0, None),
    ((2, 7), (2,), (7,), None, None, None, None, None),
]
# Per-dtype comparison tolerances against the torch reference.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 0, "rtol": 1e-2},
    infinicore.float32: {"atol": 0, "rtol": 1e-3},
    infinicore.bfloat16: {"atol": 0, "rtol": 5e-2},
}
# Every dtype each data row is instantiated for.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Expand the addr data table into concrete TestCase objects."""
    test_cases = []
    for (
        in_shape,
        v1_shape,
        v2_shape,
        in_strides,
        v1_strides,
        v2_strides,
        beta,
        alpha,
    ) in _TEST_CASES_DATA:
        # `input` can only be an in-place destination when not a broadcast view.
        out_supports_inplace = not is_broadcast(in_strides)
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 1e-3})
            in_spec = TensorSpec.from_tensor(in_shape, in_strides, dtype)
            v1_spec = TensorSpec.from_tensor(v1_shape, v1_strides, dtype)
            v2_spec = TensorSpec.from_tensor(v2_shape, v2_strides, dtype)
            # Only forward beta/alpha when the row supplies them.
            kwargs = {
                key: val
                for key, val in (("beta", beta), ("alpha", alpha))
                if val is not None
            }
            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=[in_spec, v1_spec, v2_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="addr - OUT_OF_PLACE",
                )
            )
            # In-place: result written into the `input` operand via out=.
            if out_supports_inplace:
                test_cases.append(
                    TestCase(
                        inputs=[in_spec, v1_spec, v2_spec],
                        kwargs=kwargs,
                        output_spec=in_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="addr - INPLACE(out)",
                    )
                )
    return test_cases
class OpTest(BaseOperatorTest):
    """Driver comparing torch.addr against the (pending) InfiniCore kernel."""

    def __init__(self):
        super().__init__("addr")

    def get_test_cases(self):
        """Return all generated test cases."""
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.addr(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.addr(*args, **kwargs)
def main():
    """Entry point: run the addr test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (theta_shape, out_shape, theta_strides_or_None)
# Rows: (theta_shape, out_size, theta_strides).
# theta is the (N, 2, 3) batch of 2D affine matrices; out_size is the
# (N, C, H, W) size passed to affine_grid. None strides mean contiguous.
_TEST_CASES_DATA = [
    ((1, 2, 3), (1, 3, 4, 4), None),
    ((2, 2, 3), (2, 3, 8, 8), None),
    ((1, 2, 3), (1, 4, 6, 6), (6, 2, 1)),
    ((4, 2, 3), (4, 3, 5, 5), None),
    ((2, 2, 3), (2, 1, 7, 7), None),
    ((3, 2, 3), (3, 3, 2, 2), None),
]
# Per-dtype comparison tolerances against the torch reference.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-2, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
# Every dtype each data row is instantiated for.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Generate affine_grid test cases for every dtype and both align_corners modes.

    Returns:
        list[TestCase]: out-of-place cases only (affine_grid has no out= form
        in the data table).
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        theta_shape, out_shape = data[0], data[1]
        theta_strides = data[2] if len(data) > 2 else None
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-5, "rtol": 1e-4})
            theta_spec = TensorSpec.from_tensor(theta_shape, theta_strides, dtype)
            # Exercise both align_corners settings. The former
            # `if align is not None` guard was dead code: `align` is always
            # a bool here, so align_corners is set unconditionally.
            for align in (True, False):
                kwargs = {"size": out_shape, "align_corners": align}
                test_cases.append(
                    TestCase(
                        inputs=[theta_spec],
                        kwargs=kwargs,
                        output_spec=None,
                        comparison_target=None,
                        tolerance=tol,
                        description="affine_grid - OUT_OF_PLACE",
                    )
                )
    return test_cases
class OpTest(BaseOperatorTest):
    """Driver comparing F.affine_grid against the (pending) InfiniCore kernel."""

    def __init__(self):
        super().__init__("AffineGrid")

    def get_test_cases(self):
        """Return all generated test cases."""
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.affine_grid(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.affine_grid(*args, **kwargs)
def main():
    """Entry point: run the affine_grid test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (in_shape, in_strides_or_None, dim_or_None, keepdim_or_None, out_strides_or_None)
# Rows: (in_shape, in_strides, dim, keepdim, out_strides).
# dim may be None (full reduction), an int, or a tuple of ints; None strides
# mean contiguous.
_TEST_CASES_DATA = [
    ((8, 8), None, None, None, None),
    ((8, 8), (16, 1), 1, False, None),
    ((2, 3, 4), None, 0, True, (0, 1, 1)),
    ((1, 8), None, (0, 1), False, None),
    ((16, 64), (128, 1), None, None, None),
    ((4, 5, 6), (60, 12, 2), 2, True, (12, 4, 1)),
]
# torch.all is exact, so only zero tolerances; uint8 falls back to the
# same zero tolerance via _TOLERANCE_MAP.get() in parse_test_cases().
_TOLERANCE_MAP = {infinicore.bool: {"atol": 0, "rtol": 0}}
_TENSOR_DTYPES = [infinicore.bool, infinicore.uint8]
def _compute_out_shape(shape, dim, keepdim):
if dim is None:
return ()
if isinstance(dim, tuple):
dims = sorted([(d if d >= 0 else len(shape) + d) for d in dim])
if keepdim:
out = list(shape)
for d in dims:
out[d] = 1
return tuple(out)
else:
return tuple(s for i, s in enumerate(shape) if i not in dims)
else:
d = dim if dim >= 0 else len(shape) + dim
if keepdim:
out = list(shape)
out[d] = 1
return tuple(out)
else:
return tuple(s for i, s in enumerate(shape) if i != d)
def parse_test_cases():
    """Generate torch.all cases: out-of-place plus explicit out= variants.

    Returns:
        list[TestCase]: one out-of-place case per (row, dtype), and an
        explicit-out case whenever the out strides are not a broadcast view.
    """
    test_cases = []
    for shape, strides, dim, keepdim, out_strides in _TEST_CASES_DATA:
        # (The previous unused `input_supports_inplace` local was removed.)
        out_supports_inplace = not is_broadcast(out_strides)
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 0, "rtol": 0})
            in_spec = TensorSpec.from_tensor(shape, strides, dtype)
            # Only forward dim/keepdim when the row supplies them.
            kwargs = {}
            if dim is not None:
                kwargs["dim"] = dim
            if keepdim is not None:
                kwargs["keepdim"] = keepdim
            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=[in_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="All - OUT_OF_PLACE",
                )
            )
            # Explicit out= tensor; torch.all always yields a bool result.
            # Build the out spec only when it will actually be used.
            if out_supports_inplace:
                out_shape = _compute_out_shape(shape, dim, keepdim)
                out_spec = TensorSpec.from_tensor(
                    out_shape, out_strides, infinicore.bool
                )
                test_cases.append(
                    TestCase(
                        inputs=[in_spec],
                        kwargs=kwargs,
                        output_spec=out_spec,
                        comparison_target="out",
                        tolerance=tol,
                        description="All - INPLACE(out)",
                    )
                )
    return test_cases
class OpTest(BaseOperatorTest):
    """Driver comparing torch.all against the (pending) InfiniCore kernel."""

    def __init__(self):
        super().__init__("All")

    def get_test_cases(self):
        """Return all generated test cases."""
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.all(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.all(*args, **kwargs)
def main():
    """Entry point: run the torch.all test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import torch
import infinicore
from framework.base import BaseOperatorTest, TensorSpec, TestCase
from framework.runner import GenericTestRunner
from framework.utils import is_broadcast
# Test cases format: (shape, p, training, in_strides_or_None)
# Rows: (shape, p, training, in_strides).
# p is the dropout probability; training toggles the stochastic path.
# None strides mean contiguous.
_TEST_CASES_DATA = [
    ((8, 16), 0.1, True, None),
    ((8, 16), 0.2, False, (128, 1)),
    ((2, 3, 4), 0.5, True, None),
    ((16, 64), 0.3, True, None),
    ((4, 5, 6), 0.5, False, None),
    ((3, 4, 5), 0.4, True, (60, 20, 4)),
]
# Per-dtype comparison tolerances against the torch reference.
_TOLERANCE_MAP = {
    infinicore.float16: {"atol": 1e-2, "rtol": 1e-2},
    infinicore.float32: {"atol": 1e-5, "rtol": 1e-4},
    infinicore.bfloat16: {"atol": 1e-2, "rtol": 5e-2},
}
# Every dtype each data row is instantiated for.
_TENSOR_DTYPES = [infinicore.float16, infinicore.bfloat16, infinicore.float32]
def parse_test_cases():
    """Generate out-of-place alpha_dropout cases for every dtype.

    Returns:
        list[TestCase]: one OUT_OF_PLACE case per (row, dtype). No in-place
        variants are generated; the previously computed `supports_inplace`
        local was never used and has been removed.
    """
    test_cases = []
    for data in _TEST_CASES_DATA:
        shape, p, training = data[0], data[1], data[2]
        in_strides = data[3] if len(data) > 3 else None
        for dtype in _TENSOR_DTYPES:
            tol = _TOLERANCE_MAP.get(dtype, {"atol": 1e-2, "rtol": 1e-2})
            input_spec = TensorSpec.from_tensor(shape, in_strides, dtype)
            kwargs = {"p": p, "training": training}
            # Out-of-place
            test_cases.append(
                TestCase(
                    inputs=[input_spec],
                    kwargs=kwargs,
                    output_spec=None,
                    comparison_target=None,
                    tolerance=tol,
                    description="alpha_dropout - OUT_OF_PLACE",
                )
            )
    return test_cases
class OpTest(BaseOperatorTest):
    """Driver comparing F.alpha_dropout against the (pending) InfiniCore kernel."""

    def __init__(self):
        super().__init__("AlphaDropout")

    def get_test_cases(self):
        """Return all generated test cases."""
        cases = parse_test_cases()
        return cases

    def torch_operator(self, *args, **kwargs):
        """PyTorch reference implementation."""
        return torch.nn.functional.alpha_dropout(*args, **kwargs)

    # def infinicore_operator(self, *args, **kwargs):
    #     """InfiniCore implementation (operator not yet available)."""
    #     return infinicore.nn.functional.alpha_dropout(*args, **kwargs)
def main():
    """Entry point: run the alpha_dropout test suite and exit with its status."""
    GenericTestRunner(OpTest).run_and_exit()


if __name__ == "__main__":
    main()
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment