Unverified Commit 27777ee1 authored by baominghelly, committed by GitHub

issue/818 - Create a test api and add load test case from file feature (#844)

* issue/818 - Create a test api and add load test case from file feature

* issue/818 - Rename class
parent a2d05600
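For reference, a minimal sketch of a JSON case file that the new --load flag consumes, inferred from the fields read by TestGenerator, parse_test_cases, and load_and_override_cases in this diff; the operator names, shapes, and file name below are hypothetical. A file (or directory of such files) like this would then be passed to run.py through its --load argument.

import json

# Hypothetical case file for the --load feature (schema inferred from this diff).
example_case = {
    "operator": "add",                     # identifies the case; used to name the generated test file
    "torch_op": "torch.add",               # null -> the generated torch_operator body becomes 'pass'
    "infinicore_op": "infinicore.add",     # null -> the generated infinicore_operator body becomes 'pass'
    "device": "cpu",                       # overridden when device flags are passed on the CLI
    "args": {"bench": None, "verbose": False},
    "testcases": [
        {
            "inputs": [
                {"name": "a", "shape": [2, 3], "dtype": "float32"},
                {"name": "b", "shape": [2, 3], "dtype": "float32"},
            ],
            "kwargs": {"out": "a"},        # name reference, resolved to an input index
            "comparison_target": "a",
            "tolerance": {"atol": 0, "rtol": 1e-3},
            "description": "elementwise add written into a",
        }
    ],
}

with open("add_case.json", "w", encoding="utf-8") as f:
    json.dump(example_case, f, indent=2)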
@@ -11,6 +11,7 @@ from .datatypes import to_torch_dtype, to_infinicore_dtype
from .devices import InfiniDeviceEnum, InfiniDeviceNames, torch_device_map
from .results import TestTiming, OperatorResult, CaseResult, TestSummary
from .runner import GenericTestRunner
from .test_manager import TestManager, TestCollector
from .tensor import TensorSpec, TensorInitializer
from .executor import TestExecutor
from .utils.compare_utils import (
@@ -20,6 +21,7 @@ from .utils.compare_utils import (
get_tolerance,
)
from .utils.json_utils import save_json_report
from .utils.load_utils import TestGenerator
from .utils.tensor_utils import (
infinicore_tensor_from_torch,
convert_infinicore_to_torch,
@@ -39,11 +41,14 @@ __all__ = [
"InfiniDeviceEnum",
"InfiniDeviceNames",
"OperatorResult",
"TestGenerator",
"TensorInitializer",
"TensorSpec",
"TestCase",
"TestCollector",
"TestConfig",
"TestExecutor",
"TestManager",
"TestSummary",
"TestRunner",
"TestTiming",
......
@@ -18,7 +18,13 @@ def capture_output():
class TestExecutor:
def execute(self, file_path) -> OperatorResult:
def execute(self, file_path, test_args) -> OperatorResult:
"""
Execute a test file dynamically.
Args:
file_path (Path): Path to the python test file.
test_args (argparse.Namespace): Arguments to pass to the runner. Must be provided.
"""
result = OperatorResult(name=file_path.stem)
try:
@@ -36,7 +42,7 @@ class TestExecutor:
test_instance = test_class()
runner_class = module.GenericTestRunner
runner = runner_class(test_instance.__class__)
runner = runner_class(test_instance.__class__, args=test_args)
# 4. Execute and capture output
with capture_output() as (out, err):
......
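A quick usage sketch of the updated TestExecutor.execute signature; the ops/add.py path and the Namespace fields below are assumptions consistent with the arguments built elsewhere in this diff.

import argparse
from pathlib import Path

from framework.executor import TestExecutor

# Hypothetical arguments; in practice run.py or a JSON case supplies these.
test_args = argparse.Namespace(
    cpu=True, verbose=False, debug=False, eq_nan=False,
    bench=None, save=None, num_prerun=10, num_iterations=1000,
)

executor = TestExecutor()
result = executor.execute(Path("ops/add.py"), test_args=test_args)  # hypothetical test file
print(result.name, result.success)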
@@ -119,9 +119,9 @@ class TestSummary:
# Part 2: Console Output (View)
# =========================================================
def list_tests(self, discoverer):
ops_dir = discoverer.ops_dir
operators = discoverer.get_available_operators()
def list_tests(self, collector):
ops_dir = collector.ops_dir
operators = collector.get_available_operators()
if operators:
print(f"Available operator test files in {ops_dir}:")
@@ -130,7 +130,7 @@ class TestSummary:
print(f"\nTotal: {len(operators)} operators")
else:
print(f"No valid operator tests found in {ops_dir}")
raw_files = discoverer.get_raw_python_files()
raw_files = collector.get_raw_python_files()
if raw_files:
print(
f"\n💡 Debug Hint: Found Python files but they are not valid tests:"
......
import sys
import argparse
import tempfile
from pathlib import Path
from .executor import TestExecutor
from .results import TestSummary, TestTiming
from .utils.load_utils import TestGenerator
class TestCollector:
"""
Responsible for scanning and verifying operator test files.
"""
def __init__(self, ops_dir_path=None):
self.ops_dir = self._resolve_dir(ops_dir_path)
def _resolve_dir(self, path):
if path:
p = Path(path)
if p.exists():
return p
# Fallback: 'ops' directory relative to the project root
fallback = Path(__file__).parent.parent / "ops"
return fallback if fallback.exists() else None
def get_available_operators(self):
if not self.ops_dir:
return []
files = self.scan()
return sorted([f.stem for f in files])
def get_raw_python_files(self):
if not self.ops_dir or not self.ops_dir.exists():
return []
files = list(self.ops_dir.glob("*.py"))
return [
f.name for f in files if f.name != "run.py" and not f.name.startswith("__")
]
def scan(self, specific_ops=None):
if not self.ops_dir or not self.ops_dir.exists():
return []
files = list(self.ops_dir.glob("*.py"))
target_ops_set = set(specific_ops) if specific_ops else None
valid_files = []
for f in files:
if f.name.startswith("_") or f.name == "run.py":
continue
if target_ops_set and f.stem not in target_ops_set:
continue
if self._is_operator_test(f):
valid_files.append(f)
return valid_files
def _is_operator_test(self, file_path):
try:
with open(file_path, "r", encoding="utf-8") as f:
content = f.read()
return "infinicore" in content and (
"BaseOperatorTest" in content or "GenericTestRunner" in content
)
        except Exception:
            return False
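A small usage sketch of TestCollector on its own; the ops directory path is hypothetical.

from framework.test_manager import TestCollector

collector = TestCollector("test/infinicore/ops")   # hypothetical path; None falls back to ../ops
print(collector.get_available_operators())         # stems of files that pass _is_operator_test
print(collector.get_raw_python_files())            # every .py file, handy when a test fails validation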
class TestManager:
"""
High-level API to execute operator tests.
Encapsulates the test loop, timing aggregation, and reporting.
"""
def __init__(self, ops_dir=None, verbose=False, bench_mode=None):
self.collector = TestCollector(ops_dir)
self.verbose = verbose
self.bench_mode = bench_mode
# Initialize components
self.executor = TestExecutor()
self.summary = TestSummary(verbose, bench_mode)
self.cumulative_timing = TestTiming()
self.results = []
def test(self, target_ops=None, json_cases_list=None, global_exec_args=None):
"""
Args:
target_ops: List of target operators for local scan
json_cases_list: List of test cases in JSON mode
global_exec_args (argparse.Namespace): Unified argument object passed to Executor in local scan mode
"""
with tempfile.TemporaryDirectory() as temp_dir_str:
test_files = []
test_configs = [] # Stores args for each file
display_location = ""
# =================================================
# 1. Mode Selection
# =================================================
if json_cases_list:
# [Mode A] Dynamic Execution (JSON)
print(f"🚀 Mode: Dynamic Execution")
project_root = getattr(
self, "project_root", Path(__file__).resolve().parent.parent
)
generator = TestGenerator(project_root=str(project_root))
# Generate files
dynamic_paths = generator.generate(json_cases_list, temp_dir_str)
test_files = [Path(p) for p in dynamic_paths]
# Convert JSON dict to Namespace
for case_data in json_cases_list:
# run.py has sanitized the data, convert directly to Namespace
ns = argparse.Namespace(**case_data.get("args", {}))
test_configs.append(ns)
display_location = f"Dynamic ({len(test_files)} cases)"
else:
# [Mode B] Local File Scan
# print(f"📂 Mode: Local File Scan")
test_files = self.collector.scan(target_ops)
display_location = str(self.collector.ops_dir)
# ✅ Key Logic: Apply global_exec_args passed from run.py to all files
# If global_exec_args is None (run.py should theoretically fill this), executor falls back to default behavior
test_configs = [global_exec_args] * len(test_files)
# =================================================
# 2. Execution Loop
# =================================================
if not test_files:
print(f"No valid tests found in {display_location}")
return True
self.summary.print_header(display_location, len(test_files))
for f, run_args in zip(test_files, test_configs):
# Inject prepared args (whether from JSON or Local global) into Executor
result = self.executor.execute(f, test_args=run_args)
self.results.append(result)
self.summary.print_live_result(result)
if result.success:
self._accumulate_timing(result.timing)
if self.verbose and not result.success:
print("\nStopping due to failure in verbose mode.")
break
# Summary
all_passed = self.summary.print_summary(
self.results,
self.cumulative_timing if self.bench_mode else None,
ops_dir=display_location,
total_expected=len(test_files),
)
return all_passed
def _accumulate_timing(self, timing):
self.cumulative_timing.torch_host += timing.torch_host
self.cumulative_timing.infini_host += timing.infini_host
self.cumulative_timing.torch_device += timing.torch_device
self.cumulative_timing.infini_device += timing.infini_device
self.cumulative_timing.operators_tested += 1
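A sketch of driving the new TestManager API directly, mirroring what run.py does further down; the ops directory, operator name, and argument values are assumptions.

import argparse

from framework.test_manager import TestManager

# Local scan mode: execute matching test files found under ops_dir (hypothetical path).
manager = TestManager(ops_dir="test/infinicore/ops", verbose=False, bench_mode=None)
local_args = argparse.Namespace(
    cpu=True, verbose=False, debug=False, eq_nan=False,
    bench=None, save=None, num_prerun=10, num_iterations=1000,
)
all_passed = manager.test(target_ops=["add"], global_exec_args=local_args)  # hypothetical op

# JSON mode instead: manager.test(json_cases_list=json_cases), where json_cases comes
# from load_and_override_cases in run.py below.
print("OK" if all_passed else "FAILED")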
import json
import os
import sys
import pprint
from pathlib import Path
# ==============================================================================
# OpTest Templates
# ==============================================================================
_TEST_FILE_TEMPLATE = r'''import sys
import os
import json
import pprint
# Path Injection
sys.path.insert(0, r"{project_root}")
import torch
import torch.nn.functional
import infinicore
from framework import (
BaseOperatorTest,
TensorSpec,
TestCase,
GenericTestRunner,
)
# ==============================================================================
# Injected Configuration
# ==============================================================================
_OP_CONFIG = {op_config_json}
# ==============================================================================
# Helpers
# ==============================================================================
def _parse_dtype(dtype_str):
"""Convert string dtype to framework/torch object."""
if hasattr(infinicore, dtype_str): return getattr(infinicore, dtype_str)
if hasattr(torch, dtype_str): return getattr(torch, dtype_str)
return dtype_str
def _dict_to_spec(spec_dict):
"""Convert JSON dict to TensorSpec object."""
if not isinstance(spec_dict, dict): return spec_dict
return TensorSpec(
shape=tuple(spec_dict['shape']),
dtype=_parse_dtype(spec_dict['dtype']),
name=spec_dict.get('name'),
strides=tuple(spec_dict['strides']) if spec_dict.get('strides') else None
)
def parse_test_cases():
"""Parse JSON testcases into framework TestCase objects."""
test_cases = []
raw_cases = _OP_CONFIG.get("testcases", [])
for case in raw_cases:
# 1. Parse Inputs and build name-to-index map
input_specs = []
name_to_index = {}
for idx, inp in enumerate(case.get('inputs', [])):
spec = _dict_to_spec(inp)
input_specs.append(spec)
if spec.name:
name_to_index[spec.name] = idx
# 2. Parse Kwargs
kwargs = {}
for k, v in case.get('kwargs', {}).items():
# Resolve string references (e.g., "out": "a" -> "out": 0)
if k == "out" and isinstance(v, str) and v in name_to_index:
kwargs[k] = name_to_index[v]
elif isinstance(v, dict) and "shape" in v:
kwargs[k] = _dict_to_spec(v)
else:
kwargs[k] = v
# 3. Handle explicit output spec
output_spec = None
if "out" in kwargs and isinstance(kwargs["out"], TensorSpec):
output_spec = kwargs.pop("out")
# 4. Tolerance & Comparison Target
tol_dict = case.get('tolerance', {})
tolerance = {"atol": tol_dict.get("atol", 0), "rtol": tol_dict.get("rtol", 1e-3)}
comp_target = case.get('comparison_target')
if isinstance(comp_target, str) and comp_target in name_to_index:
comp_target = name_to_index[comp_target]
test_cases.append(TestCase(
inputs=input_specs,
kwargs=kwargs,
output_spec=output_spec,
comparison_target=comp_target,
tolerance=tolerance,
description=case.get('description', "Dynamic Case")
))
return test_cases
class OpTest(BaseOperatorTest):
def __init__(self):
super().__init__(_OP_CONFIG.get("operator", "UnknownOp"))
def get_test_cases(self):
"""Returns the list of parsed test cases."""
return parse_test_cases()
def _resolve_kwargs(self, args, kwargs):
"""Resolves index-based 'out' arguments to actual Tensors."""
resolved_kwargs = kwargs.copy()
if "out" in resolved_kwargs:
val = resolved_kwargs["out"]
if isinstance(val, int) and 0 <= val < len(args):
resolved_kwargs["out"] = args[val]
return resolved_kwargs
def torch_operator(self, *args, **kwargs):
"""PyTorch operator implementation."""
{torch_method_body}
def infinicore_operator(self, *args, **kwargs):
"""InfiniCore operator implementation."""
{infini_method_body}
def main():
"""Execution entry point."""
runner = GenericTestRunner(OpTest)
runner.run_and_exit()
if __name__ == "__main__":
main()
'''
class TestGenerator:
def __init__(self, project_root):
self.project_root = os.path.abspath(project_root)
def generate(self, json_list, output_dir):
generated_files = []
for idx, op_config in enumerate(json_list):
op_name = op_config.get("operator", "Unknown")
file_name = f"test_{op_name}_{idx}.py"
file_path = os.path.join(output_dir, file_name)
# 1. Fetch operator names
torch_op_name = op_config.get("torch_op")
infinicore_op_name = op_config.get("infinicore_op")
# 2. Prepare method bodies
# If the op name is provided, generate the return statement.
# If it's None/null, use 'pass' to avoid syntax errors.
make_body = lambda name, tag: (
f"return {name}(*args, **self._resolve_kwargs(args, kwargs))"
if name else f"pass # {tag} is null, skipping implementation"
)
torch_body = make_body(torch_op_name, "torch_op")
infini_body = make_body(infinicore_op_name, "infinicore_op")
# 3. Fill the template
config_str = pprint.pformat(op_config, indent=4, width=120)
file_content = _TEST_FILE_TEMPLATE.replace("{op_config_json}", config_str)
file_content = file_content.replace("{project_root}", self.project_root)
# Injected Method Bodies
file_content = file_content.replace("{torch_method_body}", torch_body)
file_content = file_content.replace("{infini_method_body}", infini_body)
with open(file_path, "w", encoding="utf-8") as f:
f.write(file_content)
generated_files.append(file_path)
return generated_files
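A minimal sketch of using TestGenerator by itself, assuming a case dict shaped like the example near the top of this diff; the project root path is hypothetical.

import tempfile

from framework.utils.load_utils import TestGenerator

cases = [{
    "operator": "add",                 # hypothetical operator
    "torch_op": "torch.add",
    "infinicore_op": "infinicore.add",
    "testcases": [],
}]

with tempfile.TemporaryDirectory() as tmp:
    generator = TestGenerator(project_root="/path/to/test/infinicore")  # hypothetical root
    paths = generator.generate(cases, tmp)
    print(paths)  # e.g. ['/tmp/.../test_add_0.py']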
import sys
import argparse
import json
import os
from pathlib import Path
# Import components from the unified framework package
from framework.executor import TestExecutor
from framework.results import TestSummary, TestTiming
from framework import get_hardware_args_group, add_common_test_args
class TestDiscoverer:
def __init__(self, ops_dir_path=None):
self.ops_dir = self._resolve_dir(ops_dir_path)
def _resolve_dir(self, path):
if path:
p = Path(path)
if p.exists():
return p
        # Default fallback: the 'ops' directory next to this file.
        # It is recommended to pass an explicit path from run.py.
fallback = Path(__file__).parent / "ops"
return fallback if fallback.exists() else None
def get_available_operators(self):
"""Returns a list of names of all available operators."""
if not self.ops_dir:
return []
files = self.scan()
return sorted([f.stem for f in files])
def get_raw_python_files(self):
"""
Get all .py files in the directory (excluding run.py) without content validation.
Used for debugging: helps identify files that exist but failed validation.
"""
if not self.ops_dir or not self.ops_dir.exists():
return []
files = list(self.ops_dir.glob("*.py"))
# Exclude run.py itself and __init__.py
return [
f.name for f in files if f.name != "run.py" and not f.name.startswith("__")
]
def scan(self, specific_ops=None):
"""Scans and returns a list of Path objects that meet the criteria."""
if not self.ops_dir or not self.ops_dir.exists():
return []
# 1. Find all .py files
files = list(self.ops_dir.glob("*.py"))
target_ops_set = set(specific_ops) if specific_ops else None
# 2. Filter out non-test files (via content check)
valid_files = []
for f in files:
# A. Basic Name Filtering
if f.name.startswith("_") or f.name == "run.py":
continue
# B. Specific Ops Filtering
if target_ops_set and f.stem not in target_ops_set:
continue
# C. Content Check (Expensive I/O)
# Only perform this check if the file passed the name filters above.
if self._is_operator_test(f):
valid_files.append(f)
return valid_files
def _is_operator_test(self, file_path):
"""Checks if the file content contains operator test characteristics."""
try:
with open(file_path, "r", encoding="utf-8") as f:
content = f.read()
return "infinicore" in content and (
"BaseOperatorTest" in content or "GenericTestRunner" in content
)
        except Exception:
            return False
from framework import (
get_hardware_args_group,
add_common_test_args,
InfiniDeviceEnum,
InfiniDeviceNames,
)
from framework.test_manager import TestCollector, TestManager
def generate_help_epilog(ops_dir=None):
@@ -90,10 +18,10 @@ def generate_help_epilog(ops_dir=None):
Generate dynamic help epilog containing available operators and hardware platforms.
Maintains the original output format for backward compatibility.
"""
# === Adapter: Use TestDiscoverer to get operator list ===
# Temporarily instantiate a Discoverer just to fetch the list
discoverer = TestDiscoverer(ops_dir)
operators = discoverer.get_available_operators()
# === Adapter: Use TestCollector to get operator list ===
# Temporarily instantiate a Collector just to fetch the list
collector = TestCollector(ops_dir)
operators = collector.get_available_operators()
# Build epilog text (fully replicating original logic)
epilog_parts = []
@@ -161,6 +89,135 @@ def generate_help_epilog(ops_dir=None):
return "\n".join(epilog_parts)
def fill_defaults_for_local_mode(args):
"""
Helper function specifically for Local Scan mode to fill default arguments.
Since parser defaults are set to None (to handle override logic in load mode),
we need to manually fill None with default values in local mode.
"""
# 1. Copy args to avoid modifying the original object and affecting other logic
# argparse.Namespace can be converted to dict and back, or copied directly
local_args = argparse.Namespace(**vars(args))
# 2. Fill default values for numeric arguments
if local_args.num_prerun is None:
local_args.num_prerun = 10
if local_args.num_iterations is None:
local_args.num_iterations = 1000
return local_args
def load_and_override_cases(load_paths, args):
"""
Load JSON, apply CLI overrides, and handle all argument logic.
"""
cases = []
files_to_read = []
# 1. Scan
for p_str in load_paths:
p = Path(p_str)
if p.is_dir():
files_to_read.extend(p.glob("*.json"))
elif p.is_file():
files_to_read.append(p)
# 2. Read and Validate
loaded_count = 0
skipped_count = 0
for f_path in files_to_read:
try:
with open(f_path, 'r', encoding='utf-8') as f:
data = json.load(f)
# Unify as a list to handle both single dict and list of dicts
current_batch = data if isinstance(data, list) else [data]
valid_batch = []
for item in current_batch:
# We only require the 'operator' field to identify the test case.
if isinstance(item, dict) and "operator" in item:
valid_batch.append(item)
else:
skipped_count += 1
if valid_batch:
cases.extend(valid_batch)
loaded_count += 1
except Exception as e:
# Log warning only; do not crash the program on bad files to ensure flow continuity.
print(f"❌ Error loading {f_path.name}: {e}")
if skipped_count > 0:
print(f"ℹ️ Ignored {skipped_count} items/files (invalid format).")
# ==================================================
# Device Logic using InfiniDeviceEnum
# ==================================================
# 1. Identify active devices from CLI arguments
cli_active_devices = []
    # Iterate the device-name mapping to find the corresponding CLI flags.
    # Logic: display name from InfiniDeviceNames (e.g., "Cambricon") -> lower() -> arg name ("cambricon")
for device_enum, device_name in InfiniDeviceNames.items():
# device_name is like "CPU", "NVIDIA", "Cambricon"
# arg_name becomes "cpu", "nvidia", "cambricon"
arg_name = device_name.lower()
if getattr(args, arg_name, False):
cli_active_devices.append(device_name)
print(f"\n[Config Processing]")
for case in cases:
if "args" not in case or case["args"] is None:
case["args"] = {}
case_args = case["args"]
# 2. Apply Device Overrides (CLI > JSON)
if cli_active_devices:
case["device"] = ",".join(cli_active_devices)
final_dev_str = case.get("device", "").upper() # Uppercase for easier matching
# 3. Set Boolean flags in case_args based on final device string
for device_enum, device_name in InfiniDeviceNames.items():
arg_name = device_name.lower()
# Check if the standard name (e.g., "Cambricon" or "NVIDIA") is in the device string
# We convert both to upper to ensure case-insensitive matching
is_active = device_name.upper() in final_dev_str
case_args[arg_name] = is_active
case_args["save"] = getattr(args, "save", None)
# Standard arguments (CLI > JSON > Default)
case_args["bench"] = (
args.bench if args.bench is not None else case_args.get("bench")
)
# Boolean Flags
case_args["verbose"] = args.verbose or case_args.get("verbose", False)
case_args["debug"] = args.debug or case_args.get("debug", False)
case_args["eq_nan"] = args.eq_nan or case_args.get("eq_nan", False)
case_args["num_prerun"] = (
args.num_prerun
if args.num_prerun is not None
else (case_args.get("num_prerun") or 10)
)
case_args["num_iterations"] = (
args.num_iterations
if args.num_iterations is not None
else (case_args.get("num_iterations") or 1000)
)
print(f"📂 Processed {len(cases)} cases ready for execution.\n")
return cases
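The precedence applied above is CLI > JSON > built-in default. A self-contained sketch of that rule for the numeric arguments (resolve_numeric is an illustration-only helper; the real function also handles device flags and boolean options):

def resolve_numeric(cli_value, json_value, default):
    # CLI wins when the user supplied it (the parser default is None),
    # then the value stored in the JSON case, then the built-in default.
    if cli_value is not None:
        return cli_value
    return json_value if json_value else default

assert resolve_numeric(None, None, 10) == 10   # nothing provided -> default
assert resolve_numeric(None, 25, 10) == 25     # JSON only
assert resolve_numeric(5, 25, 10) == 5         # CLI overrides JSON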
def main():
"""Main entry point for the InfiniCore Operator Test Runner."""
parser = argparse.ArgumentParser(
@@ -179,6 +236,15 @@ def main():
action="store_true",
help="List all available test files without running them",
)
    parser.add_argument(
        "--load",
        nargs="+",
        help="Load test cases from one or more JSON files or directories",
    )
    # Defaults are None so we can tell whether the user explicitly provided a value
parser.add_argument("--num_prerun", type=lambda x: max(0, int(x)), default=None)
parser.add_argument("--num_iterations", type=lambda x: max(0, int(x)), default=None)
# Add common test arguments (including --save, --bench, etc.)
add_common_test_args(parser)
@@ -190,87 +256,84 @@ def main():
print(f"Passing extra arguments to test scripts: {unknown_args}")
# 1. Discovery
discoverer = TestDiscoverer(args.ops_dir)
collector = TestCollector(args.ops_dir)
if args.list:
print("Available operators:", discoverer.get_available_operators())
print("Available operators:", collector.get_available_operators())
return
if args.verbose:
print(f"Verbose mode: ENABLED (will stop on first error with full traceback)")
if args.bench:
print(f"Benchmark mode: {args.bench.upper()} timing")
target_ops = None
if args.ops:
# Get all available operator names
available_ops = set(discoverer.get_available_operators())
requested_ops = set(args.ops)
# Classify using set operations
valid_ops = list(requested_ops & available_ops) # Intersection: Valid ops
invalid_ops = list(requested_ops - available_ops) # Difference: Invalid ops
# Warn if there are invalid operators
if invalid_ops:
print(f"⚠️ Warning: The following requested operators were not found:")
print(f" {', '.join(invalid_ops)}")
print(f" (Use --list to see available operators)")
if not valid_ops:
# Case A: User input provided, but ALL were invalid.
print(f"⚠️ No valid operators remained from your list.")
print(f"🔄 Fallback: Proceeding to run ALL available tests...")
else:
# Case B: At least some valid operators found.
print(f"🎯 Targeted operators: {', '.join(valid_ops)}")
target_ops = valid_ops
test_files = discoverer.scan(target_ops)
if not test_files:
print("No tests found.")
sys.exit(0)
# 2. Preparation
executor = TestExecutor()
cumulative_timing = TestTiming()
test_summary = TestSummary(args.verbose, args.bench)
results = []
test_summary.print_header(discoverer.ops_dir, len(test_files))
# 3. Execution Loop
for f in test_files:
result = executor.execute(f)
results.append(result)
# Real-time reporting and printing of stdout
test_summary.print_live_result(result)
# Accumulate timing
if result.success:
cumulative_timing.torch_host += result.timing.torch_host
cumulative_timing.infini_host += result.timing.infini_host
cumulative_timing.torch_device += result.timing.torch_device
cumulative_timing.infini_device += result.timing.infini_device
cumulative_timing.operators_tested += 1
# Fail fast in verbose mode
if args.verbose and not result.success:
print("\nStopping due to failure in verbose mode.")
break
# 4. Final Report & Save
all_passed = test_summary.print_summary(
results,
cumulative_timing if args.bench else None,
ops_dir=discoverer.ops_dir,
total_expected=len(test_files),
)
# ==========================================================================
# Branch 1: Load Mode (JSON Data Driven)
# ==========================================================================
if args.load:
# 1. Load and override arguments
json_cases = load_and_override_cases(args.load, args)
if not json_cases:
sys.exit(1)
# 2. Determine global Bench status (for Summary display)
bench = json_cases[0]["args"].get("bench")
verbose = json_cases[0]["args"].get("verbose")
sys.exit(0 if all_passed else 1)
if verbose:
print(
f"Verbose mode: ENABLED (will stop on first error with full traceback)"
)
if bench:
print(f"Benchmark mode: {args.bench.upper()} timing")
# 3. Initialize and Execute
test_manager = TestManager(ops_dir=args.ops_dir, verbose=verbose, bench_mode=bench)
success = test_manager.test(json_cases_list=json_cases)
# ==========================================================================
# Branch 2: Local Scan Mode
# ==========================================================================
else:
if args.verbose:
print(
f"Verbose mode: ENABLED (will stop on first error with full traceback)"
)
if args.bench:
print(f"Benchmark mode: {args.bench.upper()} timing")
# 2. Filtering
target_ops = None
if args.ops:
available_ops = set(collector.get_available_operators())
requested_ops = set(args.ops)
valid_ops = list(requested_ops & available_ops)
invalid_ops = list(requested_ops - available_ops)
if invalid_ops:
print(f"⚠️ Warning: The following requested operators were not found:")
print(f" {', '.join(invalid_ops)}")
print(f" (Use --list to see available operators)")
if not valid_ops:
# Case A: User input provided, but ALL were invalid.
print(f"⚠️ No valid operators remained from your list.")
print(f"🔄 Fallback: Proceeding to run ALL available tests...")
else:
# Case B: At least some valid operators found.
print(f"🎯 Targeted operators: {', '.join(valid_ops)}")
target_ops = valid_ops
# 3. Execution Preparation
# Fill defaults for local mode (since parser default is None)
global_exec_args = fill_defaults_for_local_mode(args)
# 4. Initialize API & Execute
test_manager = TestManager(
ops_dir=args.ops_dir, verbose=args.verbose, bench_mode=args.bench
)
success = test_manager.test(
target_ops=target_ops, global_exec_args=global_exec_args
)
sys.exit(0 if success else 1)
if __name__ == "__main__":
......