"src/include/ConstantMatrixDescriptor.hpp" did not exist on "f35c64eb78af4754e78f8746c8e28d2ac8b68e80"
Unverified Commit 556d411e authored by Lei Wang, committed by GitHub

[Typo] Remove `disable_cache` in some tests (#755)

* Update test parameters and remove debug print statement

- Adjusted test cases in `test_tilelang_dynamic_symbolic_bench.py` to use smaller matrix sizes (1024x1024) so the benchmarks run faster.
- Removed a debug print statement from `phase.py` to clean up the code and enhance clarity.

* Refactor loop stack management in warp_specialized_rewriter

- Introduced a new `LoopInfo` struct to encapsulate loop variable details, including `loop_var`, `extent`, and `min`, enhancing clarity and maintainability.
- Updated the `loop_stack_` to utilize `LoopInfo` instead of a pair, improving type safety and readability.
- Adjusted linear index calculations to account for the new structure, ensuring correct behavior in loop transformations (see the sketch below).
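
A minimal C++ sketch of what such a refactor can look like, assuming TVM-style `Var`/`PrimExpr` types; only `LoopInfo`, `loop_var`, `extent`, and `min` come from the commit message, every other name is illustrative rather than taken from the tilelang source:

```cpp
// Illustrative sketch only, not the actual tilelang implementation.
#include <tvm/tir/expr.h>
#include <tvm/tir/op.h>

#include <vector>

using tvm::PrimExpr;
using tvm::tir::Var;

// Replaces the previous std::pair<Var, PrimExpr>: each field now has a name,
// and the loop's lower bound is carried along explicitly.
struct LoopInfo {
  Var loop_var;     // loop iteration variable
  PrimExpr extent;  // number of iterations
  PrimExpr min;     // lower bound of the loop
};

// Linear index over a stack of nested loops (outermost first); the `min`
// offset is subtracted so each loop level contributes a zero-based index.
inline PrimExpr LinearIndex(const std::vector<LoopInfo>& loop_stack) {
  PrimExpr index = 0;
  for (const LoopInfo& info : loop_stack) {
    index = index * info.extent + (info.loop_var - info.min);
  }
  return index;
}
```

Compared with a bare `std::pair`, the named fields document themselves at call sites, and carrying `min` lets the linear index handle loops whose lower bound is not zero.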

* Remove unused `torch.backends` imports and `tilelang.disable_cache()` calls from multiple test files to enhance code clarity and maintainability.
parent b39aaf5b
```diff
@@ -4,8 +4,6 @@ from tilelang import tvm as tvm
 import tilelang.language as T
 import torch
-tilelang.disable_cache()
 def matmul(M, N, K, block_M, block_N, block_K, dtype="float16", accum_dtype="float"):
     num_stages = 0
...
```
```diff
 import torch
-import torch.backends
 from tilelang import tvm as tvm
 import tilelang.testing
 import tilelang.language as T
 tilelang.testing.set_random_seed(0)
-tilelang.disable_cache()
 def tl_matmul_block_static(
     M,
...
```
```diff
 import torch
-import torch.backends
 import tilelang
 from tilelang import tvm as tvm
 import tilelang.testing
@@ -14,7 +13,6 @@ from tilelang.intrinsics.mma_macro_generator import (
 from tilelang.transform import simplify_prim_func
 tilelang.testing.set_random_seed(42)
-tilelang.disable_cache()
 # @simplify_prim_func
...
```
```diff
@@ -3,8 +3,6 @@ import tilelang.language as T
 import tilelang.testing
 import torch
-tilelang.disable_cache()
 # add decorator @tilelang.jit if you want to return a torch function
 # @tilelang.jit
...
```
```diff
@@ -3,8 +3,6 @@ import math
 import tilelang
 import tilelang.language as T
-tilelang.disable_cache()
 def blocksparse_flashattn(batch, heads, seq_len, dim, downsample_len, is_causal):
     block_M = 64
...
```
```diff
@@ -189,4 +189,4 @@ def test_sync_let_stmt():
 if __name__ == "__main__":
-    tilelang.disable_cache()
+    tilelang.testing.main()
```