Unverified Commit d54ee0fb authored by PanZezhong1725's avatar PanZezhong1725 Committed by GitHub
Browse files

Merge pull request #201 from InfiniTensor/issue/200_fix_add_swiglu_testcases

Issue/200/fix: Update FP16 and FP32 Tolerance for Add and SwiGLU
parents 11e7df93 7e7528cd
...@@ -62,7 +62,8 @@ _TENSOR_DTYPES = [torch.float16, torch.float32] ...@@ -62,7 +62,8 @@ _TENSOR_DTYPES = [torch.float16, torch.float32]
# Tolerance map for different data types # Tolerance map for different data types
_TOLERANCE_MAP = { _TOLERANCE_MAP = {
torch.float16: {"atol": 1e-4, "rtol": 1e-2}, torch.float16: {"atol": 1e-3, "rtol": 1e-3},
torch.float32: {"atol": 1e-7, "rtol": 1e-7},
} }
DEBUG = False DEBUG = False
......
...@@ -61,7 +61,8 @@ _TENSOR_DTYPES = [torch.float16, torch.float32] ...@@ -61,7 +61,8 @@ _TENSOR_DTYPES = [torch.float16, torch.float32]
# Tolerance map for different data types # Tolerance map for different data types
_TOLERANCE_MAP = { _TOLERANCE_MAP = {
torch.float16: {"atol": 1e-4, "rtol": 1e-2}, torch.float16: {"atol": 1e-3, "rtol": 1e-3},
torch.float32: {"atol": 2e-7, "rtol": 1e-7},
} }
DEBUG = False DEBUG = False
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment