Commit 7e7528cd authored by Zimin Li

issue/200: update fp16 and fp32 tolerances for add and swiglu in python tests

parent 11e7df93
@@ -62,7 +62,8 @@ _TENSOR_DTYPES = [torch.float16, torch.float32]
 # Tolerance map for different data types
 _TOLERANCE_MAP = {
-    torch.float16: {"atol": 1e-4, "rtol": 1e-2},
+    torch.float16: {"atol": 1e-3, "rtol": 1e-3},
+    torch.float32: {"atol": 1e-7, "rtol": 1e-7},
 }
 DEBUG = False
@@ -61,7 +61,8 @@ _TENSOR_DTYPES = [torch.float16, torch.float32]
 # Tolerance map for different data types
 _TOLERANCE_MAP = {
-    torch.float16: {"atol": 1e-4, "rtol": 1e-2},
+    torch.float16: {"atol": 1e-3, "rtol": 1e-3},
+    torch.float32: {"atol": 2e-7, "rtol": 1e-7},
 }
 DEBUG = False
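
For context, a minimal sketch of how a per-dtype tolerance map like the one changed here is typically consumed in a test. The helper name assert_allclose and the usage below are assumptions for illustration, not code from this repository; only the map values are taken from the diff above.

import torch

# Per-dtype tolerances, mirroring the first hunk after this commit
# (fp16 atol/rtol changed, fp32 entry added).
_TOLERANCE_MAP = {
    torch.float16: {"atol": 1e-3, "rtol": 1e-3},
    torch.float32: {"atol": 1e-7, "rtol": 1e-7},
}

def assert_allclose(actual: torch.Tensor, expected: torch.Tensor) -> None:
    # Hypothetical helper: look up tolerances by dtype, compare elementwise.
    tol = _TOLERANCE_MAP[actual.dtype]
    torch.testing.assert_close(actual, expected, atol=tol["atol"], rtol=tol["rtol"])

# Example: check a float16 add against a float32 reference computation.
a = torch.randn(128, dtype=torch.float16)
b = torch.randn(128, dtype=torch.float16)
assert_allclose(a + b, (a.float() + b.float()).half())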