Commit a07825ac authored by justheuristic's avatar justheuristic

review

parent 9b7d307b
@@ -569,12 +569,10 @@ def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
         (o1 * grad_proj).sum().backward()
         grad_ref = grad_proj.flatten(2) @ w2.half() @ w1.half()
         scale = grad_ref.abs().mean()
-        torch.testing.assert_allclose(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)
-        idx = torch.isclose(b1.grad, grad_ref, atol=0.01 * scale, rtol=0.1)
-        assert (idx == 0).sum().item() <= b1.numel() * 0.0
+        assert torch.allclose(b1.grad, grad_ref, rtol=0, atol=0.05 * scale)

 def test_linear8bitlt_fp32_bias():
...
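
For context, a minimal, self-contained sketch of the scale-relative tolerance pattern the new assertion uses: the absolute tolerance is derived from the mean magnitude of the reference gradient rather than being a fixed constant. The helper name assert_close_scaled and the stand-in tensors are illustrative assumptions, not code from the repository.

import torch

def assert_close_scaled(actual: torch.Tensor, reference: torch.Tensor, rel: float = 0.05) -> None:
    # Mean absolute value of the reference sets the scale of the comparison,
    # so the check adapts to the magnitude of the gradients being compared.
    scale = reference.abs().mean().item()
    assert torch.allclose(actual, reference, rtol=0, atol=rel * scale)

# Illustrative usage with stand-in tensors (not the test's real gradients).
ref = torch.randn(4, 8)
approx = ref + 0.001 * torch.randn(4, 8)
assert_close_scaled(approx, ref)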