Commit 37f805bb authored by justheuristic

debug

parent 6a826c41
...
@@ -567,7 +567,7 @@ def test_linear8bitlt_no_fp16_weights(threshold, memory_efficient_backward):
     mlp.zero_grad()
     (o1 * grad_proj).sum().backward()
+    assert False, (w1, w2)
     grad_ref = grad_proj.flatten(2) @ w2 @ w1
     assert torch.allclose(b1.grad, grad_ref)
...
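For context on the check being debugged here: because the loss is (o1 * grad_proj).sum(), the gradient flowing into o1 is exactly grad_proj, so for a stack of two linear layers with weights w1 and w2 the expected gradient at the input b1 is grad_proj @ w2 @ w1. The sketch below illustrates that reference computation in plain PyTorch; it is not the bitsandbytes test itself, and the layer sizes, the bias-free Linear modules, and the absence of a nonlinearity between the layers are assumptions made for illustration.

import torch

# Hypothetical shapes chosen for illustration; the real test uses its own sizes.
b1 = torch.randn(4, 8, 16, requires_grad=True)
fc1 = torch.nn.Linear(16, 32, bias=False)   # assumption: no bias, no activation
fc2 = torch.nn.Linear(32, 16, bias=False)
w1, w2 = fc1.weight, fc2.weight             # w1: (32, 16), w2: (16, 32)

o1 = fc2(fc1(b1))                           # o1 = b1 @ w1.T @ w2.T
grad_proj = torch.randn_like(o1)

# (o1 * grad_proj).sum() makes d(loss)/d(o1) equal grad_proj exactly.
(o1 * grad_proj).sum().backward()

# Analytic input gradient: grad_proj @ w2 @ w1 (flatten(2) is a no-op for 3-D tensors).
grad_ref = grad_proj.flatten(2) @ w2 @ w1
assert torch.allclose(b1.grad, grad_ref, rtol=1e-4, atol=1e-4)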