Commit fc4a135e authored by justheuristic

clearer assertions

parent e29c5f5c
@@ -232,8 +232,8 @@ class MatMul8bitLt(torch.autograd.Function):
         # Cast A to fp16
         A_dtype = A.dtype
         if A_dtype != torch.float16:
-            warnings.warn(f"MatMul8bitLt: temporarily casting input matrix from {A_dtype} to float16")
-            A = A.to(torch.float16)
+            warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A_dtype} to float16")
+            A = A.to(torch.float16)
         # 1. Quantize A
         if len(A.shape) == 3:
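For context, the code path touched here checks the input dtype and, when it is not fp16, emits the (now reworded) warning before casting. Below is a minimal standalone sketch of that warn-and-cast pattern, not the library's actual implementation; the helper name `cast_input_to_fp16` is hypothetical.

```python
import warnings
import torch

def cast_input_to_fp16(A: torch.Tensor) -> torch.Tensor:
    # Hypothetical helper mirroring the pattern in the diff above:
    # warn about the dtype conversion, then cast the input to float16.
    if A.dtype != torch.float16:
        warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A.dtype} to float16")
        A = A.to(torch.float16)
    return A

# Example: a bfloat16 activation triggers the warning and comes back as fp16.
x = torch.randn(4, 8, dtype=torch.bfloat16)
print(cast_input_to_fp16(x).dtype)  # torch.float16
```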