Commit fc4a135e authored by justheuristic's avatar justheuristic
Browse files

clearer assertions

parent e29c5f5c
@@ -232,7 +232,7 @@ class MatMul8bitLt(torch.autograd.Function):
         # Cast A to fp16
         A_dtype = A.dtype
         if A_dtype != torch.float16:
-            warnings.warn(f"MatMul8bitLt: temporarily casting input matrix from {A_dtype} to float16")
+            warnings.warn(f"MatMul8bitLt: input matrix will be converted from {A_dtype} to float16")
             A = A.to(torch.float16)
         # 1. Quantize A
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment