Commit 656de8ed authored by dbaranchuk's avatar dbaranchuk
Browse files

minor fixes

parent 1753aa04
...@@ -368,7 +368,7 @@ class MatMul8bitLt(torch.autograd.Function): ...@@ -368,7 +368,7 @@ class MatMul8bitLt(torch.autograd.Function):
Bt = (CB * SCB).t().contiguous() Bt = (CB * SCB).t().contiguous()
CBt = (Bt / SCBt).t().to(torch.int8) CBt = (Bt / SCBt).t().to(torch.int8)
# intentionally, do not store CxBt into state # intentionally, do not store CxBt in state
CxBt, SBt = F.transform( CxBt, SBt = F.transform(
CBt, to_order=formatB, transpose=True CBt, to_order=formatB, transpose=True
) )
......
...@@ -212,7 +212,7 @@ class Int8Params(torch.nn.Parameter): ...@@ -212,7 +212,7 @@ class Int8Params(torch.nn.Parameter):
) )
new_param.CB = self.CB new_param.CB = self.CB
new_param.SCB = self.SCB new_param.SCB = self.SCB
new_param.SCB = self.SCBt new_param.SCBt = self.SCBt
return new_param return new_param
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment