OpenDAS / AutoAWQ · Commits · 44470a7a

Commit 44470a7a authored Sep 20, 2023 by Casper

Added type hinting

parent 45c22ee5
Showing 1 changed file with 3 additions and 3 deletions
awq/quantize/apply_quantized.py  +3 −3

@@ -63,7 +63,7 @@ def apply_scale(module, scales_list, input_feat_dict=None):
         scales.cpu()
 
 @torch.no_grad()
-def scale_ln_fcs(ln, fcs, scales):
+def scale_ln_fcs(ln: nn.Linear, fcs: list[nn.Linear], scales: torch.Tensor):
     if not isinstance(fcs, list):
         fcs = [fcs]
@@ -83,7 +83,7 @@ def scale_ln_fcs(ln, fcs, scales):
             assert torch.isnan(p).sum() == 0
 
 @torch.no_grad()
-def scale_fc_fc(fc1, fc2, scales):
+def scale_fc_fc(fc1: nn.Linear, fc2: nn.Linear, scales: torch.Tensor):
     assert isinstance(fc1, nn.Linear)
     assert isinstance(fc2, nn.Linear)
@@ -102,7 +102,7 @@ def scale_fc_fc(fc1, fc2, scales):
 @torch.no_grad()
-def scale_gelu_fc(gelu, fc, scales):
+def scale_gelu_fc(gelu: allowed_act_fns, fc: nn.Linear, scales: torch.Tensor):
     assert any(isinstance(gelu, t) for t in allowed_act_fns)
     assert isinstance(fc, nn.Linear)
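For context, the three annotated helpers apply AWQ-style per-channel scaling: the preceding op's weights are divided by the scales and the following Linear weights are multiplied by them, so the composed output is unchanged. The sketch below illustrates that pattern for the LayerNorm-to-Linear case handled by scale_ln_fcs; it is a minimal, self-contained illustration, and the module sizes and scale values are assumptions for demonstration only, not code from this repository.

# Minimal sketch (toy sizes assumed) of the scaling pattern behind scale_ln_fcs:
# divide the LayerNorm weight/bias by per-channel scales and multiply the
# following Linear's weight columns by the same scales; the output of the
# composed LayerNorm -> Linear pair stays the same.
import torch
import torch.nn as nn

torch.manual_seed(0)

ln = nn.LayerNorm(8)          # hypothetical normalization layer
fc = nn.Linear(8, 4)          # hypothetical following projection
x = torch.randn(2, 8)

ref = fc(ln(x))               # output before scaling

scales = torch.rand(8) + 0.5  # hypothetical per-channel scales (> 0)

with torch.no_grad():
    ln.weight.div_(scales)
    ln.bias.div_(scales)
    fc.weight.mul_(scales.view(1, -1))

out = fc(ln(x))               # output after scaling
print(torch.allclose(ref, out, atol=1e-5))  # True: the transform preserves outputs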