OpenDAS / AutoAWQ

Commit 2290bdb2 authored Sep 20, 2023 by Casper Hansen

Change filename

Parent: 44b3187a
Showing 2 changed files with 2 additions and 2 deletions (+2 -2)

  awq/quantize/quantizer.py   +1 -1
  awq/quantize/scale.py       +1 -1
awq/quantize/quantizer.py

@@ -6,8 +6,8 @@ from tqdm import tqdm
 from collections import defaultdict
 from awq.utils.utils import clear_memory
 from awq.utils.calib_data import get_calib_dataset
+from awq.quantize.scale import apply_scale, apply_clip
 from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV
-from awq.quantize.apply_quantized import apply_scale, apply_clip
 from awq.utils.module import append_str_prefix, get_op_name, get_named_linears, set_op_by_name
awq/quantize/apply_quantized.py → awq/quantize/scale.py

@@ -4,9 +4,9 @@ import torch.nn as nn
 from typing import Tuple
 from awq.modules.act import ScaledActivation
 from transformers.activations import NewGELUActivation
-from awq.utils.module import get_op_by_name, set_op_by_name
 from transformers.models.bloom.modeling_bloom import BloomGelu
 from transformers.models.llama.modeling_llama import LlamaRMSNorm
+from awq.utils.module import get_op_by_name, get_op_name, set_op_by_name

 allowed_norms = [nn.LayerNorm, LlamaRMSNorm]
 allowed_act_fns = [nn.GELU, BloomGelu, NewGELUActivation]
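The only change a caller of these helpers sees from the rename is the import path: apply_scale and apply_clip keep their names, and the renamed file's single in-body change is the extra get_op_name import. A minimal, hypothetical downstream sketch in Python (illustrative only, not part of this commit):

# Old path, removed by this commit:
#   from awq.quantize.apply_quantized import apply_scale, apply_clip
# New path after the rename:
from awq.quantize.scale import apply_scale, apply_clip

# The helpers are called exactly as before; only the module they live in changed.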