Commit 2290bdb2 authored by Casper Hansen's avatar Casper Hansen
Browse files

Change filename

parent 44b3187a
...@@ -6,8 +6,8 @@ from tqdm import tqdm ...@@ -6,8 +6,8 @@ from tqdm import tqdm
from collections import defaultdict from collections import defaultdict
from awq.utils.utils import clear_memory from awq.utils.utils import clear_memory
from awq.utils.calib_data import get_calib_dataset from awq.utils.calib_data import get_calib_dataset
from awq.quantize.scale import apply_scale, apply_clip
from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV from awq.modules.linear import WQLinear_GEMM, WQLinear_GEMV
from awq.quantize.apply_quantized import apply_scale, apply_clip
from awq.utils.module import append_str_prefix, get_op_name, get_named_linears, set_op_by_name from awq.utils.module import append_str_prefix, get_op_name, get_named_linears, set_op_by_name
......
...@@ -4,9 +4,9 @@ import torch.nn as nn ...@@ -4,9 +4,9 @@ import torch.nn as nn
from typing import Tuple from typing import Tuple
from awq.modules.act import ScaledActivation from awq.modules.act import ScaledActivation
from transformers.activations import NewGELUActivation from transformers.activations import NewGELUActivation
from awq.utils.module import get_op_by_name, set_op_by_name
from transformers.models.bloom.modeling_bloom import BloomGelu from transformers.models.bloom.modeling_bloom import BloomGelu
from transformers.models.llama.modeling_llama import LlamaRMSNorm from transformers.models.llama.modeling_llama import LlamaRMSNorm
from awq.utils.module import get_op_by_name, get_op_name, set_op_by_name
allowed_norms = [nn.LayerNorm, LlamaRMSNorm] allowed_norms = [nn.LayerNorm, LlamaRMSNorm]
allowed_act_fns = [nn.GELU, BloomGelu, NewGELUActivation] allowed_act_fns = [nn.GELU, BloomGelu, NewGELUActivation]
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment