Unverified commit e440c7ac authored by Qing, committed by GitHub

Add Qwen model (#182)


Co-authored-by: Casper Hansen <casperbh.96@gmail.com>
parent 84c87877
awq/models/__init__.py
@@ -8,4 +8,5 @@ from .gpt_bigcode import GptBigCodeAWQForCausalLM
 from .mistral import MistralAWQForCausalLM
 from .gpt_neox import GPTNeoXAWQForCausalLM
 from .aquila import AquilaAWQForCausalLM
-from .yi import YiAWQForCausalLM
\ No newline at end of file
+from .yi import YiAWQForCausalLM
+from .qwen import QwenAWQForCausalLM
awq/models/auto.py
@@ -16,7 +16,8 @@ AWQ_CAUSAL_LM_MODEL_MAP = {
     "mistral": MistralAWQForCausalLM,
     "gpt_neox": GPTNeoXAWQForCausalLM,
     "aquila": AquilaAWQForCausalLM,
-    "Yi": YiAWQForCausalLM
+    "Yi": YiAWQForCausalLM,
+    "qwen": QwenAWQForCausalLM
 }

 def check_and_get_model_type(model_dir, trust_remote_code=True):
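Not part of this diff: the body of check_and_get_model_type is collapsed in the view above. For context, a minimal sketch of the dispatch it performs, assuming the key is read from model_type in the checkpoint's config via transformers.AutoConfig:

from transformers import AutoConfig

def check_and_get_model_type(model_dir, trust_remote_code=True):
    # Assumed behavior: read model_type from config.json and validate it
    # against AWQ_CAUSAL_LM_MODEL_MAP; Qwen checkpoints ship custom modeling
    # code, hence the trust_remote_code flag.
    config = AutoConfig.from_pretrained(model_dir, trust_remote_code=trust_remote_code)
    if config.model_type not in AWQ_CAUSAL_LM_MODEL_MAP:
        raise TypeError(f"{config.model_type} isn't supported yet.")
    return config.model_type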
awq/models/qwen.py (new file)

from .base import BaseAWQForCausalLM


class QwenAWQForCausalLM(BaseAWQForCausalLM):
    layer_type = "QWenBlock"
    max_new_tokens_key = "seq_length"

    @staticmethod
    def get_model_layers(model):
        # QWen keeps its decoder blocks under transformer.h
        return model.transformer.h

    @staticmethod
    def get_act_for_scaling(module):
        # no standalone activation module to scale in QWen
        return dict(is_scalable=False)

    @staticmethod
    def move_embed(model, device: str):
        # move the token embedding and rotary embedding to the target device
        model.transformer.wte = model.transformer.wte.to(device)
        model.transformer.rotary_emb = model.transformer.rotary_emb.to(device)

    @staticmethod
    def get_layers_for_scaling(module, input_feat, module_kwargs):
        layers = []

        # attention: fused QKV projection and output projection, scaled
        # against the first layer norm
        layers.append(
            dict(
                prev_op=module.ln_1,
                layers=[module.attn.c_attn, module.attn.c_proj],
                inp=input_feat["attn.c_attn"],
                module2inspect=module.attn,
                kwargs=module_kwargs,
            )
        )

        # mlp: gate/up projections, scaled against the second layer norm
        layers.append(
            dict(
                prev_op=module.ln_2,
                layers=[module.mlp.w2, module.mlp.w1],
                inp=input_feat["mlp.w2"],
                module2inspect=module.mlp,
            )
        )

        # linear 2: down projection, scaled against w1's output
        layers.append(
            dict(
                prev_op=module.mlp.w1,
                layers=[module.mlp.c_proj],
                inp=input_feat["mlp.c_proj"],
            )
        )

        return layers
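Not part of this diff: to exercise the new mapping end to end, a minimal quantization sketch following AutoAWQ's usual from_pretrained / quantize / save_quantized flow. The checkpoint name, output path, and quant settings below are illustrative assumptions, not taken from this commit.

from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

model_path = "Qwen/Qwen-7B-Chat"  # illustrative checkpoint, not from this commit
quant_config = {"zero_point": True, "q_group_size": 128, "w_bit": 4, "version": "GEMM"}

# trust_remote_code is required because Qwen ships custom modeling code;
# check_and_get_model_type resolves the "qwen" entry added above.
model = AutoAWQForCausalLM.from_pretrained(model_path, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

model.quantize(tokenizer, quant_config=quant_config)
model.save_quantized("qwen-7b-awq")  # illustrative output path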