Commit da900c3b authored by yangql

Initial commit
from ._base import BaseGPTQForCausalLM


class InternLMGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "InternLMDecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.up_proj", "mlp.gate_proj"],
        ["mlp.down_proj"],
    ]


__all__ = ["InternLMGPTQForCausalLM"]

from logging import getLogger

from ..utils.import_utils import compare_transformers_version
from ._base import BaseGPTQForCausalLM

if compare_transformers_version("v4.28.0", op="ge"):
    from ..nn_modules.fused_llama_attn import FusedLlamaAttentionForQuantizedModel
    from ..nn_modules.fused_llama_mlp import FusedLlamaMLPForQuantizedModel
else:
    FusedLlamaAttentionForQuantizedModel = None
    FusedLlamaMLPForQuantizedModel = None

logger = getLogger(__name__)


class LlamaGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "LlamaDecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.up_proj", "mlp.gate_proj"],
        ["mlp.down_proj"],
    ]

    fused_attn_module_type = FusedLlamaAttentionForQuantizedModel
    fused_mlp_module_type = FusedLlamaMLPForQuantizedModel


__all__ = ["LlamaGPTQForCausalLM"]

from logging import getLogger

from ..utils.import_utils import compare_transformers_version
from ._base import BaseGPTQForCausalLM

if compare_transformers_version("v4.28.0", op="ge"):
    from ..nn_modules.fused_llama_attn import FusedLlamaAttentionForQuantizedModel
    from ..nn_modules.fused_llama_mlp import FusedLlamaMLPForQuantizedModel
else:
    FusedLlamaAttentionForQuantizedModel = None
    FusedLlamaMLPForQuantizedModel = None

logger = getLogger(__name__)


class LongLlamaGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "LongLlamaDecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.up_proj", "mlp.gate_proj"],
        ["mlp.down_proj"],
    ]

    fused_attn_module_type = FusedLlamaAttentionForQuantizedModel
    fused_mlp_module_type = FusedLlamaMLPForQuantizedModel


__all__ = ["LongLlamaGPTQForCausalLM"]

from ._base import BaseGPTQForCausalLM


class MistralGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "MistralDecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.up_proj", "mlp.gate_proj"],
        ["mlp.down_proj"],
    ]


__all__ = ["MistralGPTQForCausalLM"]

from ._base import BaseGPTQForCausalLM


class MixtralGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "MixtralDecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        [
            "block_sparse_moe.experts.0.w1",
            "block_sparse_moe.experts.1.w1",
            "block_sparse_moe.experts.2.w1",
            "block_sparse_moe.experts.3.w1",
            "block_sparse_moe.experts.4.w1",
            "block_sparse_moe.experts.5.w1",
            "block_sparse_moe.experts.6.w1",
            "block_sparse_moe.experts.7.w1",
            "block_sparse_moe.experts.0.w3",
            "block_sparse_moe.experts.1.w3",
            "block_sparse_moe.experts.2.w3",
            "block_sparse_moe.experts.3.w3",
            "block_sparse_moe.experts.4.w3",
            "block_sparse_moe.experts.5.w3",
            "block_sparse_moe.experts.6.w3",
            "block_sparse_moe.experts.7.w3",
        ],
        [
            "block_sparse_moe.experts.0.w2",
            "block_sparse_moe.experts.1.w2",
            "block_sparse_moe.experts.2.w2",
            "block_sparse_moe.experts.3.w2",
            "block_sparse_moe.experts.4.w2",
            "block_sparse_moe.experts.5.w2",
            "block_sparse_moe.experts.6.w2",
            "block_sparse_moe.experts.7.w2",
        ],
    ]


__all__ = ["MixtralGPTQForCausalLM"]

from ._base import BaseGPTQForCausalLM


class MOSSGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "MossBlock"
    layers_block_name = "transformer.h"
    outside_layer_modules = ["transformer.wte", "transformer.ln_f"]
    inside_layer_modules = [
        ["attn.qkv_proj"],
        ["attn.out_proj"],
        ["mlp.fc_in"],
        ["mlp.fc_out"],
    ]


__all__ = ["MOSSGPTQForCausalLM"]

from auto_gptq.modeling import BaseGPTQForCausalLM


class MPTGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "MPTBlock"
    layers_block_name = "transformer.blocks"
    outside_layer_modules = [
        "transformer.wte", "transformer.norm_f"
    ]
    inside_layer_modules = [
        ["attn.Wqkv"],
        ["attn.out_proj"],
        ["ffn.up_proj"],
        ["ffn.down_proj"],
    ]


__all__ = ["MPTGPTQForCausalLM"]

from ._base import BaseGPTQForCausalLM


class OPTGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "OPTDecoderLayer"
    layers_block_name = "model.decoder.layers"
    outside_layer_modules = [
        "model.decoder.embed_tokens",
        "model.decoder.embed_positions",
        "model.decoder.project_out",
        "model.decoder.project_in",
        "model.decoder.final_layer_norm",
    ]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.out_proj"],
        ["fc1"],
        ["fc2"],
    ]


__all__ = ["OPTGPTQForCausalLM"]

from ._base import BaseGPTQForCausalLM


class PhiGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "PhiDecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.final_layernorm"]
    inside_layer_modules = [
        ["self_attn.q_proj"],
        ["self_attn.k_proj"],
        ["self_attn.v_proj"],
        ["self_attn.dense"],
        ["mlp.fc1"],
        ["mlp.fc2"],
    ]


__all__ = ["PhiGPTQForCausalLM"]

from ._base import BaseGPTQForCausalLM


class QwenGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "QWenBlock"
    layers_block_name = "transformer.h"
    outside_layer_modules = [
        "transformer.wte",
        "transformer.wpe",
        "transformer.ln_f",
        "transformer.visual",
    ]
    inside_layer_modules = [
        ["attn.c_attn"],
        ["attn.c_proj"],
        ["mlp.w1", "mlp.w2"],
        ["mlp.c_proj"],
    ]


__all__ = ["QwenGPTQForCausalLM"]

from ._base import BaseGPTQForCausalLM


class Qwen2GPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "Qwen2DecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.up_proj", "mlp.gate_proj"],
        ["mlp.down_proj"],
    ]


__all__ = ["Qwen2GPTQForCausalLM"]

from ._base import BaseGPTQForCausalLM


class RWGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "DecoderLayer"
    layers_block_name = "transformer.h"
    outside_layer_modules = ["transformer.word_embeddings", "transformer.ln_f"]
    inside_layer_modules = [
        ["self_attention.query_key_value"],
        ["self_attention.dense"],
        ["mlp.dense_h_to_4h"],
        ["mlp.dense_4h_to_h"],
    ]


__all__ = ["RWGPTQForCausalLM"]

from logging import getLogger

from ..utils.import_utils import compare_transformers_version
from ._base import BaseGPTQForCausalLM

if compare_transformers_version("v4.28.0", op="ge"):
    from ..nn_modules.fused_llama_attn import FusedLlamaAttentionForQuantizedModel
    from ..nn_modules.fused_llama_mlp import FusedLlamaMLPForQuantizedModel
else:
    FusedLlamaAttentionForQuantizedModel = None
    FusedLlamaMLPForQuantizedModel = None

logger = getLogger(__name__)


class StableLMEpochGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "DecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.up_proj", "mlp.gate_proj"],
        ["mlp.down_proj"],
    ]

    fused_attn_module_type = FusedLlamaAttentionForQuantizedModel
    fused_mlp_module_type = FusedLlamaMLPForQuantizedModel


__all__ = ["StableLMEpochGPTQForCausalLM"]

from logging import getLogger

from ._base import BaseGPTQForCausalLM

logger = getLogger(__name__)


class Starcoder2GPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "Starcoder2DecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.c_fc"],
        ["mlp.c_proj"],
    ]


__all__ = ["Starcoder2GPTQForCausalLM"]

from logging import getLogger

from ..utils.import_utils import compare_transformers_version
from ._base import BaseGPTQForCausalLM

if compare_transformers_version("v4.28.0", op="ge"):
    from ..nn_modules.fused_llama_attn import FusedLlamaAttentionForQuantizedModel
    from ..nn_modules.fused_llama_mlp import FusedLlamaMLPForQuantizedModel
else:
    FusedLlamaAttentionForQuantizedModel = None
    FusedLlamaMLPForQuantizedModel = None

logger = getLogger(__name__)


class XverseGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "XverseDecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.up_proj", "mlp.gate_proj"],
        ["mlp.down_proj"],
    ]

    fused_attn_module_type = FusedLlamaAttentionForQuantizedModel
    fused_mlp_module_type = FusedLlamaMLPForQuantizedModel


__all__ = ["XverseGPTQForCausalLM"]

from logging import getLogger

from ..utils.import_utils import compare_transformers_version
from ._base import BaseGPTQForCausalLM

if compare_transformers_version("v4.28.0", op="ge"):
    from ..nn_modules.fused_llama_attn import FusedLlamaAttentionForQuantizedModel
    from ..nn_modules.fused_llama_mlp import FusedLlamaMLPForQuantizedModel
else:
    FusedLlamaAttentionForQuantizedModel = None
    FusedLlamaMLPForQuantizedModel = None

logger = getLogger(__name__)


class YiGPTQForCausalLM(BaseGPTQForCausalLM):
    layer_type = "YiDecoderLayer"
    layers_block_name = "model.layers"
    outside_layer_modules = ["model.embed_tokens", "model.norm"]
    inside_layer_modules = [
        ["self_attn.k_proj", "self_attn.v_proj", "self_attn.q_proj"],
        ["self_attn.o_proj"],
        ["mlp.up_proj", "mlp.gate_proj"],
        ["mlp.down_proj"],
    ]

    fused_attn_module_type = FusedLlamaAttentionForQuantizedModel
    fused_mlp_module_type = FusedLlamaMLPForQuantizedModel


__all__ = ["YiGPTQForCausalLM"]
