Commit c9156538 authored by zhuwenwen's avatar zhuwenwen
Browse files

Support GLM and Baichuan NN architectures

parent 6634a0e0
...@@ -23,7 +23,7 @@ def _set_default_torch_dtype(dtype: torch.dtype): ...@@ -23,7 +23,7 @@ def _set_default_torch_dtype(dtype: torch.dtype):
def _get_model_architecture(model_config: ModelConfig) -> Type[nn.Module]: def _get_model_architecture(model_config: ModelConfig) -> Type[nn.Module]:
architectures = getattr(model_config.hf_config, "architectures", []) architectures = getattr(model_config.hf_config, "architectures", [])
if architectures == ['LlamaForCausalLM']: if architectures == ['LlamaForCausalLM'] or architectures == ['ChatGLMModel'] or architectures == ['BaichuanForCausalLM']:
if os.getenv('LLAMA_NN') != '0': if os.getenv('LLAMA_NN') != '0':
os.environ['LLAMA_NN'] = '1' os.environ['LLAMA_NN'] = '1'
# Special handling for quantized Mixtral. # Special handling for quantized Mixtral.
......
Markdown is supported
Attach a file by drag & drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment