#!/usr/bin/env bash
# Run lm-evaluation-harness on a local mamba-130m checkpoint (HF backend,
# hellaswag task). Requires CUDA and the checkpoint at the path below.
set -euo pipefail

# Alternative invocation: evaluate through the mamba_ssm backend across the
# full task suite instead of the single hellaswag task below.
# lm_eval --model mamba_ssm \
#   --model_args pretrained=/public/model/AI-ModelScope/mamba-130m-hf \
#   --tasks lambada_openai,hellaswag,piqa,arc_easy,arc_challenge,winogrande,openbookqa \
#   --device cuda \
#   --batch_size 64

# NOTE(review): in the original file this command was fused onto the end of
# the comment line above (lost newline), leaving it commented out and inert.
# Restored here as the active command — confirm it was intended to run.
python evals/lm_harness_eval.py \
  --model hf \
  --model_args pretrained=/public/model/AI-ModelScope/mamba-130m-hf \
  --tasks hellaswag \
  --device cuda \
  --batch_size 64