Commit 0aea7dd1 authored by Rayyyyy's avatar Rayyyyy
Browse files

Modify chat.sh examples.

parent a540af5e
#!/bin/bash
# chat.sh — example launchers for llama3_chat.py via torchrun on AMD (HIP) GPUs.
# Runs the 8B example first, then the 70B example.
set -euo pipefail

echo "Export params ..."
# Select visible GPUs (edit to target specific devices).
# The 70B launch below uses 8 ranks, so all 8 devices are exposed;
# the single-rank 8B launch simply uses the first one.
export HIP_VISIBLE_DEVICES=0,1,2,3,4,5,6,7
export HSA_FORCE_FINE_GRAIN_PCIE=1
export USE_MIOPEN_BATCHNORM=1
# Rendezvous settings for torchrun.
# NOTE(review): torchrun assigns RANK/WORLD_SIZE per worker itself, so the
# two exports below are likely redundant (and WORLD_SIZE=1 does not match the
# 8-rank 70B launch) — confirm whether llama3_chat.py reads them directly.
export MASTER_ADDR=localhost
export MASTER_PORT=12355
export RANK=0
export WORLD_SIZE=1

echo "Starting ..."
# 8B — alternate invocation using absolute checkpoint paths:
# torchrun --nproc_per_node 1 llama3_chat.py \
#   --ckpt_dir /data/Meta-llama3-models/Meta-Llama-3-8B-Instruct/original/ \
#   --tokenizer_path /data/Meta-llama3-models/Meta-Llama-3-8B-Instruct/original/tokenizer.model \
#   --max_seq_len 2048 \
#   --max_batch_size 6
torchrun --nproc_per_node 1 llama3_chat.py --ckpt_dir Meta-Llama-3-8B-Instruct/original/ --tokenizer_path Meta-Llama-3-8B-Instruct/original/tokenizer.model --max_seq_len 2048 --max_batch_size 6
# 70B
torchrun --nproc_per_node 8 llama3_chat.py \
  --ckpt_dir /data/Meta-llama3-models/Meta-Llama-3-70B-Instruct/original/ \
  --tokenizer_path /data/Meta-llama3-models/Meta-Llama-3-70B-Instruct/original/tokenizer.model \
  --max_seq_len 2048 \
  --max_batch_size 6
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment