```bash
# distplan in ["colossalai", "pytorch"]
export DISTPAN="colossalai"

# The following options are only valid when DISTPAN="colossalai"
export TPDEGREE=1
export GPUNUM=1
export PLACEMENT='cpu'
export USE_SHARD_INIT=False
export BATCH_SIZE=4

env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --master_port 29501 \
  train_new.py \
  --tp_degree=${TPDEGREE} \
  --batch_size=${BATCH_SIZE} \
  --placement ${PLACEMENT} \
  --shardinit ${USE_SHARD_INIT} \
  --distplan ${DISTPAN} \
  2>&1 | tee run.log
```
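
To fall back to plain PyTorch data parallelism instead, set `DISTPAN="pytorch"`; per the comment above, the ColossalAI-specific options (`TPDEGREE`, `PLACEMENT`, `USE_SHARD_INIT`) then no longer apply. Below is a minimal sketch of such a multi-GPU run, assuming `train_new.py` accepts the same flags and falls back to its defaults for the ColossalAI-only ones:

```bash
# Sketch: plain PyTorch distplan on 4 GPUs (GPUNUM value is illustrative)
export DISTPAN="pytorch"
export GPUNUM=4
export BATCH_SIZE=4

env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --master_port 29501 \
  train_new.py \
  --batch_size=${BATCH_SIZE} \
  --distplan ${DISTPAN} \
  2>&1 | tee run_pytorch.log
```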