Unverified Commit df66741f authored by Baizhou Zhang's avatar Baizhou Zhang Committed by GitHub
Browse files

[bug] fix get_default_parser in examples (#4764)

parent c0a03370
from .initialize import initialize, launch, launch_from_openmpi, launch_from_slurm, launch_from_torch
from .initialize import (
get_default_parser,
initialize,
launch,
launch_from_openmpi,
launch_from_slurm,
launch_from_torch,
)
__all__ = [
"launch",
......@@ -6,4 +13,5 @@ __all__ = [
"launch_from_slurm",
"launch_from_torch",
"initialize",
"get_default_parser",
]
import colossalai
import argparse
__all__ = ["parse_args"]
def parse_args():
parser = colossalai.get_default_parser()
parser = argparse.ArgumentParser()
parser.add_argument(
"--distplan",
......
from colossalai import get_default_parser
import argparse
def parse_demo_args():
parser = get_default_parser()
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name_or_path",
type=str,
......@@ -52,7 +52,7 @@ def parse_demo_args():
def parse_benchmark_args():
parser = get_default_parser()
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name_or_path",
......
......@@ -11,9 +11,9 @@ for PLUGIN in "torch_ddp" "torch_ddp_fp16" "low_level_zero" "gemini" "hybrid_par
do
MODEL_PATH="google/vit-base-patch16-224"
torchrun \
--standalone \
--nproc_per_node 4 \
colossalai run \
--nproc_per_node ${GPUNUM} \
--master_port 29505 \
vit_benchmark.py \
--model_name_or_path ${MODEL_PATH} \
--mem_cap ${MEMCAP} \
......
......@@ -35,9 +35,9 @@ WEIGHT_DECAY=0.05
WARMUP_RATIO=0.3
# run the script for demo
torchrun \
--standalone \
colossalai run \
--nproc_per_node ${GPUNUM} \
--master_port 29505 \
vit_train_demo.py \
--model_name_or_path ${MODEL} \
--output_path ${OUTPUT_PATH} \
......
......@@ -5,9 +5,9 @@ BS=8
for PLUGIN in "torch_ddp" "torch_ddp_fp16" "low_level_zero" "gemini" "hybrid_parallel"
do
torchrun \
--standalone \
colossalai run \
--nproc_per_node 4 \
--master_port 29505 \
vit_benchmark.py \
--model_name_or_path "google/vit-base-patch16-224" \
--plugin ${PLUGIN} \
......
import argparse
import contextlib
import os
......@@ -29,7 +30,7 @@ VOCAB_SIZE = 50257
def main():
parser = colossalai.get_default_parser()
parser = argparse.ArgumentParser()
parser.add_argument("--from_torch", default=False, action="store_true")
parser.add_argument("--use_dummy_dataset", default=False, action="store_true")
args = parser.parse_args()
......
from colossalai import get_default_parser
import argparse
def parse_demo_args():
parser = get_default_parser()
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name_or_path",
type=str,
......@@ -39,7 +39,7 @@ def parse_demo_args():
def parse_benchmark_args():
parser = get_default_parser()
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name_or_path",
type=str,
......
......@@ -16,9 +16,9 @@ for GPUNUM in 1 4
do
MODLE_PATH="facebook/opt-${MODEL}"
torchrun \
--standalone \
colossalai run \
--nproc_per_node ${GPUNUM} \
--master_port 29505 \
opt_benchmark.py \
--model_name_or_path ${MODLE_PATH} \
--mem_cap ${MEMCAP} \
......
......@@ -30,9 +30,9 @@ WEIGHT_DECAY=0.01
WARMUP_RATIO=0.1
# run the script for demo
torchrun \
--standalone \
colossalai run \
--nproc_per_node ${GPUNUM} \
--master_port 29505 \
opt_train_demo.py \
--model_name_or_path ${MODEL} \
--output_path ${OUTPUT_PATH} \
......
......@@ -7,9 +7,9 @@ do
for GPUNUM in 1 4
do
torchrun \
--standalone \
colossalai run \
--nproc_per_node ${GPUNUM} \
--master_port 29505 \
opt_benchmark.py \
--model_name_or_path "facebook/opt-125m" \
--plugin ${PLUGIN} \
......
......@@ -8,6 +8,6 @@ export PLACEMENT='cpu'
export USE_SHARD_INIT=False
export BATCH_SIZE=1
env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --master_port 29501 train.py \
env OMP_NUM_THREADS=12 colossalai run --nproc_per_node ${GPUNUM} --master_port 29505 train.py \
--dummy_data=True --tp_degree=${TPDEGREE} --batch_size=${BATCH_SIZE} --plugin='gemini' \
--placement ${PLACEMENT} --shardinit ${USE_SHARD_INIT} --distplan ${DISTPAN} 2>&1 | tee run.log
......@@ -4,6 +4,6 @@ for BATCH_SIZE in 2
do
for GPUNUM in 1 4
do
env OMP_NUM_THREADS=12 torchrun --standalone --nproc_per_node=${GPUNUM} --standalone train.py --dummy_data=True --batch_size=${BATCH_SIZE} --plugin='gemini' 2>&1 | tee run.log
env OMP_NUM_THREADS=12 colossalai run --nproc_per_node ${GPUNUM} --master_port 29505 train.py --dummy_data=True --batch_size=${BATCH_SIZE} --plugin='gemini' 2>&1 | tee run.log
done
done
import argparse
import gzip
from contextlib import nullcontext
from functools import partial
......@@ -33,7 +34,7 @@ SEQ_LEN = 1024
def parse_args():
parser = colossalai.get_default_parser()
parser = argparse.ArgumentParser()
parser.add_argument(
"--distplan",
type=str,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment