Unverified Commit 9ae833af authored by puhuk, committed by GitHub

Add types and improve descriptions to ArgumentParser parameters (#4724)

* Add type to default argument

To resolve issue #4694

* Resolve issue #4694

Add missing types to the argument parser

* Update with ufmt

formatted with ufmt

* Updated per review

Addressed review feedback

* Update type of arguments

Add train.py from video_classification and similarity, and train_quantization.py
Co-authored-by: Vasilis Vryniotis <datumbox@users.noreply.github.com>
parent b37c8a3c
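To make the change concrete, here is a minimal, self-contained sketch of the pattern applied throughout this commit (the parser description and the surrounding function are illustrative; the two arguments shown are taken from the diff below):

    import argparse

    def get_args_parser(add_help=True):
        # Illustrative parser; only the add_argument calls mirror the committed change.
        parser = argparse.ArgumentParser(description="Example training script", add_help=add_help)
        # Before: parser.add_argument("--output-dir", default=".", help="path where to save")
        # After: the expected type is explicit and the help text describes the value.
        parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
        parser.add_argument(
            "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
        )
        return parser

    args = get_args_parser().parse_args()

Adding type=str to arguments whose defaults are already strings does not change how values are parsed; it documents the expected type at the call site and keeps these arguments consistent with the ones that already declared a type.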
@@ -346,10 +346,12 @@ def get_args_parser(add_help=True):
     parser = argparse.ArgumentParser(description="PyTorch Classification Training", add_help=add_help)
-    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", help="dataset")
-    parser.add_argument("--model", default="resnet18", help="model")
-    parser.add_argument("--device", default="cuda", help="device")
-    parser.add_argument("-b", "--batch-size", default=32, type=int)
+    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", type=str, help="dataset path")
+    parser.add_argument("--model", default="resnet18", type=str, help="model name")
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
+    parser.add_argument(
+        "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
+    )
     parser.add_argument("--epochs", default=90, type=int, metavar="N", help="number of total epochs to run")
     parser.add_argument(
         "-j", "--workers", default=16, type=int, metavar="N", help="number of data loading workers (default: 16)"
@@ -377,7 +379,7 @@ def get_args_parser(add_help=True):
     )
     parser.add_argument("--mixup-alpha", default=0.0, type=float, help="mixup alpha (default: 0.0)")
     parser.add_argument("--cutmix-alpha", default=0.0, type=float, help="cutmix alpha (default: 0.0)")
-    parser.add_argument("--lr-scheduler", default="steplr", help="the lr scheduler (default: steplr)")
+    parser.add_argument("--lr-scheduler", default="steplr", type=str, help="the lr scheduler (default: steplr)")
     parser.add_argument("--lr-warmup-epochs", default=0, type=int, help="the number of epochs to warmup (default: 0)")
     parser.add_argument(
         "--lr-warmup-method", default="constant", type=str, help="the warmup method (default: constant)"
@@ -386,8 +388,8 @@ def get_args_parser(add_help=True):
     parser.add_argument("--lr-step-size", default=30, type=int, help="decrease lr every step-size epochs")
     parser.add_argument("--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma")
     parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
     parser.add_argument(
         "--cache-dataset",
@@ -413,7 +415,7 @@ def get_args_parser(add_help=True):
         help="Use pre-trained models from the modelzoo",
         action="store_true",
     )
-    parser.add_argument("--auto-augment", default=None, help="auto augment policy (default: None)")
+    parser.add_argument("--auto-augment", default=None, type=str, help="auto augment policy (default: None)")
     parser.add_argument("--random-erase", default=0.0, type=float, help="random erasing probability (default: 0.0)")
     # Mixed precision training parameters
@@ -421,7 +423,7 @@ def get_args_parser(add_help=True):
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
     parser.add_argument(
         "--model-ema", action="store_true", help="enable tracking Exponential Moving Average of model parameters"
     )
@@ -155,12 +155,14 @@ def get_args_parser(add_help=True):
     parser = argparse.ArgumentParser(description="PyTorch Quantized Classification Training", add_help=add_help)
-    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", help="dataset")
-    parser.add_argument("--model", default="mobilenet_v2", help="model")
-    parser.add_argument("--backend", default="qnnpack", help="fbgemm or qnnpack")
-    parser.add_argument("--device", default="cuda", help="device")
+    parser.add_argument("--data-path", default="/datasets01/imagenet_full_size/061417/", type=str, help="dataset path")
+    parser.add_argument("--model", default="mobilenet_v2", type=str, help="model name")
+    parser.add_argument("--backend", default="qnnpack", type=str, help="fbgemm or qnnpack")
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")

-    parser.add_argument("-b", "--batch-size", default=32, type=int, help="batch size for calibration/training")
+    parser.add_argument(
+        "-b", "--batch-size", default=32, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
+    )
     parser.add_argument("--eval-batch-size", default=128, type=int, help="batch size for evaluation")
     parser.add_argument("--epochs", default=90, type=int, metavar="N", help="number of total epochs to run")
     parser.add_argument(
@@ -203,8 +205,8 @@ def get_args_parser(add_help=True):
     parser.add_argument("--lr-step-size", default=30, type=int, help="decrease lr every step-size epochs")
     parser.add_argument("--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma")
     parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
     parser.add_argument(
         "--cache-dataset",
@@ -234,7 +236,7 @@ def get_args_parser(add_help=True):
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
     parser.add_argument(
         "--interpolation", default="bilinear", type=str, help="the interpolation method (default: bilinear)"
@@ -50,10 +50,10 @@ def get_args_parser(add_help=True):
     parser = argparse.ArgumentParser(description="PyTorch Detection Training", add_help=add_help)
-    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", help="dataset")
-    parser.add_argument("--dataset", default="coco", help="dataset")
-    parser.add_argument("--model", default="maskrcnn_resnet50_fpn", help="model")
-    parser.add_argument("--device", default="cuda", help="device")
+    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
+    parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
+    parser.add_argument("--model", default="maskrcnn_resnet50_fpn", type=str, help="model name")
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
     parser.add_argument(
         "-b", "--batch-size", default=2, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
     )
@@ -77,7 +77,9 @@ def get_args_parser(add_help=True):
         help="weight decay (default: 1e-4)",
         dest="weight_decay",
     )
-    parser.add_argument("--lr-scheduler", default="multisteplr", help="the lr scheduler (default: multisteplr)")
+    parser.add_argument(
+        "--lr-scheduler", default="multisteplr", type=str, help="name of lr scheduler (default: multisteplr)"
+    )
     parser.add_argument(
         "--lr-step-size", default=8, type=int, help="decrease lr every step-size epochs (multisteplr scheduler only)"
     )
@@ -92,15 +94,17 @@ def get_args_parser(add_help=True):
         "--lr-gamma", default=0.1, type=float, help="decrease lr by a factor of lr-gamma (multisteplr scheduler only)"
     )
     parser.add_argument("--print-freq", default=20, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start_epoch", default=0, type=int, help="start epoch")
     parser.add_argument("--aspect-ratio-group-factor", default=3, type=int)
     parser.add_argument("--rpn-score-thresh", default=None, type=float, help="rpn score threshold for faster-rcnn")
     parser.add_argument(
         "--trainable-backbone-layers", default=None, type=int, help="number of trainable layers of backbone"
     )
-    parser.add_argument("--data-augmentation", default="hflip", help="data augmentation policy (default: hflip)")
+    parser.add_argument(
+        "--data-augmentation", default="hflip", type=str, help="data augmentation policy (default: hflip)"
+    )
     parser.add_argument(
         "--sync-bn",
         dest="sync_bn",
@@ -122,7 +126,7 @@ def get_args_parser(add_help=True):
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
     return parser
@@ -201,12 +201,14 @@ def get_args_parser(add_help=True):
     parser = argparse.ArgumentParser(description="PyTorch Segmentation Training", add_help=add_help)
-    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", help="dataset path")
-    parser.add_argument("--dataset", default="coco", help="dataset name")
-    parser.add_argument("--model", default="fcn_resnet101", help="model")
+    parser.add_argument("--data-path", default="/datasets01/COCO/022719/", type=str, help="dataset path")
+    parser.add_argument("--dataset", default="coco", type=str, help="dataset name")
+    parser.add_argument("--model", default="fcn_resnet101", type=str, help="model name")
     parser.add_argument("--aux-loss", action="store_true", help="auxiliar loss")
-    parser.add_argument("--device", default="cuda", help="device")
-    parser.add_argument("-b", "--batch-size", default=8, type=int)
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
+    parser.add_argument(
+        "-b", "--batch-size", default=8, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
+    )
     parser.add_argument("--epochs", default=30, type=int, metavar="N", help="number of total epochs to run")
     parser.add_argument(
@@ -227,8 +229,8 @@ def get_args_parser(add_help=True):
     parser.add_argument("--lr-warmup-method", default="linear", type=str, help="the warmup method (default: linear)")
     parser.add_argument("--lr-warmup-decay", default=0.01, type=float, help="the decay for lr")
     parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
     parser.add_argument(
         "--test-only",
@@ -244,7 +246,7 @@ def get_args_parser(add_help=True):
     )
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
     return parser
@@ -137,19 +137,19 @@ def parse_args():
     parser = argparse.ArgumentParser(description="PyTorch Embedding Learning")
-    parser.add_argument("--dataset-dir", default="/tmp/fmnist/", help="FashionMNIST dataset directory path")
+    parser.add_argument("--dataset-dir", default="/tmp/fmnist/", type=str, help="FashionMNIST dataset directory path")
     parser.add_argument(
         "-p", "--labels-per-batch", default=8, type=int, help="Number of unique labels/classes per batch"
     )
     parser.add_argument("-k", "--samples-per-label", default=8, type=int, help="Number of samples per label in a batch")
-    parser.add_argument("--eval-batch-size", default=512, type=int)
-    parser.add_argument("--epochs", default=10, type=int, metavar="N", help="Number of training epochs to run")
-    parser.add_argument("-j", "--workers", default=4, type=int, metavar="N", help="Number of data loading workers")
-    parser.add_argument("--lr", default=0.0001, type=float, help="Learning rate")
+    parser.add_argument("--eval-batch-size", default=512, type=int, help="batch size for evaluation")
+    parser.add_argument("--epochs", default=10, type=int, metavar="N", help="number of total epochs to run")
+    parser.add_argument("-j", "--workers", default=4, type=int, metavar="N", help="number of data loading workers")
+    parser.add_argument("--lr", default=0.0001, type=float, help="initial learning rate")
     parser.add_argument("--margin", default=0.2, type=float, help="Triplet loss margin")
-    parser.add_argument("--print-freq", default=20, type=int, help="Print frequency")
-    parser.add_argument("--save-dir", default=".", help="Model save directory")
-    parser.add_argument("--resume", default="", help="Resume from checkpoint")
+    parser.add_argument("--print-freq", default=20, type=int, help="print frequency")
+    parser.add_argument("--save-dir", default=".", type=str, help="Model save directory")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     return parser.parse_args()
@@ -288,16 +288,18 @@ def parse_args():
     parser = argparse.ArgumentParser(description="PyTorch Video Classification Training")
-    parser.add_argument("--data-path", default="/datasets01_101/kinetics/070618/", help="dataset")
-    parser.add_argument("--train-dir", default="train_avi-480p", help="name of train dir")
-    parser.add_argument("--val-dir", default="val_avi-480p", help="name of val dir")
-    parser.add_argument("--model", default="r2plus1d_18", help="model")
-    parser.add_argument("--device", default="cuda", help="device")
+    parser.add_argument("--data-path", default="/datasets01_101/kinetics/070618/", type=str, help="dataset path")
+    parser.add_argument("--train-dir", default="train_avi-480p", type=str, help="name of train dir")
+    parser.add_argument("--val-dir", default="val_avi-480p", type=str, help="name of val dir")
+    parser.add_argument("--model", default="r2plus1d_18", type=str, help="model name")
+    parser.add_argument("--device", default="cuda", type=str, help="device (Use cuda or cpu Default: cuda)")
     parser.add_argument("--clip-len", default=16, type=int, metavar="N", help="number of frames per clip")
     parser.add_argument(
         "--clips-per-video", default=5, type=int, metavar="N", help="maximum number of clips per video to consider"
     )
-    parser.add_argument("-b", "--batch-size", default=24, type=int)
+    parser.add_argument(
+        "-b", "--batch-size", default=24, type=int, help="images per gpu, the total batch size is $NGPU x batch_size"
+    )
     parser.add_argument("--epochs", default=45, type=int, metavar="N", help="number of total epochs to run")
     parser.add_argument(
         "-j", "--workers", default=10, type=int, metavar="N", help="number of data loading workers (default: 10)"
@@ -319,8 +321,8 @@ def parse_args():
     parser.add_argument("--lr-warmup-method", default="linear", type=str, help="the warmup method (default: linear)")
     parser.add_argument("--lr-warmup-decay", default=0.001, type=float, help="the decay for lr")
     parser.add_argument("--print-freq", default=10, type=int, help="print frequency")
-    parser.add_argument("--output-dir", default=".", help="path where to save")
-    parser.add_argument("--resume", default="", help="resume from checkpoint")
+    parser.add_argument("--output-dir", default=".", type=str, help="path to save outputs")
+    parser.add_argument("--resume", default="", type=str, help="path of checkpoint")
     parser.add_argument("--start-epoch", default=0, type=int, metavar="N", help="start epoch")
     parser.add_argument(
         "--cache-dataset",
@@ -360,7 +362,7 @@ def parse_args():
     # distributed training parameters
     parser.add_argument("--world-size", default=1, type=int, help="number of distributed processes")
-    parser.add_argument("--dist-url", default="env://", help="url used to set up distributed training")
+    parser.add_argument("--dist-url", default="env://", type=str, help="url used to set up distributed training")
     args = parser.parse_args()