Commit e129194a authored by Sugon_ldc

add new model resnet50v1.5
import os
from pathlib import Path
from dataclasses import dataclass
from typing import Dict, Any

import yaml
from main import main, add_parser_arguments, available_models
import torch.backends.cudnn as cudnn
import argparse


def get_config_path():
    return Path(os.path.dirname(os.path.abspath(__file__))) / "configs.yml"


if __name__ == "__main__":
    yaml_cfg_parser = argparse.ArgumentParser(add_help=False)
    yaml_cfg_parser.add_argument(
        "--cfg_file",
        default=get_config_path(),
        type=str,
        help="path to yaml config file",
    )
    yaml_cfg_parser.add_argument("--model", default=None, type=str, required=True)
    yaml_cfg_parser.add_argument("--mode", default=None, type=str, required=True)
    yaml_cfg_parser.add_argument("--precision", default=None, type=str, required=True)
    yaml_cfg_parser.add_argument("--platform", default=None, type=str, required=True)
    yaml_args, rest = yaml_cfg_parser.parse_known_args()

    with open(yaml_args.cfg_file, "r") as cfg_file:
        config = yaml.load(cfg_file, Loader=yaml.FullLoader)

    cfg = {
        **config["precision"][yaml_args.precision],
        **config["platform"][yaml_args.platform],
        **config["models"][yaml_args.model][yaml_args.platform][yaml_args.precision],
        **config["mode"][yaml_args.mode],
    }

    parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
    add_parser_arguments(parser)
    parser.set_defaults(**cfg)
    args, rest = parser.parse_known_args(rest)

    model_arch = available_models()[args.arch]
    model_args, rest = model_arch.parser().parse_known_args(rest)
    assert len(rest) == 0, f"Unknown args passed: {rest}"

    cudnn.benchmark = True

    main(args, model_args, model_arch)
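# Note on configs.yml: the dictionary merge above expects four top-level
# sections (precision, platform, models, mode) whose leaves are argparse
# defaults keyed by the dest names defined in add_parser_arguments. A
# hypothetical minimal file could look like the following -- the keys are
# real argument names, but every value here is illustrative only:
#
#   precision:
#     AMP:
#       amp: true
#       static_loss_scale: 128
#   platform:
#     DGXA100:
#       workers: 16
#   models:
#     resnet50:
#       DGXA100:
#         AMP:
#           batch_size: 256
#           lr: 0.256
#   mode:
#     convergence:
#       epochs: 90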
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os

# We need to do this before importing anything else, as a workaround for this
# bug: https://github.com/pytorch/pytorch/issues/28389
os.environ["KMP_AFFINITY"] = "disabled"

import argparse
import random
import time
from copy import deepcopy

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed

import image_classification.logger as log
from image_classification.smoothing import LabelSmoothing
from image_classification.mixup import NLLMultiLabelSmooth, MixUpWrapper
from image_classification.dataloaders import *
from image_classification.training import *
from image_classification.utils import *
from image_classification.models import (
    resnet50,
    resnext101_32x4d,
    se_resnext101_32x4d,
    efficientnet_b0,
    efficientnet_b4,
    efficientnet_widese_b0,
    efficientnet_widese_b4,
)
from image_classification.optimizers import (
    get_optimizer,
    lr_cosine_policy,
    lr_linear_policy,
    lr_step_policy,
)
from image_classification.gpu_affinity import set_affinity, AffinityMode

import dllogger
def available_models():
    models = {
        m.name: m
        for m in [
            resnet50,
            resnext101_32x4d,
            se_resnext101_32x4d,
            efficientnet_b0,
            efficientnet_b4,
            efficientnet_widese_b0,
            efficientnet_widese_b4,
        ]
    }
    return models
def add_parser_arguments(parser, skip_arch=False):
    parser.add_argument("data", metavar="DIR", help="path to dataset")
    parser.add_argument(
        "--data-backend",
        metavar="BACKEND",
        default="dali-cpu",
        choices=DATA_BACKEND_CHOICES,
        help="data backend: "
        + " | ".join(DATA_BACKEND_CHOICES)
        + " (default: dali-cpu)",
    )
    parser.add_argument(
        "--interpolation",
        metavar="INTERPOLATION",
        default="bilinear",
        help="interpolation type for resizing images: bilinear, bicubic or triangular (DALI only)",
    )
    if not skip_arch:
        model_names = available_models().keys()
        parser.add_argument(
            "--arch",
            "-a",
            metavar="ARCH",
            default="resnet50",
            choices=model_names,
            help="model architecture: "
            + " | ".join(model_names)
            + " (default: resnet50)",
        )
    parser.add_argument(
        "-j",
        "--workers",
        default=5,
        type=int,
        metavar="N",
        help="number of data loading workers (default: 5)",
    )
    parser.add_argument(
        "--prefetch",
        default=2,
        type=int,
        metavar="N",
        help="number of samples prefetched by each loader",
    )
    parser.add_argument(
        "--epochs",
        default=90,
        type=int,
        metavar="N",
        help="number of total epochs to run",
    )
    parser.add_argument(
        "--run-epochs",
        default=-1,
        type=int,
        metavar="N",
        help="run only N epochs, used for checkpointing runs",
    )
    parser.add_argument(
        "--early-stopping-patience",
        default=-1,
        type=int,
        metavar="N",
        help="early stopping after N epochs without validation accuracy improving",
    )
    parser.add_argument(
        "--image-size", default=None, type=int, help="resolution of image"
    )
    parser.add_argument(
        "-b",
        "--batch-size",
        default=256,
        type=int,
        metavar="N",
        help="mini-batch size (default: 256) per gpu",
    )
    parser.add_argument(
        "--optimizer-batch-size",
        default=-1,
        type=int,
        metavar="N",
        help="size of a total batch size, for simulating bigger batches using gradient accumulation",
    )
    parser.add_argument(
        "--lr",
        "--learning-rate",
        default=0.1,
        type=float,
        metavar="LR",
        help="initial learning rate",
    )
    parser.add_argument(
        "--lr-schedule",
        default="step",
        type=str,
        metavar="SCHEDULE",
        choices=["step", "linear", "cosine"],
        help="Type of LR schedule: step, linear, cosine",
    )
    parser.add_argument("--end-lr", default=0, type=float)
    parser.add_argument(
        "--warmup", default=0, type=int, metavar="E", help="number of warmup epochs"
    )
    parser.add_argument(
        "--label-smoothing",
        default=0.0,
        type=float,
        metavar="S",
        help="label smoothing",
    )
    parser.add_argument(
        "--mixup", default=0.0, type=float, metavar="ALPHA", help="mixup alpha"
    )
    parser.add_argument(
        "--optimizer", default="sgd", type=str, choices=("sgd", "rmsprop")
    )
    parser.add_argument(
        "--momentum", default=0.9, type=float, metavar="M", help="momentum"
    )
    parser.add_argument(
        "--weight-decay",
        "--wd",
        default=1e-4,
        type=float,
        metavar="W",
        help="weight decay (default: 1e-4)",
    )
    parser.add_argument(
        "--bn-weight-decay",
        action="store_true",
        help="use weight_decay on batch normalization learnable parameters (default: false)",
    )
    parser.add_argument(
        "--rmsprop-alpha",
        default=0.9,
        type=float,
        help="value of alpha parameter in rmsprop optimizer (default: 0.9)",
    )
    parser.add_argument(
        "--rmsprop-eps",
        default=1e-3,
        type=float,
        help="value of eps parameter in rmsprop optimizer (default: 1e-3)",
    )
    parser.add_argument(
        "--nesterov",
        action="store_true",
        help="use nesterov momentum (default: false)",
    )
    parser.add_argument(
        "--print-freq",
        "-p",
        default=10,
        type=int,
        metavar="N",
        help="print frequency (default: 10)",
    )
    parser.add_argument(
        "--resume",
        default=None,
        type=str,
        metavar="PATH",
        help="path to latest checkpoint (default: none)",
    )
    parser.add_argument(
        "--static-loss-scale",
        type=float,
        default=1,
        help="Static loss scale; positive power-of-2 values can improve amp convergence.",
    )
    parser.add_argument(
        "--prof", type=int, default=-1, metavar="N", help="Run only N iterations"
    )
    parser.add_argument(
        "--amp",
        action="store_true",
        help="Run model in AMP (automatic mixed precision) mode.",
    )
    parser.add_argument(
        "--seed", default=None, type=int, help="random seed used for numpy and pytorch"
    )
    parser.add_argument(
        "--gather-checkpoints",
        default=0,
        type=int,
        help=(
            "Gather N last checkpoints throughout the training;"
            " without this flag only best and last checkpoints will be stored. "
            "Use -1 for all checkpoints"
        ),
    )
    parser.add_argument(
        "--raport-file",
        default="experiment_raport.json",
        type=str,
        help="file in which to store JSON experiment raport",
    )
    parser.add_argument(
        "--evaluate", action="store_true", help="evaluate checkpoint/model"
    )
    parser.add_argument("--training-only", action="store_true", help="do not evaluate")
    parser.add_argument(
        "--no-checkpoints",
        action="store_false",
        dest="save_checkpoints",
        help="do not store any checkpoints, useful for benchmarking",
    )
    parser.add_argument(
        "--jit",
        type=str,
        default="no",
        choices=["no", "script"],
        help="no -> do not use torch.jit; script -> use torch.jit.script",
    )
    parser.add_argument("--checkpoint-filename", default="checkpoint.pth.tar", type=str)
    parser.add_argument(
        "--workspace",
        type=str,
        default="./",
        metavar="DIR",
        help="path to directory where checkpoints will be stored",
    )
    parser.add_argument(
        "--memory-format",
        type=str,
        default="nchw",
        choices=["nchw", "nhwc"],
        help="memory layout, nchw or nhwc",
    )
    parser.add_argument("--use-ema", default=None, type=float, help="use EMA")
    parser.add_argument(
        "--augmentation",
        type=str,
        default=None,
        choices=[None, "autoaugment"],
        help="augmentation method",
    )
    parser.add_argument(
        "--gpu-affinity",
        type=str,
        default="none",
        required=False,
        choices=[am.name for am in AffinityMode],
    )
    parser.add_argument(
        "--topk",
        type=int,
        default=5,
        required=False,
    )
def prepare_for_training(args, model_args, model_arch):
    args.distributed = False
    if "WORLD_SIZE" in os.environ:
        args.distributed = int(os.environ["WORLD_SIZE"]) > 1
        args.local_rank = int(os.environ["LOCAL_RANK"])
    else:
        args.local_rank = 0

    args.gpu = 0
    args.world_size = 1

    if args.distributed:
        args.gpu = args.local_rank % torch.cuda.device_count()
        torch.cuda.set_device(args.gpu)
        dist.init_process_group(backend="nccl", init_method="env://")
        args.world_size = torch.distributed.get_world_size()

    affinity = set_affinity(args.gpu, mode=args.gpu_affinity)
    print(f"Training process {args.local_rank} affinity: {affinity}")

    if args.seed is not None:
        print("Using seed = {}".format(args.seed))
        torch.manual_seed(args.seed + args.local_rank)
        torch.cuda.manual_seed(args.seed + args.local_rank)
        np.random.seed(seed=args.seed + args.local_rank)
        random.seed(args.seed + args.local_rank)

        def _worker_init_fn(id):
            # Worker process should inherit its affinity from parent
            affinity = os.sched_getaffinity(0)
            print(f"Process {args.local_rank} Worker {id} set affinity to: {affinity}")
            np.random.seed(seed=args.seed + args.local_rank + id)
            random.seed(args.seed + args.local_rank + id)

    else:

        def _worker_init_fn(id):
            # Worker process should inherit its affinity from parent
            affinity = os.sched_getaffinity(0)
            print(f"Process {args.local_rank} Worker {id} set affinity to: {affinity}")

    if args.static_loss_scale != 1.0:
        if not args.amp:
            print("Warning: if --amp is not used, static_loss_scale will be ignored.")

    if args.optimizer_batch_size < 0:
        batch_size_multiplier = 1
    else:
        tbs = args.world_size * args.batch_size
        if args.optimizer_batch_size % tbs != 0:
            print(
                "Warning: simulated batch size {} is not divisible by actual batch size {}".format(
                    args.optimizer_batch_size, tbs
                )
            )
        batch_size_multiplier = int(args.optimizer_batch_size / tbs)
        print("BSM: {}".format(batch_size_multiplier))

    start_epoch = 0
    best_prec1 = 0
    # optionally resume from a checkpoint
    if args.resume is not None:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(
                args.resume, map_location=lambda storage, loc: storage.cuda(args.gpu)
            )
            start_epoch = checkpoint["epoch"]
            best_prec1 = checkpoint["best_prec1"]
            model_state = checkpoint["state_dict"]
            optimizer_state = checkpoint["optimizer"]
            # not every checkpoint stores EMA weights
            model_state_ema = checkpoint.get("state_dict_ema")
            print(
                "=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint["epoch"]
                )
            )
            if start_epoch >= args.epochs:
                print(
                    f"Launched training for {args.epochs} epochs, but the checkpoint has already run {start_epoch}"
                )
                exit(1)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            model_state = None
            model_state_ema = None
            optimizer_state = None
    else:
        model_state = None
        model_state_ema = None
        optimizer_state = None

    loss = nn.CrossEntropyLoss
    if args.mixup > 0.0:
        loss = lambda: NLLMultiLabelSmooth(args.label_smoothing)
    elif args.label_smoothing > 0.0:
        loss = lambda: LabelSmoothing(args.label_smoothing)

    memory_format = (
        torch.channels_last if args.memory_format == "nhwc" else torch.contiguous_format
    )

    # only rank 0 downloads pretrained weights, to avoid a race between processes
    model = model_arch(
        **{
            k: v
            if k != "pretrained"
            else v and (not args.distributed or dist.get_rank() == 0)
            for k, v in model_args.__dict__.items()
        }
    )

    image_size = (
        args.image_size
        if args.image_size is not None
        else model.arch.default_image_size
    )

    scaler = torch.cuda.amp.GradScaler(
        init_scale=args.static_loss_scale,
        growth_factor=2,
        backoff_factor=0.5,
        growth_interval=100,
        enabled=args.amp,
    )

    executor = Executor(
        model,
        loss(),
        cuda=True,
        memory_format=memory_format,
        amp=args.amp,
        scaler=scaler,
        divide_loss=batch_size_multiplier,
        ts_script=args.jit == "script",
    )

    # Create data loaders and optimizers as needed
    if args.data_backend == "pytorch":
        get_train_loader = get_pytorch_train_loader
        get_val_loader = get_pytorch_val_loader
    elif args.data_backend == "dali-gpu":
        get_train_loader = get_dali_train_loader(dali_cpu=False)
        get_val_loader = get_dali_val_loader()
    elif args.data_backend == "dali-cpu":
        get_train_loader = get_dali_train_loader(dali_cpu=True)
        get_val_loader = get_dali_val_loader()
    elif args.data_backend == "synthetic":
        get_val_loader = get_synthetic_loader
        get_train_loader = get_synthetic_loader
    else:
        print("Bad data backend picked")
        exit(1)

    train_loader, train_loader_len = get_train_loader(
        args.data,
        image_size,
        args.batch_size,
        model_args.num_classes,
        args.mixup > 0.0,
        interpolation=args.interpolation,
        augmentation=args.augmentation,
        start_epoch=start_epoch,
        workers=args.workers,
        _worker_init_fn=_worker_init_fn,
        memory_format=memory_format,
        prefetch_factor=args.prefetch,
    )
    if args.mixup != 0.0:
        train_loader = MixUpWrapper(args.mixup, train_loader)

    val_loader, val_loader_len = get_val_loader(
        args.data,
        image_size,
        args.batch_size,
        model_args.num_classes,
        False,
        interpolation=args.interpolation,
        workers=args.workers,
        _worker_init_fn=_worker_init_fn,
        memory_format=memory_format,
        prefetch_factor=args.prefetch,
    )

    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        logger = log.Logger(
            args.print_freq,
            [
                dllogger.StdOutBackend(
                    dllogger.Verbosity.DEFAULT, step_format=log.format_step
                ),
                dllogger.JSONStreamBackend(
                    dllogger.Verbosity.VERBOSE,
                    os.path.join(args.workspace, args.raport_file),
                ),
            ],
            start_epoch=start_epoch - 1,
        )
    else:
        logger = log.Logger(args.print_freq, [], start_epoch=start_epoch - 1)

    logger.log_parameter(args.__dict__, verbosity=dllogger.Verbosity.DEFAULT)
    logger.log_parameter(
        {f"model.{k}": v for k, v in model_args.__dict__.items()},
        verbosity=dllogger.Verbosity.DEFAULT,
    )

    optimizer = get_optimizer(
        list(executor.model.named_parameters()),
        args.lr,
        args=args,
        state=optimizer_state,
    )

    if args.lr_schedule == "step":
        lr_policy = lr_step_policy(args.lr, [30, 60, 80], 0.1, args.warmup)
    elif args.lr_schedule == "cosine":
        lr_policy = lr_cosine_policy(
            args.lr, args.warmup, args.epochs, end_lr=args.end_lr
        )
    elif args.lr_schedule == "linear":
        lr_policy = lr_linear_policy(args.lr, args.warmup, args.epochs)

    if args.distributed:
        executor.distributed(args.gpu)

    if model_state is not None:
        executor.model.load_state_dict(model_state)

    trainer = Trainer(
        executor,
        optimizer,
        grad_acc_steps=batch_size_multiplier,
        ema=args.use_ema,
    )
    if (args.use_ema is not None) and (model_state_ema is not None):
        trainer.ema_executor.model.load_state_dict(model_state_ema)

    return (
        trainer,
        lr_policy,
        train_loader,
        train_loader_len,
        val_loader,
        logger,
        start_epoch,
        best_prec1,
    )
def main(args, model_args, model_arch):
    exp_start_time = time.time()

    (
        trainer,
        lr_policy,
        train_loader,
        train_loader_len,
        val_loader,
        logger,
        start_epoch,
        best_prec1,
    ) = prepare_for_training(args, model_args, model_arch)

    train_loop(
        trainer,
        lr_policy,
        train_loader,
        train_loader_len,
        val_loader,
        logger,
        start_epoch=start_epoch,
        end_epoch=min((start_epoch + args.run_epochs), args.epochs)
        if args.run_epochs != -1
        else args.epochs,
        early_stopping_patience=args.early_stopping_patience,
        best_prec1=best_prec1,
        prof=args.prof,
        skip_training=args.evaluate,
        skip_validation=args.training_only,
        save_checkpoints=args.save_checkpoints and not args.evaluate,
        checkpoint_dir=args.workspace,
        checkpoint_filename=args.checkpoint_filename,
        keep_last_n_checkpoints=args.gather_checkpoints,
        topk=args.topk,
    )
    exp_duration = time.time() - exp_start_time
    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        logger.end()
    print("Experiment ended")
if __name__ == "__main__":
    epilog = [
        "Based on the architecture picked by --arch flag, you may use the following options:\n"
    ]
    for model, ep in available_models().items():
        model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
        epilog.append(model_help)

    parser = argparse.ArgumentParser(
        description="PyTorch ImageNet Training",
        epilog="\n".join(epilog),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    add_parser_arguments(parser)
    args, rest = parser.parse_known_args()

    model_arch = available_models()[args.arch]
    model_args, rest = model_arch.parser().parse_known_args(rest)
    print(model_args)
    assert len(rest) == 0, f"Unknown args passed: {rest}"

    cudnn.benchmark = True

    main(args, model_args, model_arch)
import argparse

import torch
import pytorch_quantization

from image_classification.models import (
    resnet50,
    resnext101_32x4d,
    se_resnext101_32x4d,
    efficientnet_b0,
    efficientnet_b4,
    efficientnet_widese_b0,
    efficientnet_widese_b4,
    efficientnet_quant_b0,
    efficientnet_quant_b4,
)


def available_models():
    models = {
        m.name: m
        for m in [
            resnet50,
            resnext101_32x4d,
            se_resnext101_32x4d,
            efficientnet_b0,
            efficientnet_b4,
            efficientnet_widese_b0,
            efficientnet_widese_b4,
            efficientnet_quant_b0,
            efficientnet_quant_b4,
        ]
    }
    return models


def parse_args(parser):
    """
    Parse commandline arguments.
    """
    model_names = available_models().keys()
    parser.add_argument("--arch", "-a", metavar="ARCH", default="resnet50", choices=model_names,
                        help="model architecture: " + " | ".join(model_names) + " (default: resnet50)")
    parser.add_argument("--device", metavar="DEVICE", default="cuda", choices=['cpu', 'cuda'],
                        help="device on which the model is run: cpu, cuda (default: cuda)")
    parser.add_argument("--image-size", default=None, type=int, help="resolution of image")
    parser.add_argument('--output', type=str, help='Path to converted model')
    parser.add_argument("-b", "--batch-size", default=256, type=int, metavar="N",
                        help="mini-batch size (default: 256) per gpu")
    return parser
def final_name(base_name):
    splitted = base_name.split('.')
    if splitted[-1] in ('pt', 'pth') or len(splitted) > 1:
        # swap only the extension for .onnx, rather than replacing every
        # occurrence of the "pt"/"pth" substring in the name
        fin_name = '.'.join(splitted[:-1] + ['onnx'])
    else:
        fin_name = base_name + '.onnx'
    return fin_name
def get_dataloader(image_size, bs, num_classes):
    """return dataloader for inference"""
    from image_classification.dataloaders import get_synthetic_loader

    def data_loader():
        loader, _ = get_synthetic_loader(None, image_size, bs, num_classes, False)
        for inp, _ in loader:
            yield inp
            break

    return data_loader()


def prepare_inputs(dataloader, device):
    """load sample inputs to device"""
    inputs = []
    for batch in dataloader:
        if type(batch) is torch.Tensor:
            batch_d = batch.to(device)
            batch_d = (batch_d, )
            inputs.append(batch_d)
        else:
            batch_d = []
            for x in batch:
                assert type(x) is torch.Tensor, "input is not a tensor"
                batch_d.append(x.to(device))
            batch_d = tuple(batch_d)
            inputs.append(batch_d)
    return inputs
def check_quant_weight_correctness(checkpoint_path, model):
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    state_dict = {k[len("module."):] if k.startswith("module.") else k: v for k, v in state_dict.items()}
    quantizers_sd_keys = {f'{n[0]}._amax' for n in model.named_modules() if 'quantizer' in n[0]}
    sd_all_keys = quantizers_sd_keys | set(model.state_dict().keys())
    assert set(state_dict.keys()) == sd_all_keys, (f'Passed quantized architecture, but the following keys are missing in '
                                                   f'checkpoint: {list(sd_all_keys - set(state_dict.keys()))}')
def main(args, model_args, model_arch):
    quant_arch = args.arch in ['efficientnet-quant-b0', 'efficientnet-quant-b4']
    if quant_arch:
        # export fake-quantization nodes in a form the ONNX exporter understands
        pytorch_quantization.nn.modules.tensor_quantizer.TensorQuantizer.use_fb_fake_quant = True

    model = model_arch(**model_args.__dict__)
    if quant_arch and model_args.pretrained_from_file is not None:
        check_quant_weight_correctness(model_args.pretrained_from_file, model)

    image_size = args.image_size if args.image_size is not None else model.arch.default_image_size

    train_loader = get_dataloader(image_size, args.batch_size, model_args.num_classes)
    inputs = prepare_inputs(train_loader, args.device)

    final_model_path = args.output if args.output is not None else final_name(model_args.pretrained_from_file)

    model.to(args.device)
    model.eval()

    with torch.no_grad():
        torch.onnx.export(model,
                          inputs[0],
                          final_model_path,
                          verbose=True,
                          opset_version=13,
                          enable_onnx_checker=True,
                          do_constant_folding=True)
if __name__ == '__main__':
    epilog = [
        "Based on the architecture picked by --arch flag, you may use the following options:\n"
    ]
    for model, ep in available_models().items():
        model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
        epilog.append(model_help)

    parser = argparse.ArgumentParser(
        description="PyTorch ImageNet Training",
        epilog="\n".join(epilog),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser = parse_args(parser)
    args, rest = parser.parse_known_args()

    model_arch = available_models()[args.arch]
    model_args, rest = model_arch.parser().parse_known_args(rest)
    assert len(rest) == 0, f"Unknown args passed: {rest}"

    main(args, model_args, model_arch)
# From PyTorch:
#
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2016- Facebook, Inc (Adam Paszke)
# Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
# Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
# Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
# Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
# Copyright (c) 2011-2013 NYU (Clement Farabet)
# Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
# Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
# Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
#
# From Caffe2:
#
# Copyright (c) 2016-present, Facebook Inc. All rights reserved.
#
# All contributions by Facebook:
# Copyright (c) 2016 Facebook Inc.
#
# All contributions by Google:
# Copyright (c) 2015 Google Inc.
# All rights reserved.
#
# All contributions by Yangqing Jia:
# Copyright (c) 2015 Yangqing Jia
# All rights reserved.
#
# All contributions from Caffe:
# Copyright(c) 2013, 2014, 2015, the respective contributors
# All rights reserved.
#
# All other contributions:
# Copyright(c) 2015, 2016 the respective contributors
# All rights reserved.
#
# Caffe2 uses a copyright model similar to Caffe: each contributor holds
# copyright over their contributions to Caffe2. The project versioning records
# all such contribution and copyright details. If a contributor wants to further
# mark their specific copyright on a particular contribution, they should
# indicate their copyright solely in the commit message of the change when it is
# committed.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the names of Facebook, Deepmind Technologies, NYU, NEC Laboratories America
# and IDIAP Research Institute nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
import subprocess
import os
import socket
import time
from argparse import ArgumentParser, REMAINDER

import torch


def parse_args():
    """
    Helper function parsing the command line options
    @retval ArgumentParser
    """
    parser = ArgumentParser(
        description="PyTorch distributed training launch "
        "helper utility that will spawn up "
        "multiple distributed processes"
    )

    # Optional arguments for the launch helper
    parser.add_argument(
        "--nnodes",
        type=int,
        default=1,
        help="The number of nodes to use for distributed training",
    )
    parser.add_argument(
        "--node_rank",
        type=int,
        default=0,
        help="The rank of the node for multi-node distributed training",
    )
    parser.add_argument(
        "--nproc_per_node",
        type=int,
        default=1,
        help="The number of processes to launch on each node; "
        "for GPU training, this is recommended to be set "
        "to the number of GPUs in your system so that "
        "each process can be bound to a single GPU.",
    )
    parser.add_argument(
        "--master_addr",
        default="127.0.0.1",
        type=str,
        help="Master node (rank 0)'s address; should be either "
        "the IP address or the hostname of node 0. For "
        "single-node multi-proc training, "
        "--master_addr can simply be 127.0.0.1",
    )
    parser.add_argument(
        "--master_port",
        default=29500,
        type=int,
        help="Master node (rank 0)'s free port that needs to "
        "be used for communication during distributed "
        "training",
    )

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help="The full path to the single-GPU training "
        "program/script to be launched in parallel, "
        "followed by all the arguments for the "
        "training script",
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()

    # world size in terms of number of processes
    dist_world_size = args.nproc_per_node * args.nnodes

    # set PyTorch distributed related environmental variables
    current_env = os.environ.copy()
    current_env["MASTER_ADDR"] = args.master_addr
    current_env["MASTER_PORT"] = str(args.master_port)
    current_env["WORLD_SIZE"] = str(dist_world_size)

    processes = []

    for local_rank in range(0, args.nproc_per_node):
        # each process's rank
        dist_rank = args.nproc_per_node * args.node_rank + local_rank
        current_env["RANK"] = str(dist_rank)
        current_env["LOCAL_RANK"] = str(local_rank)

        # spawn the processes
        cmd = [sys.executable, "-u", args.training_script] + args.training_script_args
        print(cmd)

        # rank 0 logs to the console; other ranks log to per-GPU files
        stdout = (
            None if local_rank == 0 else open("GPU_" + str(local_rank) + ".log", "w")
        )

        process = subprocess.Popen(cmd, env=current_env, stdout=stdout, stderr=stdout)
        processes.append(process)

    try:
        up = True
        error = False
        while up and not error:
            up = False
            for p in processes:
                ret = p.poll()
                if ret is None:
                    up = True
                elif ret != 0:
                    error = True
            time.sleep(1)

        if error:
            for p in processes:
                if p.poll() is None:
                    p.terminate()
            exit(1)

    except KeyboardInterrupt:
        for p in processes:
            p.terminate()
        raise
    except SystemExit:
        for p in processes:
            p.terminate()
        raise
    except:
        for p in processes:
            p.terminate()
        raise


if __name__ == "__main__":
    main()
# Copyright (c) 2018-2019, NVIDIA CORPORATION
# Copyright (c) 2017- Facebook, Inc
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import random
import time
from copy import deepcopy

import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torch.utils.data.distributed

from image_classification.training import *
from image_classification.utils import *
from image_classification.quantization import *
from image_classification.models import efficientnet_quant_b0, efficientnet_quant_b4
from main import prepare_for_training, add_parser_arguments as parse_training

import dllogger


def available_models():
    models = {
        m.name: m
        for m in [
            efficientnet_quant_b0,
            efficientnet_quant_b4,
        ]
    }
    return models


def parse_quantization(parser):
    model_names = available_models().keys()
    parser.add_argument(
        "--arch",
        "-a",
        metavar="ARCH",
        default="efficientnet-quant-b0",
        choices=model_names,
        help="model architecture: "
        + " | ".join(model_names)
        + " (default: efficientnet-quant-b0)",
    )
    parser.add_argument(
        "--skip-calibration",
        action="store_true",
        help="skip calibration before training (default: false)",
    )


def parse_training_args(parser):
    from main import add_parser_arguments

    return add_parser_arguments(parser)
def main(args, model_args, model_arch):
    exp_start_time = time.time()
    global best_prec1
    best_prec1 = 0

    skip_calibration = args.skip_calibration or args.evaluate or args.resume is not None

    select_default_calib_method()

    (
        trainer,
        lr_policy,
        train_loader,
        train_loader_len,
        val_loader,
        logger,
        start_epoch,
        best_prec1,
    ) = prepare_for_training(args, model_args, model_arch)

    print("RUNNING QUANTIZATION")

    if not skip_calibration:
        calibrate(trainer.model_and_loss.model, train_loader, logger, calib_iter=10)

    train_loop(
        trainer,
        lr_policy,
        train_loader,
        train_loader_len,
        val_loader,
        logger,
        should_backup_checkpoint(args),
        start_epoch=start_epoch,
        end_epoch=min((start_epoch + args.run_epochs), args.epochs)
        if args.run_epochs != -1
        else args.epochs,
        best_prec1=best_prec1,
        prof=args.prof,
        skip_training=args.evaluate,
        skip_validation=args.training_only,
        save_checkpoints=args.save_checkpoints,
        checkpoint_dir=args.workspace,
        checkpoint_filename="quantized_" + args.checkpoint_filename,
    )

    exp_duration = time.time() - exp_start_time
    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        logger.end()
    print("Experiment ended")
if __name__ == "__main__":
    epilog = [
        "Based on the architecture picked by --arch flag, you may use the following options:\n"
    ]
    for model, ep in available_models().items():
        model_help = "\n".join(ep.parser().format_help().split("\n")[2:])
        epilog.append(model_help)

    parser = argparse.ArgumentParser(
        description="PyTorch ImageNet Training",
        epilog="\n".join(epilog),
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parse_quantization(parser)
    parse_training(parser, skip_arch=True)

    args, rest = parser.parse_known_args()

    model_arch = available_models()[args.arch]
    model_args, rest = model_arch.parser().parse_known_args(rest)
    print(model_args)
    assert len(rest) == 0, f"Unknown args passed: {rest}"

    cudnn.benchmark = True

    main(args, model_args, model_arch)
git+https://github.com/NVIDIA/dllogger@v1.0.0#egg=dllogger
pynvml==11.0.0
# ResNet50 v1.5 For PyTorch
This repository provides a script and recipe to train the ResNet50 model to
achieve state-of-the-art accuracy, and is tested and maintained by NVIDIA.
## Table Of Contents
* [Model overview](#model-overview)
    * [Default configuration](#default-configuration)
        * [Optimizer](#optimizer)
        * [Data augmentation](#data-augmentation)
    * [DALI](#dali)
    * [Feature support matrix](#feature-support-matrix)
        * [Features](#features)
    * [Mixed precision training](#mixed-precision-training)
        * [Enabling mixed precision](#enabling-mixed-precision)
        * [Enabling TF32](#enabling-tf32)
* [Setup](#setup)
    * [Requirements](#requirements)
* [Quick Start Guide](#quick-start-guide)
* [Advanced](#advanced)
    * [Scripts and sample code](#scripts-and-sample-code)
    * [Command-line options](#command-line-options)
    * [Dataset guidelines](#dataset-guidelines)
    * [Training process](#training-process)
    * [Inference process](#inference-process)
* [Performance](#performance)
    * [Benchmarking](#benchmarking)
        * [Training performance benchmark](#training-performance-benchmark)
        * [Inference performance benchmark](#inference-performance-benchmark)
    * [Results](#results)
        * [Training accuracy results](#training-accuracy-results)
            * [Training accuracy: NVIDIA DGX A100 (8x A100 80GB)](#training-accuracy-nvidia-dgx-a100-8x-a100-80gb)
            * [Training accuracy: NVIDIA DGX-1 (8x V100 16GB)](#training-accuracy-nvidia-dgx-1-8x-v100-16gb)
            * [Training accuracy: NVIDIA DGX-2 (16x V100 32GB)](#training-accuracy-nvidia-dgx-2-16x-v100-32gb)
            * [Example plots](#example-plots)
        * [Training performance results](#training-performance-results)
            * [Training performance: NVIDIA DGX A100 (8x A100 80GB)](#training-performance-nvidia-dgx-a100-8x-a100-80gb)
            * [Training performance: NVIDIA DGX-1 16GB (8x V100 16GB)](#training-performance-nvidia-dgx-1-16gb-8x-v100-16gb)
            * [Training performance: NVIDIA DGX-1 32GB (8x V100 32GB)](#training-performance-nvidia-dgx-1-32gb-8x-v100-32gb)
        * [Inference performance results](#inference-performance-results)
            * [Inference performance: NVIDIA DGX-1 16GB (1x V100 16GB)](#inference-performance-nvidia-dgx-1-1x-v100-16gb)
            * [Inference performance: NVIDIA T4](#inference-performance-nvidia-t4)
* [Release notes](#release-notes)
    * [Changelog](#changelog)
    * [Known issues](#known-issues)
## Model overview
The ResNet50 v1.5 model is a modified version of the [original ResNet50 v1 model](https://arxiv.org/abs/1512.03385).
The difference between v1 and v1.5 is that, in the bottleneck blocks that require
downsampling, v1 has stride = 2 in the first 1x1 convolution, whereas v1.5 has stride = 2 in the 3x3 convolution.
This makes ResNet50 v1.5 slightly more accurate (~0.5% top1) than v1, at the cost of a small performance penalty (~5% imgs/sec).
The model is initialized as described in [Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification](https://arxiv.org/pdf/1502.01852.pdf).
This model is trained with mixed precision using Tensor Cores on Volta, Turing, and the NVIDIA Ampere GPU architectures. Therefore, researchers can get results over 2x faster than training without Tensor Cores, while experiencing the benefits of mixed precision training. This model is tested against each NGC monthly container release to ensure consistent accuracy and performance over time.
We are currently working on adding [NHWC data layout](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) support for Mixed Precision training.
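To make the stride placement concrete, the following is a minimal sketch of the downsampling bottleneck in both variants (illustrative only; batch norm, ReLU, and the residual connection are omitted, and the repository's actual implementation lives in `image_classification/models`):

```python
import torch.nn as nn

def bottleneck_v1(in_ch, mid_ch, out_ch):
    # v1: stride 2 sits in the first 1x1 convolution
    return nn.Sequential(
        nn.Conv2d(in_ch, mid_ch, kernel_size=1, stride=2, bias=False),
        nn.Conv2d(mid_ch, mid_ch, kernel_size=3, stride=1, padding=1, bias=False),
        nn.Conv2d(mid_ch, out_ch, kernel_size=1, bias=False),
    )

def bottleneck_v1_5(in_ch, mid_ch, out_ch):
    # v1.5: stride 2 moves to the 3x3 convolution
    return nn.Sequential(
        nn.Conv2d(in_ch, mid_ch, kernel_size=1, stride=1, bias=False),
        nn.Conv2d(mid_ch, mid_ch, kernel_size=3, stride=2, padding=1, bias=False),
        nn.Conv2d(mid_ch, out_ch, kernel_size=1, bias=False),
    )
```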
### Default configuration
The following sections highlight the default configurations for the ResNet50 model.
#### Optimizer
This model uses the SGD optimizer with momentum and the following hyperparameters:
* Momentum (0.875)
* Learning rate (LR) = 0.256 for batch size 256; for other batch sizes the learning
  rate is scaled linearly (see the sketch after this list).
* Learning rate schedule - we use a cosine LR schedule.
* For bigger batch sizes (512 and up) we use linear warmup of the learning rate
  during the first couple of epochs,
  following [Training ImageNet in 1 hour](https://arxiv.org/abs/1706.02677).
  Warmup length depends on the total training length.
* Weight decay (WD) = 3.0517578125e-05 (1/32768).
* We do not apply WD to Batch Norm trainable parameters (gamma/bias).
* Label smoothing = 0.1
* We train for:
  * 50 epochs -> configuration that reaches 75.9% top1 accuracy
  * 90 epochs -> 90 epochs is a standard for ImageNet networks
  * 250 epochs -> best possible accuracy
* For 250 epoch training we also use [MixUp regularization](https://arxiv.org/pdf/1710.09412.pdf).
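The linear scaling rule and warmup combine roughly as follows (a sketch; the function name and the per-epoch granularity are illustrative, and the actual schedule is implemented in `image_classification.optimizers`):

```python
import math

def sketch_learning_rate(epoch, global_batch_size, base_lr=0.256,
                         warmup_epochs=8, total_epochs=90):
    # linear scaling rule: the peak LR grows with the global batch size
    peak_lr = base_lr * global_batch_size / 256
    if epoch < warmup_epochs:
        # linear warmup from zero up to the peak learning rate
        return peak_lr * (epoch + 1) / warmup_epochs
    # cosine decay from the peak learning rate towards zero
    progress = (epoch - warmup_epochs) / (total_epochs - warmup_epochs)
    return peak_lr * 0.5 * (1.0 + math.cos(math.pi * progress))
```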
#### Data augmentation
This model uses the following data augmentation (an equivalent torchvision pipeline is sketched after this list):
* For training:
  * Normalization
  * Random resized crop to 224x224
    * Scale from 8% to 100%
    * Aspect ratio from 3/4 to 4/3
  * Random horizontal flip
* For inference:
  * Normalization
  * Scale to 256x256
  * Center crop to 224x224
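An equivalent pipeline expressed with torchvision (a sketch; it assumes the standard ImageNet normalization statistics, which this repository may configure differently):

```python
import torchvision.transforms as T

normalize = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

# training: random resized crop (scale 8%-100%, ratio 3/4-4/3) + horizontal flip
train_transform = T.Compose([
    T.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3 / 4, 4 / 3)),
    T.RandomHorizontalFlip(),
    T.ToTensor(),
    normalize,
])

# inference: resize to 256, then center crop to 224
val_transform = T.Compose([
    T.Resize(256),
    T.CenterCrop(224),
    T.ToTensor(),
    normalize,
])
```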
#### Other training recipes
This script does not target any specific benchmark.
There are changes that others have made which can speed up convergence and/or increase accuracy.
One of the more popular training recipes is provided by [fast.ai](https://github.com/fastai/imagenet-fast).
The fast.ai recipe introduces many changes to the training procedure, one of which is progressive resizing of the training images.
The first part of training uses 128px images, the middle part uses 224px images, and the last part uses 288px images.
The final validation is performed on 288px images.
The training script in this repository performs validation on 224px images, just as the original paper describes.
These two approaches can't be directly compared, since the fast.ai recipe requires validation on 288px images,
and this recipe keeps the original assumption that validation is done on 224px images.
Using 288px images means that a lot more FLOPs are needed during inference to reach the same accuracy.
### Feature support matrix
The following features are supported by this model:
| Feature | ResNet50 |
|---------|----------|
| [DALI](https://docs.nvidia.com/deeplearning/sdk/dali-release-notes/index.html) | Yes |
| [APEX AMP](https://nvidia.github.io/apex/amp.html) | Yes |
#### Features
- NVIDIA DALI - DALI is a library that accelerates the data preparation pipeline. To accelerate your input pipeline, you only need to define your data loader
with the DALI library. For more information about DALI, refer to the [DALI product documentation](https://docs.nvidia.com/deeplearning/dali/user-guide/docs/index.html).
- [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as [Automatic Mixed Precision (AMP)](https://nvidia.github.io/apex/amp.html), which require minimal network code changes to leverage Tensor Cores performance. Refer to the [Enabling mixed precision](#enabling-mixed-precision) section for more details.
### DALI
We use [NVIDIA DALI](https://github.com/NVIDIA/DALI),
which speeds up data loading when the CPU becomes a bottleneck.
DALI can run on either the CPU or the GPU, and outperforms the PyTorch native dataloader.

Run training with `--data-backend dali-gpu` or `--data-backend dali-cpu` to enable DALI.
For DGXA100 and DGX1 we recommend `--data-backend dali-cpu`; for DGX2 we recommend `--data-backend dali-gpu`.
### Mixed precision training
Mixed precision is the combined use of different numerical precisions in a computational method. [Mixed precision](https://arxiv.org/abs/1710.03740) training offers significant computational speedup by performing operations in half-precision format, while storing minimal information in single-precision to retain as much information as possible in critical parts of the network. Since the introduction of [Tensor Cores](https://developer.nvidia.com/tensor-cores) in Volta, and following with both the Turing and Ampere architectures, significant training speedups are experienced by switching to mixed precision -- up to 3x overall speedup on the most arithmetically intense model architectures. Using mixed precision training requires two steps:
1. Porting the model to use the FP16 data type where appropriate.
2. Adding loss scaling to preserve small gradient values.
The ability to train deep learning networks with lower precision was introduced in the Pascal architecture and first supported in CUDA 8 in the NVIDIA Deep Learning SDK.
For information about:
- How to train using mixed precision, see the [Mixed Precision Training](https://arxiv.org/abs/1710.03740) paper and [Training With Mixed Precision](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html) documentation.
- Techniques used for mixed precision training, see the [Mixed-Precision Training of Deep Neural Networks](https://devblogs.nvidia.com/mixed-precision-training-deep-neural-networks/) blog.
- APEX tools for mixed precision training, see the [NVIDIA Apex: Tools for Easy Mixed-Precision Training in PyTorch](https://devblogs.nvidia.com/apex-pytorch-easy-mixed-precision-training/).
#### Enabling mixed precision
Mixed precision is enabled in PyTorch by using Automatic Mixed Precision (AMP), a library from [APEX](https://github.com/NVIDIA/apex) that casts variables to half-precision upon retrieval,
while storing variables in single-precision format. Furthermore, to preserve small gradient magnitudes in backpropagation, a [loss scaling](https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#lossscaling) step must be included when applying gradients.

In PyTorch, loss scaling can be applied easily with the scale_loss() method provided by AMP. The scaling value can be [dynamic](https://nvidia.github.io/apex/fp16_utils.html#apex.fp16_utils.DynamicLossScaler) or fixed.

For an in-depth walkthrough of AMP, check out the sample usage [here](https://github.com/NVIDIA/apex/tree/master/apex/amp#usage-and-getting-started). [APEX](https://github.com/NVIDIA/apex) is a PyTorch extension that contains utility libraries, such as AMP, which require minimal network code changes to leverage Tensor Cores performance.
To enable mixed precision, you can:
- Import AMP from APEX:
```python
from apex import amp
```
- Wrap model and optimizer in amp.initialize:
```python
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", loss_scale="dynamic")
```
- Scale loss before backpropagation:
```python
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
```
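Putting the three pieces together, a minimal training step might look like this (a sketch; `model`, `optimizer`, `criterion`, and `train_loader` are assumed to already exist):

```python
from apex import amp

# one-time setup: let AMP patch the model and optimizer
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", loss_scale="dynamic")

for images, targets in train_loader:
    optimizer.zero_grad()
    output = model(images)
    loss = criterion(output, targets)
    # scale the loss so that small gradients survive the FP16 backward pass
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    optimizer.step()
```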
#### Enabling TF32
TensorFloat-32 (TF32) is the new math mode in [NVIDIA A100](https://www.nvidia.com/en-us/data-center/a100/) GPUs for handling matrix math, also called tensor operations. TF32 running on Tensor Cores in A100 GPUs can provide up to 10x speedups compared to single-precision floating-point math (FP32) on Volta GPUs.
TF32 Tensor Cores can speed up networks using FP32, typically with no loss of accuracy. It is more robust than FP16 for models which require high dynamic range for weights or activations.
For more information, refer to the [TensorFloat-32 in the A100 GPU Accelerates AI Training, HPC up to 20x](https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/) blog post.
TF32 is supported in the NVIDIA Ampere GPU architecture and is enabled by default.
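PyTorch also exposes flags for controlling TF32 explicitly, which can be handy when debugging numerical differences (a sketch; the default values depend on the PyTorch version):

```python
import torch

# TF32 for matrix multiplications and for cuDNN convolutions
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
```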
## Setup
The following section lists the requirements that you need to meet in order to start training the ResNet50 model.
### Requirements
This repository contains a Dockerfile that extends the PyTorch NGC container and encapsulates some dependencies. Aside from these dependencies, ensure you have the following components:
* [NVIDIA Docker](https://github.com/NVIDIA/nvidia-docker)
* [PyTorch 21.03-py3 NGC container](https://ngc.nvidia.com/registry/nvidia-pytorch) or newer
* Supported GPUs:
* [NVIDIA Volta architecture](https://www.nvidia.com/en-us/data-center/volta-gpu-architecture/)
* [NVIDIA Turing architecture](https://www.nvidia.com/en-us/geforce/turing/)
* [NVIDIA Ampere architecture](https://www.nvidia.com/en-us/data-center/nvidia-ampere-gpu-architecture/)
For more information about how to get started with NGC containers, see the
following sections from the NVIDIA GPU Cloud Documentation and the Deep Learning
DGX Documentation:
* [Getting Started Using NVIDIA GPU Cloud](https://docs.nvidia.com/ngc/ngc-getting-started-guide/index.html)
* [Accessing And Pulling From The NGC Container Registry](https://docs.nvidia.com/deeplearning/dgx/user-guide/index.html#accessing_registry)
* [Running PyTorch](https://docs.nvidia.com/deeplearning/dgx/pytorch-release-notes/running.html#running)
For those unable to use the PyTorch NGC container, to set up the required environment or create your own container, see the versioned [NVIDIA Container Support Matrix](https://docs.nvidia.com/deeplearning/frameworks/support-matrix/index.html).
## Quick Start Guide
### 1. Clone the repository.
```
git clone https://github.com/NVIDIA/DeepLearningExamples
cd DeepLearningExamples/PyTorch/Classification/
```
### 2. Download and preprocess the dataset.
The ResNet50 script operates on ImageNet 1k, a widely popular image classification dataset from the ILSVRC challenge.
PyTorch can work directly on JPEGs; therefore, no separate preprocessing/augmentation step is needed.
To train your model using mixed or TF32 precision with Tensor Cores or using FP32,
perform the following steps using the default parameters of the resnet50 model on the ImageNet dataset.
For the specifics concerning training and inference, see the [Advanced](#advanced) section.
1. [Download the images](http://image-net.org/download-images).
2. Extract the training data:
```bash
mkdir train && mv ILSVRC2012_img_train.tar train/ && cd train
tar -xvf ILSVRC2012_img_train.tar && rm -f ILSVRC2012_img_train.tar
find . -name "*.tar" | while read NAME ; do mkdir -p "${NAME%.tar}"; tar -xvf "${NAME}" -C "${NAME%.tar}"; rm -f "${NAME}"; done
cd ..
```
3. Extract the validation data and move the images to subfolders:
```bash
mkdir val && mv ILSVRC2012_img_val.tar val/ && cd val && tar -xvf ILSVRC2012_img_val.tar
wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash
```
The directory in which the `train/` and `val/` directories are placed is referred to as `<path to imagenet>` in this document.
### 3. Build the ResNet50 PyTorch NGC container.
```
docker build . -t nvidia_resnet50
```
### 4. Start an interactive session in the NGC container to run training/inference.
```
nvidia-docker run --rm -it -v <path to imagenet>:/imagenet --ipc=host nvidia_resnet50
```
### 5. Start training
To run training for a standard configuration (DGXA100/DGX1V/DGX2V, AMP/TF32/FP32, 90/250 Epochs),
run one of the scripts in the `./resnet50v1.5/training` directory
called `./resnet50v1.5/training/{AMP, TF32, FP32}/{DGXA100, DGX1V, DGX2V}_resnet50_{AMP, TF32, FP32}_{90, 250}E.sh`.
Ensure ImageNet is mounted in the `/imagenet` directory.
Example:
`bash ./resnet50v1.5/training/AMP/DGX1_resnet50_AMP_250E.sh <path where to store checkpoints and logs>`
### 6. Start inference
You can download pretrained weights from NGC:
```bash
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/resnet50_pyt_amp/versions/20.06.0/zip -O resnet50_pyt_amp_20.06.0.zip
unzip resnet50_pyt_amp_20.06.0.zip
```
To run inference on ImageNet, run:
`python ./main.py --arch resnet50 --evaluate --epochs 1 --pretrained-from-file nvidia_resnet50_200821.pth.tar -b <batch size> <path to imagenet>`
To run inference on JPEG image using pretrained weights:
`python classify.py --arch resnet50 --pretrained-from-file nvidia_resnet50_200821.pth.tar --precision AMP|FP32 --image <path to JPEG image>`
## Advanced
The following sections provide greater details of the dataset, running training and inference, and the training results.
### Scripts and sample code
To run a non-standard configuration use:
* For 1 GPU
    * FP32
`python ./main.py --arch resnet50 -c fanin --label-smoothing 0.1 <path to imagenet>`
    * AMP
`python ./main.py --arch resnet50 -c fanin --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>`
* For multiple GPUs
    * FP32
`python ./multiproc.py --nproc_per_node 8 ./main.py --arch resnet50 -c fanin --label-smoothing 0.1 <path to imagenet>`
    * AMP
`python ./multiproc.py --nproc_per_node 8 ./main.py --arch resnet50 -c fanin --label-smoothing 0.1 --amp --static-loss-scale 256 <path to imagenet>`
Use `python ./main.py -h` to obtain the list of available options in the `main.py` script.
### Command-line options:
To see the full list of available options and their descriptions, use the `-h` or `--help` command-line option, for example:
`python main.py -h`
```
usage: main.py [-h] [--data-backend BACKEND] [--arch ARCH]
[--model-config CONF] [-j N] [--epochs N]
[--run-epochs N] [-b N] [--optimizer-batch-size N] [--lr LR]
[--lr-schedule SCHEDULE] [--warmup E] [--label-smoothing S]
[--mixup ALPHA] [--momentum M] [--weight-decay W]
[--bn-weight-decay] [--nesterov] [--print-freq N]
[--resume PATH] [--pretrained-from-file PATH]
[--static-loss-scale STATIC_LOSS_SCALE] [--dynamic-loss-scale]
[--prof N] [--amp] [--seed SEED] [--gather-checkpoints]
[--raport-file RAPORT_FILE] [--evaluate] [--training-only]
[--no-checkpoints] [--checkpoint-filename CHECKPOINT_FILENAME]
[--workspace DIR] [--memory-format {nchw,nhwc}]
DIR
PyTorch ImageNet Training
positional arguments:
DIR path to dataset
optional arguments:
-h, --help show this help message and exit
--data-backend BACKEND
data backend: pytorch | synthetic | dali-gpu | dali-cpu
(default: dali-cpu)
--arch ARCH, -a ARCH model architecture: resnet18 | resnet34 | resnet50 |
resnet101 | resnet152 | resnext50-32x4d |
resnext101-32x4d | resnext101-32x8d |
resnext101-32x8d-basic | se-resnext101-32x4d (default:
resnet50)
--model-config CONF, -c CONF
model configs: classic | fanin | grp-fanin | grp-
fanout(default: classic)
-j N, --workers N number of data loading workers (default: 5)
--epochs N number of total epochs to run
--run-epochs N run only N epochs, used for checkpointing runs
-b N, --batch-size N mini-batch size (default: 256) per gpu
--optimizer-batch-size N
size of a total batch size, for simulating bigger
batches using gradient accumulation
--lr LR, --learning-rate LR
initial learning rate
--lr-schedule SCHEDULE
Type of LR schedule: step, linear, cosine
--warmup E number of warmup epochs
--label-smoothing S label smoothing
--mixup ALPHA mixup alpha
--momentum M momentum
--weight-decay W, --wd W
weight decay (default: 1e-4)
--bn-weight-decay use weight_decay on batch normalization learnable
parameters, (default: false)
--nesterov use nesterov momentum, (default: false)
--print-freq N, -p N print frequency (default: 10)
--resume PATH path to latest checkpoint (default: none)
--pretrained-from-file PATH
load weights from here
--static-loss-scale STATIC_LOSS_SCALE
Static loss scale, positive power of 2 values can
improve amp convergence.
--dynamic-loss-scale Use dynamic loss scaling. If supplied, this argument
supersedes --static-loss-scale.
--prof N Run only N iterations
--amp Run model AMP (automatic mixed precision) mode.
--seed SEED random seed used for numpy and pytorch
--gather-checkpoints Gather checkpoints throughout the training, without
this flag only best and last checkpoints will be
stored
--raport-file RAPORT_FILE
file in which to store JSON experiment raport
--evaluate evaluate checkpoint/model
--training-only do not evaluate
--no-checkpoints do not store any checkpoints, useful for benchmarking
--checkpoint-filename CHECKPOINT_FILENAME
--workspace DIR path to directory where checkpoints will be stored
--memory-format {nchw,nhwc}
memory layout, nchw or nhwc
```
### Dataset guidelines
To use your own dataset, divide it into directories following this scheme:
- Training images - `train/<class id>/<image>`
- Validation images - `val/<class id>/<image>`
If your dataset has a number of classes different from 1000, you need to pass the `--num_classes N` flag to the training script.
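This is the standard layout consumed by `torchvision.datasets.ImageFolder`, so you can sanity-check your directory structure before training (illustrative; the repository's `pytorch` data backend follows the same convention):

```python
from torchvision import datasets, transforms

# <path to imagenet> stands for the dataset root described above
train_set = datasets.ImageFolder("<path to imagenet>/train",
                                 transform=transforms.ToTensor())
print(len(train_set.classes))  # should equal the number of class directories
```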
### Training process
All results of the training will be stored in the directory specified with the `--workspace` argument.

The script will store:
- most recent checkpoint - `checkpoint.pth.tar` (unless `--no-checkpoints` flag is used).
- checkpoint with best validation accuracy - `model_best.pth.tar` (unless `--no-checkpoints` flag is used).
- JSON log - in the file specified with `--raport-file` flag.
Metrics gathered through training:
- `train.loss` - training loss
- `train.total_ips` - training speed measured in images/second
- `train.compute_ips` - training speed measured in images/second, not counting data loading
- `train.data_time` - time spent on waiting on data
- `train.compute_time` - time spent in forward/backward pass
To restart training from a checkpoint, use the `--resume` option.

To start training from pretrained weights (for example, downloaded from NGC), use the `--pretrained-from-file` option.

The difference between the two is that pretrained weights contain only the model weights,
while checkpoints, apart from the model weights, also contain the optimizer state and the LR scheduler state.

Checkpoints are suitable for dividing training into parts, for example
to split a training job into shorter stages, or to restart training after an infrastructure failure.

Pretrained weights can be used as a base for fine-tuning the model on a different dataset,
or as a backbone for detection models.
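The checkpoint structure can be inspected directly; the keys below are the ones `main.py` reads when resuming:

```python
import torch

checkpoint = torch.load("checkpoint.pth.tar", map_location="cpu")
print(checkpoint["epoch"], checkpoint["best_prec1"])

# pretrained-weights files keep only the model weights; a checkpoint
# additionally stores the optimizer state needed to resume training
model_state = checkpoint["state_dict"]
optimizer_state = checkpoint["optimizer"]
```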
### Inference process
Validation is done every epoch, and can also be run separately on a checkpointed model.
`python ./main.py --arch resnet50 --evaluate --epochs 1 --resume <path to checkpoint> -b <batch size> <path to imagenet>`
Metrics gathered through training:
- `val.loss` - validation loss
- `val.top1` - validation top1 accuracy
- `val.top5` - validation top5 accuracy
- `val.total_ips` - inference speed measured in images/second
- `val.compute_ips` - inference speed measured in images/second, not counting data loading
- `val.data_time` - time spent on waiting on data
- `val.compute_time` - time spent on inference
To run inference on a JPEG image, you first have to extract the model weights from a checkpoint:

`python checkpoint2model.py --checkpoint-path <path to checkpoint> --weight-path <path where weights will be stored>`

Then run the classification script:
`python classify.py --arch resnet50 --pretrained-from-file <path to weights from previous step> --precision AMP|FP32 --image <path to JPEG image>`
You can also run ImageNet validation on pretrained weights:
`python ./main.py --arch resnet50 --evaluate --epochs 1 --pretrained-from-file <path to pretrained weights> -b <batch size> <path to imagenet>`
#### NGC Pretrained weights:
Pretrained weights can be downloaded from NGC:
```bash
wget --content-disposition https://api.ngc.nvidia.com/v2/models/nvidia/resnet50_pyt_amp/versions/20.06.0/zip -O resnet50_pyt_amp_20.06.0.zip
unzip resnet50_pyt_amp_20.06.0.zip
```
To run inference on ImageNet, run:
`python ./main.py --arch resnet50 --evaluate --epochs 1 --pretrained-from-file nvidia_resnet50_200821.pth.tar -b <batch size> <path to imagenet>`
To run inference on JPEG image using pretrained weights:
`python classify.py --arch resnet50 --pretrained-from-file nvidia_resnet50_200821.pth.tar --precision AMP|FP32 --image <path to JPEG image>`
## Performance
The performance measurements in this document were conducted at the time of publication and may not reflect the performance achieved from NVIDIA’s latest software release. For the most up-to-date performance measurements, go to [NVIDIA Data Center Deep Learning Product Performance](https://developer.nvidia.com/deep-learning-performance-training-inference).
### Benchmarking
The following section shows how to run benchmarks measuring the model performance in training and inference modes.
#### Training performance benchmark
To benchmark training, run:
* For 1 GPU
* FP32 (V100 GPUs only)
`python ./launch.py --model resnet50 --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./launch.py --model resnet50 --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./launch.py --model resnet50 --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* For multiple GPUs
* FP32 (V100 GPUs only)
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision FP32 --mode benchmark_training --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision TF32 --mode benchmark_training --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode benchmark_training --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
Each of these scripts will run 100 iterations and save results in the `benchmark.json` file.
#### Inference performance benchmark
To benchmark inference, run:
* FP32 (V100 GPUs only)
`python ./launch.py --model resnet50 --precision FP32 --mode benchmark_inference --platform DGX1V <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* TF32 (A100 GPUs only)
`python ./launch.py --model resnet50 --precision TF32 --mode benchmark_inference --platform DGXA100 <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
* AMP
`python ./launch.py --model resnet50 --precision AMP --mode benchmark_inference --platform <DGX1V|DGXA100> <path to imagenet> --raport-file benchmark.json --epochs 1 --prof 100`
Each of these scripts will run 100 iterations and save results in the `benchmark.json` file.
### Results
#### Training accuracy results
Our results were obtained by running the applicable training script in the pytorch-20.12 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training accuracy: NVIDIA DGX A100 (8x A100 80GB)
| **Epochs** | **Mixed Precision Top1** | **TF32 Top1** |
|:----------:|:------------------------:|:--------------:|
| 90 | 77.12 +/- 0.11 | 76.95 +/- 0.18 |
| 250 | 78.43 +/- 0.11 | 78.38 +/- 0.17 |
##### Training accuracy: NVIDIA DGX-1 (8x V100 16GB)
| **Epochs** | **Mixed Precision Top1** | **FP32 Top1** |
|:----------:|:------------------------:|:--------------:|
| 90 | 76.88 +/- 0.16 | 77.01 +/- 0.16 |
| 250 | 78.25 +/- 0.12 | 78.30 +/- 0.16 |
##### Training accuracy: NVIDIA DGX-2 (16x V100 32GB)
| **Epochs** | **Mixed Precision Top1** | **FP32 Top1** |
|:----------:|:------------------------:|:--------------:|
| 50 | 75.81 +/- 0.08 | 76.04 +/- 0.05 |
| 90 | 77.10 +/- 0.06 | 77.23 +/- 0.04 |
| 250 | 78.59 +/- 0.13 | 78.46 +/- 0.03 |
##### Example plots
The following images show a 250-epoch configuration on a DGX-1V.
![ValidationLoss](./img/loss_plot.png)
![ValidationTop1](./img/top1_plot.png)
![ValidationTop5](./img/top5_plot.png)
#### Training performance results
Our results were obtained by running the applicable training script in the pytorch-21.03 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Training performance: NVIDIA DGX A100 (8x A100 80GB)
| **GPUs** | **Throughput - TF32** | **Throughput - mixed precision** | **Throughput speedup (TF32 to mixed precision)** | **TF32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **TF32 Training Time (90E)** |
|:--------:|:----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 938 img/s | 2470 img/s | 2.63 x | 1.0 x | 1.0 x | ~14 hours | ~36 hours |
| 8 | 7248 img/s | 16621 img/s | 2.29 x | 7.72 x | 6.72 x | ~3 hours | ~5 hours |
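The derived columns above follow directly from the raw throughputs (table values are rounded):
```python
# Re-derive speedup and strong scaling from the DGX A100 throughputs above.
tf32_1gpu, tf32_8gpu = 938, 7248    # img/s
amp_1gpu, amp_8gpu = 2470, 16621    # img/s

print(f"TF32 -> AMP speedup (1 GPU): {amp_1gpu / tf32_1gpu:.2f}x")  # 2.63x
print(f"TF32 strong scaling: {tf32_8gpu / tf32_1gpu:.2f}x")         # ~7.7x
print(f"AMP strong scaling:  {amp_8gpu / amp_1gpu:.2f}x")           # ~6.7x
```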
##### Training performance: NVIDIA DGX-1 16GB (8x V100 16GB)
| **GPUs** | **Throughput - FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **FP32 Training Time (90E)** |
|:--------:|:----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 367 img/s | 1200 img/s | 3.26 x | 1.0 x | 1.0 x | ~29 hours | ~92 hours |
| 8 | 2855 img/s | 8322 img/s | 2.91 x | 7.76 x | 6.93 x | ~5 hours | ~12 hours |
##### Training performance: NVIDIA DGX-1 32GB (8x V100 32GB)
| **GPUs** | **Throughput - FP32** | **Throughput - mixed precision** | **Throughput speedup (FP32 to mixed precision)** | **FP32 Strong Scaling** | **Mixed Precision Strong Scaling** | **Mixed Precision Training Time (90E)** | **FP32 Training Time (90E)** |
|:--------:|:----------:|:--------------------------------:|:------------------------------------------------:|:-----------------------:|:----------------------------------:|:---------------------------------------:|:----------------------------:|
| 1 | 356 img/s | 1156 img/s | 3.24 x | 1.0 x | 1.0 x | ~30 hours | ~95 hours |
| 8 | 2766 img/s | 8056 img/s | 2.91 x | 7.75 x | 6.96 x | ~5 hours | ~13 hours |
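The training-time columns are consistent with these throughputs: with roughly 1.28M training images per epoch in ImageNet-1k, 90 epochs at a steady throughput gives a lower bound on wall-clock time (real runs add validation and per-epoch overhead, so the table values are slightly higher):
```python
# Estimate 90-epoch training time from steady-state throughput
# (a lower bound: ignores validation and per-epoch overhead).
IMAGES_PER_EPOCH = 1_281_167  # ImageNet-1k train split

for label, ips in [("FP32", 356), ("AMP", 1156)]:  # DGX-1 32GB, 1 GPU
    hours = IMAGES_PER_EPOCH * 90 / ips / 3600
    print(f"{label}: ~{hours:.0f} h")  # FP32 ~90 h (table: ~95 h), AMP ~28 h (table: ~30 h)
```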
#### Inference performance results
Our results were obtained by running the applicable inference benchmark script in the pytorch-21.03 NGC container.
To achieve these same results, follow the steps in the [Quick Start Guide](#quick-start-guide).
##### Inference performance: NVIDIA DGX-1 (1x V100 16GB)
###### FP32 Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 96 img/s | 10.37 ms | 10.81 ms | 11.73 ms |
| 2 | 196 img/s | 10.24 ms | 11.18 ms | 12.89 ms |
| 4 | 386 img/s | 10.46 ms | 11.01 ms | 11.75 ms |
| 8 | 709 img/s | 11.5 ms | 12.36 ms | 13.12 ms |
| 16 | 1023 img/s | 16.07 ms | 15.69 ms | 15.97 ms |
| 32 | 1127 img/s | 29.37 ms | 28.53 ms | 28.67 ms |
| 64 | 1200 img/s | 55.4 ms | 53.5 ms | 53.71 ms |
| 128 | 1229 img/s | 109.26 ms | 104.04 ms | 104.34 ms |
| 256 | 1261 img/s | 214.48 ms | 202.51 ms | 202.88 ms |
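As a consistency check, at batch size 1 the throughput is roughly the reciprocal of the average latency; at larger batch sizes the reported throughput exceeds `batch / latency`, presumably because data loading overlaps with compute in the measured pipeline:
```python
# Throughput vs. latency sanity check (FP32 table above).
for batch, latency_ms in [(1, 10.37), (256, 214.48)]:
    print(f"batch {batch}: {batch / (latency_ms / 1000):.0f} img/s")
# batch 1:   ~96 img/s   (table: 96 img/s)
# batch 256: ~1194 img/s (table reports 1261 img/s; the gap is assumed to come
#            from overlap between data loading and compute)
```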
###### Mixed Precision Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 78 img/s | 12.78 ms | 13.27 ms | 14.36 ms |
| 2 | 154 img/s | 13.01 ms | 13.74 ms | 15.19 ms |
| 4 | 300 img/s | 13.41 ms | 14.25 ms | 15.68 ms |
| 8 | 595 img/s | 13.65 ms | 14.51 ms | 15.6 ms |
| 16 | 1178 img/s | 14.0 ms | 15.07 ms | 16.26 ms |
| 32 | 2146 img/s | 15.84 ms | 17.25 ms | 18.53 ms |
| 64 | 2984 img/s | 23.18 ms | 21.51 ms | 21.93 ms |
| 128 | 3249 img/s | 43.55 ms | 39.36 ms | 40.1 ms |
| 256 | 3382 img/s | 84.14 ms | 75.3 ms | 80.08 ms |
##### Inference performance: NVIDIA T4
###### FP32 Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 98 img/s | 10.7 ms | 12.82 ms | 16.71 ms |
| 2 | 186 img/s | 11.26 ms | 13.79 ms | 16.99 ms |
| 4 | 325 img/s | 12.73 ms | 13.89 ms | 18.03 ms |
| 8 | 363 img/s | 22.41 ms | 22.57 ms | 22.9 ms |
| 16 | 409 img/s | 39.77 ms | 39.8 ms | 40.23 ms |
| 32 | 420 img/s | 77.62 ms | 76.92 ms | 77.28 ms |
| 64 | 428 img/s | 152.73 ms | 152.03 ms | 153.02 ms |
| 128 | 426 img/s | 309.26 ms | 303.38 ms | 305.13 ms |
| 256 | 415 img/s | 635.98 ms | 620.16 ms | 625.21 ms |
###### Mixed Precision Inference Latency
| **Batch Size** | **Throughput Avg** | **Latency Avg** | **Latency 95%** | **Latency 99%** |
|:--------------:|:------------------:|:---------------:|:---------------:|:---------------:|
| 1 | 79 img/s | 12.96 ms | 15.47 ms | 20.0 ms |
| 2 | 156 img/s | 13.18 ms | 14.9 ms | 18.73 ms |
| 4 | 317 img/s | 12.99 ms | 14.69 ms | 19.05 ms |
| 8 | 652 img/s | 12.82 ms | 16.04 ms | 19.43 ms |
| 16 | 1050 img/s | 15.8 ms | 16.57 ms | 20.62 ms |
| 32 | 1128 img/s | 29.54 ms | 28.79 ms | 28.97 ms |
| 64 | 1165 img/s | 57.41 ms | 55.67 ms | 56.11 ms |
| 128 | 1190 img/s | 114.24 ms | 109.17 ms | 110.41 ms |
| 256 | 1198 img/s | 225.95 ms | 215.28 ms | 222.94 ms |
## Release notes
### Changelog
1. September 2018
* Initial release
2. January 2019
* Added options: label smoothing, fan-in initialization, and skipping weight decay on batch norm gamma and bias
3. May 2019
* Cosine LR schedule
* MixUp regularization
* DALI support
* DGX2 configurations
* Gradient accumulation
4. July 2019
* DALI-CPU dataloader
* Updated README
5. July 2020
* Added A100 scripts
* Updated README
6. February 2021
* Moved from APEX AMP to Native AMP
### Known issues
There are no known issues with this model.
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGX1V /imagenet --workspace ${1:-./} --raport-file raport.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGX1V /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGX2V /imagenet --workspace ${1:-./} --raport-file raport.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGX2V /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGXA100 /imagenet --workspace ${1:-./} --raport-file raport.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision AMP --mode convergence --platform DGXA100 /imagenet --epochs 90 --mixup 0.0 --workspace ${1:-./} --raport-file raport.json
python ./multiproc.py --nproc_per_node 8 ./launch.py --model resnet50 --precision FP32 --mode convergence --platform DGX1V /imagenet --workspace ${1:-./} --raport-file raport.json