Commit d24c25b9 authored by Michael Carilli

Comprehensive tests for cross product of options

parent 613997ea
# amp: Automatic Mixed Precision # amp: Automatic Mixed Precision
## This README documents the legacy (pre-Amp 1.0) API.
## Documentation for the new 1.0 API can be found [here](https://nvidia.github.io/apex/)
amp is an experimental tool to enable mixed precision training in amp is an experimental tool to enable mixed precision training in
PyTorch with _extreme_ simplicity and overall numerical safety. It PyTorch with extreme simplicity and overall numerical safety. It
does so by employing a whitelist / blacklist model: does so by employing a whitelist / blacklist model:
- Any function on the whitelist casts its input arguments to - Any function on the whitelist casts its input arguments to
fp16. These are functions like `torch.conv2d` that can take fp16. These are functions like `torch.conv2d` that can take
......
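A minimal sketch of the whitelist behavior described above, assuming the legacy `amp.init()` entry point this README documents (the exact patched function set varies by apex version, so the printed dtype is an expectation rather than a guarantee):

import torch
import torch.nn.functional as F
from apex import amp

amp_handle = amp.init(enabled=True)   # monkey-patches whitelisted torch functions

x = torch.randn(8, 3, 32, 32, device="cuda")   # FP32 activations
w = torch.randn(16, 3, 3, 3, device="cuda")    # FP32 weights
y = F.conv2d(x, w)    # conv2d is whitelisted, so its inputs are cast to FP16
print(y.dtype)        # expected: torch.float16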
...@@ -4,8 +4,8 @@ from ._amp_state import _amp_state ...@@ -4,8 +4,8 @@ from ._amp_state import _amp_state
class Properties(object): class Properties(object):
""" """
The purpose of this class is twofold: to establish a set of default properties, This class has two purposes: to establish a set of default properties,
and to route setting of these attributes through __setattr__ so that (in theory) and to route setting of these attributes through __setattr__ so that (in theory)
they can be checked for consistency with other existing args. they can be checked for consistency with other existing args.
""" """
...@@ -18,22 +18,26 @@ class Properties(object): ...@@ -18,22 +18,26 @@ class Properties(object):
"keep_batchnorm_fp32" : None, "keep_batchnorm_fp32" : None,
"master_weights" : False, "master_weights" : False,
"loss_scale" : 1.0, "loss_scale" : 1.0,
"fused_optimizer" : False, # Reserved for future functionality
"enable_ddp_interop" : False} # "fused_optimizer" : False,
# "enable_ddp_interop" : False,
}
""" """
This function allows updating several options at a time without routing through This function allows updating several options at a time without routing through
__setattr__ checks, to avoid "you can't get there from here" scenarios. __setattr__ checks, to avoid "you can't get there from here" scenarios.
Currently not intended to be exposed; users are expected to select an opt_level
and apply consistent modifications.
""" """
def update_options_dict(new_options): def _update_options_dict(new_options):
for k, v in new_options: for k, v in new_options:
if k in self.options: if k in self.options:
self.options[k] = v self.options[k] = v
else: else:
raise ValueError("Tried to set unexpected option {}".format(k)) raise ValueError("Tried to set unexpected option {}".format(k))
""" """
The members of options are not direct attributes of self, so __getattr__ is ok. The members of "options" are not direct attributes of self, so access attempts
This borrows from the logic in torch.nn.Module. will roll down to __getattr__. This borrows from the logic in torch.nn.Module.
""" """
def __getattr__(self, name): def __getattr__(self, name):
if "options" in self.__dict__: if "options" in self.__dict__:
...@@ -42,12 +46,28 @@ class Properties(object): ...@@ -42,12 +46,28 @@ class Properties(object):
return options[name] return options[name]
raise AttributeError("'{}' object has no attribute '{}'".format( raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, name)) type(self).__name__, name))
def __setattr__(self, name, value): def __setattr__(self, name, value):
if "options" in self.__dict__: if "options" in self.__dict__:
if name in self.options: if name in self.options:
print("setting {} {}".format(name, value)) # print("setting {} {}".format(name, value))
self.options[name] = value if name == "loss_scale":
if value == "dynamic":
self.options[name] = value
else:
self.options[name] = float(value)
elif name == "keep_batchnorm_fp32":
if value == "False":
self.options[name] = False
elif value == "True":
self.options[name] = True
else:
assert (value is True or value is False or value is None),\
"keep_batchnorm_fp32 must be a boolean, the string 'True' or 'False', "\
"or None"
self.options[name] = value
else:
self.options[name] = value
else: else:
super(Properties, self).__setattr__(name, value) super(Properties, self).__setattr__(name, value)
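The new ``__setattr__`` logic above normalizes string inputs, which is convenient when option values arrive via argparse (as they do in the test script added later in this commit). A standalone sketch of the same coercion rules, for illustration only:

def coerce(name, value):
    # Mirrors the branches in __setattr__ above; not the apex class itself.
    if name == "loss_scale":
        return value if value == "dynamic" else float(value)
    if name == "keep_batchnorm_fp32":
        if value == "True":
            return True
        if value == "False":
            return False
        assert value is True or value is False or value is None
        return value
    return value

assert coerce("loss_scale", "128.0") == 128.0
assert coerce("loss_scale", "dynamic") == "dynamic"
assert coerce("keep_batchnorm_fp32", "False") is False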
...@@ -71,8 +91,8 @@ class O3: ...@@ -71,8 +91,8 @@ class O3:
properties.keep_batchnorm_fp32 = False properties.keep_batchnorm_fp32 = False
properties.master_weights = False properties.master_weights = False
properties.loss_scale = 1.0 properties.loss_scale = 1.0
properties.fused_optimizer = False # properties.fused_optimizer = False
properties.enable_ddp_interop = False # properties.enable_ddp_interop = False
return properties # modified in place so this isn't really necessary return properties # modified in place so this isn't really necessary
...@@ -94,8 +114,8 @@ class O2: ...@@ -94,8 +114,8 @@ class O2:
properties.keep_batchnorm_fp32 = True properties.keep_batchnorm_fp32 = True
properties.master_weights = True properties.master_weights = True
properties.loss_scale = "dynamic" properties.loss_scale = "dynamic"
properties.fused_optimizer = False # properties.fused_optimizer = False
properties.enable_ddp_interop = False # properties.enable_ddp_interop = False
return properties # modified in place so this isn't really necessary return properties # modified in place so this isn't really necessary
...@@ -113,11 +133,11 @@ class O1: ...@@ -113,11 +133,11 @@ class O1:
properties.opt_level = "O1" properties.opt_level = "O1"
properties.cast_model_type = False properties.cast_model_type = False
properties.patch_torch_functions = True properties.patch_torch_functions = True
properties.keep_batchnorm_fp32 = False properties.keep_batchnorm_fp32 = None
properties.master_weights = False properties.master_weights = False
properties.loss_scale = "dynamic" properties.loss_scale = "dynamic"
properties.fused_optimizer = False # properties.fused_optimizer = False
properties.enable_ddp_interop = False # properties.enable_ddp_interop = False
return properties # modified in place so this isn't really necessary return properties # modified in place so this isn't really necessary
...@@ -132,11 +152,11 @@ class O0: ...@@ -132,11 +152,11 @@ class O0:
properties.opt_level = "O0" properties.opt_level = "O0"
properties.cast_model_type = torch.float32 properties.cast_model_type = torch.float32
properties.patch_torch_functions = False properties.patch_torch_functions = False
properties.keep_batchnorm_fp32 = False properties.keep_batchnorm_fp32 = None
properties.master_weights = False properties.master_weights = False
properties.loss_scale = 1.0 properties.loss_scale = 1.0
properties.fused_optimizer = False # properties.fused_optimizer = False
properties.enable_ddp_interop = False # properties.enable_ddp_interop = False
return properties # modified in place so this isn't really necessary return properties # modified in place so this isn't really necessary
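Taken together, the four opt_level hunks above establish the following property defaults (only the fields visible in this diff are listed; anything the hunks do not show is omitted rather than guessed):

import torch

OPT_LEVEL_DEFAULTS = {
    "O0": dict(cast_model_type=torch.float32, patch_torch_functions=False,
               keep_batchnorm_fp32=None, master_weights=False, loss_scale=1.0),
    "O1": dict(cast_model_type=False, patch_torch_functions=True,
               keep_batchnorm_fp32=None, master_weights=False, loss_scale="dynamic"),
    "O2": dict(keep_batchnorm_fp32=True, master_weights=True, loss_scale="dynamic"),
    "O3": dict(keep_batchnorm_fp32=False, master_weights=False, loss_scale=1.0),
}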
...@@ -170,8 +190,7 @@ def initialize(models, optimizers, enabled=True, opt_level=None, **kwargs): ...@@ -170,8 +190,7 @@ def initialize(models, optimizers, enabled=True, opt_level=None, **kwargs):
patch_torch_functions=None, patch_torch_functions=None,
keep_batchnorm_fp32=None, keep_batchnorm_fp32=None,
master_weights=None, master_weights=None,
loss_scale=None, loss_scale=None,)
enable_ddp_interop=None):
""" """
if not enabled: if not enabled:
_amp_state.opt_properties = Properties() _amp_state.opt_properties = Properties()
......
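The ``enabled`` flag handled just above provides a zero-overhead off switch. A short illustrative call (the model and optimizer here are placeholders):

import torch
from apex import amp

model = torch.nn.Linear(10, 10).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

# With enabled=False, initialize() leaves model and optimizer untouched (see the
# early return above), so the same script can be run in pure FP32 for comparison.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", enabled=False)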
...@@ -15,6 +15,19 @@ def scale_loss(loss, ...@@ -15,6 +15,19 @@ def scale_loss(loss,
optimizer, optimizer,
model=None, model=None,
delay_unscale=False): delay_unscale=False):
"""
On context manager entrance, scale the loss in a way consistent with the current loss scale.
Yield the loss
On context manager exit (if ``delay_unscale=False``), unscale the gradients so that
``optimizer.step()`` can be called.
.. note::
If Amp is using explicit FP32 master params (which is the default for ``opt_level=O2``, and
can also be manually enabled by supplying ``master_weights=True`` to ``amp.initialize``)
any FP16 gradients are copied to FP32 master gradients before being unscaled. ``optimizer.step()``
will then apply the unscaled master gradients to the master params.
"""
if not _amp_state.opt_properties.enabled: if not _amp_state.opt_properties.enabled:
yield loss yield loss
return return
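For reference, the intended call pattern for this context manager, matching the example on the documentation page added later in this commit (``loss`` and ``optimizer`` are assumed to come from an existing training loop):

# loss.backward() becomes:
with amp.scale_loss(loss, optimizer) as scaled_loss:
    scaled_loss.backward()
optimizer.step()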
...@@ -57,7 +70,8 @@ def scale_loss(loss, ...@@ -57,7 +70,8 @@ def scale_loss(loss,
optimizer_step = optimizer.step optimizer_step = optimizer.step
def skip_step(): def skip_step():
logger = logging.getLogger('apex.amp') logger = logging.getLogger('apex.amp')
logger.warning('Gradient overflow, skipping update') logger.warning("Gradient overflow. Skipping step, reducing " +
"loss scale to {}".format(optimizer.loss_scaler.loss_scale()))
optimizer.step = optimizer_step optimizer.step = optimizer_step
optimizer.step = skip_step optimizer.step = skip_step
......
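The hunk above temporarily monkey-patches ``optimizer.step`` when dynamic loss scaling detects an overflow. A standalone sketch of that pattern (illustrative names, not the apex implementation):

import logging

def install_skip_step(optimizer, new_loss_scale):
    real_step = optimizer.step
    def skip_step():
        logging.getLogger("apex.amp").warning(
            "Gradient overflow. Skipping step, reducing loss scale to %s",
            new_loss_scale)
        optimizer.step = real_step   # restore the real step for the next iteration
    optimizer.step = skip_step       # the skipped iteration's step() only logs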
...@@ -2,6 +2,7 @@ import torch ...@@ -2,6 +2,7 @@ import torch
import logging import logging
from ..multi_tensor_apply import multi_tensor_applier from ..multi_tensor_apply import multi_tensor_applier
from ._amp_state import _amp_state from ._amp_state import _amp_state
from itertools import product
# from apex_C import scale_check_overflow # from apex_C import scale_check_overflow
...@@ -90,47 +91,60 @@ class LossScaler(object): ...@@ -90,47 +91,60 @@ class LossScaler(object):
model_master_params = [(model, master) for model, master model_master_params = [(model, master) for model, master
in zip(model_params, master_params)] # some of these may be None in zip(model_params, master_params)] # some of these may be None
# Sync the None-ness of model and master params.
all_same = True
for model, master in model_master_params:
if model.grad is None and master.grad is not None:
master.grad = None
if model.grad is not None and master.grad is None:
master.grad = torch.empty_like(master)
if model.grad is not master.grad:
all_same = False
model_grads = [mmp[0].grad.data for mmp in model_master_params if mmp[0].grad is not None]
master_grads = [mmp[1].grad.data for mmp in model_master_params if mmp[1].grad is not None]
if LossScaler.has_fused_kernel: if LossScaler.has_fused_kernel:
# The master grads should never be fp16. The kernel can't handle that, so bail out src_dst_pairs = {torch.float16 : {torch.float16 : [[],[]], torch.float32 : [[],[]]},
# and print a warning. This is overly conservative, and maybe we do want to enable torch.float32 : {torch.float16 : [[],[]], torch.float32 : [[],[]]}}
# fast downscaling of fp16 grads eventually.
if not LossScaler.warned_unscaling_non_fp32_grad: for model, master in model_master_params:
if any(grad.type() != "torch.cuda.FloatTensor" for grad in master_grads): # Sync the None-ness of model and master params
logger = logging.getLogger("apex.amp") if model.grad is None and master.grad is not None:
logger.warning( master.grad = None
"Attempting to unscale grads that are not FP32. " if model.grad is not None and master.grad is None:
"Unscaling non-fp32 grads may indicate an error. " master.grad = torch.empty_like(master)
"When using Amp, you don't need to call .half() on your model.")
# Warning: setting this to True unconditionally allows the possibility of an escape if model.grad is not None:
# if never-before-seen non-fp32 grads are created in some later iteration. if model.grad is master.grad and scale == 1.0 and not self.dynamic:
LossScaler.warned_unscaling_non_fp32_grad = True continue
# handle case of opt_level O1 and loss_scale 1.0. There's also some else:
# special-cased yields in scale_loss to potentially short-circuit earlier. src_dst_pairs[model.dtype][master.dtype][0].append(model.grad.data)
# TODO: Profile and find out if all the O(N) list processing in unscale() src_dst_pairs[model.dtype][master.dtype][1].append(master.grad.data)
# is a bottleneck.
assert len(src_dst_pairs[torch.float32][torch.float16][0]) == 0, "The loss scaler is "\
"being asked to unscale FP32 model gradients into FP16 master gradients. This is "\
"almost certainly an error."
for src, dst in product((torch.float16, torch.float32),
(torch.float16, torch.float32)):
if len(src_dst_pairs[src][dst][0]) > 0:
if not LossScaler.warned_unscaling_non_fp32_grad and dst is torch.float16:
print("Warning: unscaling grads that are not FP32. "
"Unscaling non-fp32 grads may indicate an error. "
"When using Amp, you don't need to call .half() on your model.")
# Setting this to True unconditionally allows the possibility of an escape
# if never-before-seen non-fp32 grads are created in some later iteration.
LossScaler.warned_unscaling_non_fp32_grad = True
multi_tensor_applier(
LossScaler.multi_tensor_scale_cuda,
self._overflow_buf,
src_dst_pairs[src][dst],
1./scale)
else:
# Sync the None-ness of model and master params.
all_same = True
for model, master in model_master_params:
if model.grad is None and master.grad is not None:
master.grad = None
if model.grad is not None and master.grad is None:
master.grad = torch.empty_like(master)
if model.grad is not master.grad:
all_same = False
if scale == 1.0 and all_same and not self.dynamic: if scale == 1.0 and all_same and not self.dynamic:
# torch.cuda.nvtx.range_pop()
return return
else:
multi_tensor_applier( model_grads = [mmp[0].grad.data for mmp in model_master_params if mmp[0].grad is not None]
LossScaler.multi_tensor_scale_cuda, master_grads = [mmp[1].grad.data for mmp in model_master_params if mmp[1].grad is not None]
self._overflow_buf,
[model_grads, master_grads],
1./scale)
else:
self.unscale_grads_python(model_grads, master_grads, scale) self.unscale_grads_python(model_grads, master_grads, scale)
# If the fused kernel is available, we only need one D2H memcopy and sync. # If the fused kernel is available, we only need one D2H memcopy and sync.
......
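A standalone sketch of the (model dtype, master dtype) bucketing introduced above: gradients are grouped so that each group can be unscaled with a single fused kernel launch via ``multi_tensor_applier``. This simplification ignores the None-syncing and the O1 short-circuit shown in the hunk:

from itertools import product
import torch

def bucket_grads_by_dtype(model_master_params):
    # One (sources, destinations) pair per (model dtype, master dtype) combination.
    buckets = {(src, dst): ([], [])
               for src, dst in product((torch.float16, torch.float32), repeat=2)}
    for model, master in model_master_params:
        if model.grad is not None and master.grad is not None:
            srcs, dsts = buckets[(model.dtype, master.dtype)]
            srcs.append(model.grad.data)
            dsts.append(master.grad.data)
    return buckets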
...@@ -5,6 +5,7 @@ from torch.nn.parameter import Parameter ...@@ -5,6 +5,7 @@ from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from ..amp.scaler import LossScaler from ..amp.scaler import LossScaler
from ..multi_tensor_apply import multi_tensor_applier
from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm
# TODO: Update overflow check + downscale to use Carl's fused kernel. # TODO: Update overflow check + downscale to use Carl's fused kernel.
...@@ -186,6 +187,12 @@ class FP16_Optimizer(object): ...@@ -186,6 +187,12 @@ class FP16_Optimizer(object):
self.clip_grad_norm = clip_grad_norm self.clip_grad_norm = clip_grad_norm
# TODO: Centralize exposure and import error checking for the C backend.
if multi_tensor_applier.available:
import amp_C
self.multi_tensor_scale = amp_C.multi_tensor_scale
self._dummy_overflow_buf = torch.cuda.IntTensor([0])
def maybe_print(self, msg): def maybe_print(self, msg):
if self.verbose: if self.verbose:
print(msg) print(msg)
...@@ -237,8 +244,16 @@ class FP16_Optimizer(object): ...@@ -237,8 +244,16 @@ class FP16_Optimizer(object):
# self.loss_scaler.update_scale(has_overflow) # self.loss_scaler.update_scale(has_overflow)
def _master_params_to_model_params(self): def _master_params_to_model_params(self):
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups): if multi_tensor_applier.available:
master_params_to_model_params(fp16_group, fp32_from_fp16_group) if len(self.all_fp16_params) > 0:
multi_tensor_applier(
self.multi_tensor_scale,
self._dummy_overflow_buf,
[self.all_fp32_from_fp16_params, self.all_fp16_params],
1.0)
else:
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
master_params_to_model_params(fp16_group, fp32_from_fp16_group)
# To consider: Integrate distributed with this wrapper by registering a hook on each variable # To consider: Integrate distributed with this wrapper by registering a hook on each variable
# that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream. # that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream.
...@@ -386,8 +401,8 @@ class FP16_Optimizer(object): ...@@ -386,8 +401,8 @@ class FP16_Optimizer(object):
# self._update_scale(self.overflow) # self._update_scale(self.overflow)
if self.overflow: if self.overflow:
print("OVERFLOW! Skipping step, reducing loss scale to {}".format( print("Gradient overflow. Skipping step, reducing " +
self.loss_scaler.loss_scale())) "loss scale to {}".format(self.loss_scaler.loss_scale()))
return return
if closure is not None: if closure is not None:
......
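For context, the non-fused branch kept above does the same work one tensor at a time. A sketch of that per-tensor path, assuming ``fp16util.master_params_to_model_params`` reduces to a dtype-converting copy (its body is not shown in this diff):

def master_params_to_model_params_python(model_params, master_params):
    # Per-tensor fallback; copy_() performs the FP32 -> FP16 conversion.
    # Equivalent to the fused multi_tensor_scale call above with scale 1.0.
    for model, master in zip(model_params, master_params):
        model.data.copy_(master.data)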
...@@ -31,7 +31,7 @@ __global__ void multi_tensor_apply_kernel( ...@@ -31,7 +31,7 @@ __global__ void multi_tensor_apply_kernel(
volatile int* noop_flag, volatile int* noop_flag,
T tl, T tl,
U callable, U callable,
ArgTypes... args) // in_t** in, float** out, float scale ArgTypes... args)
{ {
// Hand the chunk information to the user-supplied functor to process however it likes. // Hand the chunk information to the user-supplied functor to process however it likes.
callable(chunk_size, noop_flag, tl, args...); callable(chunk_size, noop_flag, tl, args...);
......
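On the Python side, the kernel above is reached through ``multi_tensor_applier``. A guarded sketch with the same call shape as the uses added in this commit (tensor contents are made up; requires the ``--cuda_ext`` build):

import torch
from apex.multi_tensor_apply import multi_tensor_applier

if multi_tensor_applier.available and torch.cuda.is_available():
    import amp_C
    overflow_buf = torch.cuda.IntTensor([0])   # set nonzero by the kernel on inf/nan
    srcs = [torch.randn(1000, device="cuda") for _ in range(4)]
    dsts = [torch.empty_like(t) for t in srcs]
    # op, overflow flag, list of tensor lists, extra args forwarded to the functor:
    multi_tensor_applier(amp_C.multi_tensor_scale, overflow_buf, [srcs, dsts], 0.5)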
...@@ -4,8 +4,41 @@ ...@@ -4,8 +4,41 @@
apex.amp apex.amp
=================================== ===================================
Amp (Automatic Mixed Precision) is a tool designed for ease of use and maximum safety in FP16 training. All potentially unsafe ops are performed in FP32 under the hood, while safe ops are performed using faster, Tensor Core-friendly FP16 math. Amp also automatically implements dynamic loss scaling. This page documents Amp (Automatic Mixed Precision) 1.0, a tool to enable Tensor Core-accelerated
training in only 3 lines of Python.
The intention of Amp is to be the "on-ramp" to easy FP16 training: achieve all the numerical stability of full FP32 training, with most of the performance benefits of full FP16 training. Amp allows users to easily experiment with different pure and mixed precision modes, including
pure FP16 training and pure FP32 training. Commonly-used default modes are chosen by
selecting an "optimization level" or ``opt_level``; each ``opt_level`` establishes a set of
properties that govern Amp's implementation of pure or mixed precision training.
Finer-grained control of how a given ``opt_level`` behaves can be achieved by also passing values for
particular properties directly to ``amp.initialize``. These manually specified values will
override the defaults established by the ``opt_level``. If you attempt to override a property
that does not make sense for the current ``opt_level``, Amp will raise an error with an explanation.
Currently, complete API documentation resides on the Github page: https://github.com/NVIDIA/apex/tree/master/apex/amp. Users **should not** manually cast their model or data to ``.half()``, regardless of what ``opt_level``
or properties are chosen. Amp intends that users start with an existing default (FP32) script,
add the three lines corresponding to the Amp 1.0 API, and begin training with mixed precision.
Amp can also be disabled, in which case the original script will behave exactly as it used to.
In this way, there's no risk in adopting the Amp 1.0 API, and a lot of potential performance benefit.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
...
# loss.backward() becomes:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
...
.. automodule:: apex.amp
.. currentmodule:: apex.amp
.. autofunction:: initialize
.. autofunction:: scale_loss
Legacy documentation for the old "Amp" API (equivalent to ``opt_level="O1"`` in the new Amp 1.0 API) can be found on the Github README: https://github.com/NVIDIA/apex/tree/master/apex/amp.
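A hypothetical call illustrating the per-property overrides described above (the specific values are for illustration, not recommendations):

model, optimizer = amp.initialize(model, optimizer,
                                  opt_level="O2",
                                  keep_batchnorm_fp32=True,
                                  loss_scale=128.0)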
...@@ -28,7 +28,7 @@ Installation requires CUDA 9 or later, PyTorch 0.4 or later, and Python 3. Insta ...@@ -28,7 +28,7 @@ Installation requires CUDA 9 or later, PyTorch 0.4 or later, and Python 3. Insta
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
:caption: FP16/Mixed Precision Utilities :caption: Legacy mixed precision utilities
fp16_utils fp16_utils
......
import argparse
import torch
parser = argparse.ArgumentParser(description='Compare')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
args = parser.parse_args()
base_file = str(args.opt_level) + "_" + str(args.loss_scale) + "_" + str(args.keep_batchnorm_fp32)
file_e = "True_" + base_file
file_p = "False_" + base_file
dict_e = torch.load(file_e)
dict_p = torch.load(file_p)
torch.set_printoptions(precision=10)
print(file_e)
print(file_p)
for n, (i_e, i_p) in enumerate(zip(dict_e["Iteration"], dict_p["Iteration"])):
assert i_e == i_p, "i_e = {}, i_p = {}".format(i_e, i_p)
loss_e = dict_e["Loss"][n]
loss_p = dict_p["Loss"][n]
assert loss_e == loss_p, "Iteration {}, loss_e = {}, loss_p = {}".format(i_e, loss_e, loss_p)
print("{:4} {:15.10f} {:15.10f} {:15.10f} {:15.10f}".format(
i_e,
loss_e,
loss_p,
dict_e["Speed"][n],
dict_p["Speed"][n]))
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size per process (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='Initial learning rate. Will be scaled by <global batch size>/256: args.lr = args.lr*float(args.batch_size*args.world_size)/256. A warmup schedule will also be applied over the first 5 epochs.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--prof', dest='prof', action='store_true',
help='Only run 10 iterations for profiling.')
parser.add_argument('--deterministic', action='store_true')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--has-ext', action='store_true')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--prints-to-process', type=int, default=10)
cudnn.benchmark = True
def fast_collate(batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 )
    for i, img in enumerate(imgs):
        nump_array = np.asarray(img, dtype=np.uint8)
        if nump_array.ndim < 3:
            # Grayscale image: add a channel dimension before moving channels first.
            nump_array = np.expand_dims(nump_array, axis=-1)
        nump_array = np.rollaxis(nump_array, 2)
        tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
best_prec1 = 0
args = parser.parse_args()
# Let multi_tensor_applier be the canary in the coalmine
# that verifies if the backend is what we think it is
assert multi_tensor_applier.available == args.has_ext
print("opt_level = {}".format(args.opt_level))
print("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32), type(args.keep_batchnorm_fp32))
print("loss_scale = {}".format(args.loss_scale), type(args.loss_scale))
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(args.local_rank)
torch.set_printoptions(precision=10)
def main():
global best_prec1, args
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.sync_bn:
import apex
print("using apex synced BN")
model = apex.parallel.convert_syncbn_model(model)
model = model.cuda()
# Scale learning rate based on global batch size
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
model, optimizer = amp.initialize(
model, optimizer,
# enabled=False,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale
)
if args.distributed:
# By default, apex.parallel.DistributedDataParallel overlaps communication with
# computation in the backward pass.
# model = DDP(model)
# delay_allreduce delays all communication to the end of the backward pass.
model = DDP(model, delay_allreduce=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# Optionally resume from a checkpoint
if args.resume:
# Use a local scope to avoid dangling references
def resume():
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
resume()
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
if(args.arch == "inception_v3"):
crop_size = 299
val_size = 320 # I chose this value arbitrarily, we can adjust.
else:
crop_size = 224
val_size = 256
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
# transforms.ToTensor(), Too slow
# normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(val_size),
transforms.CenterCrop(crop_size),
]))
train_sampler = None
val_sampler = None
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=val_sampler,
collate_fn=fast_collate)
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
if args.prof:
break
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
if args.local_rank == 0:
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
self.next_input = self.next_input.float()
self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
run_info_dict = {"Iteration" : [],
"Loss" : [],
"Speed" : []}
prefetcher = data_prefetcher(train_loader)
input, target = prefetcher.next()
i = -1
while input is not None:
i += 1
# No learning rate warmup for this test, to expose bitwise inaccuracies more quickly
# adjust_learning_rate(optimizer, epoch, i, len(train_loader))
if args.prof:
if i > 10:
break
# measure data loading time
data_time.update(time.time() - end)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# for param in model.parameters():
# print(param.data.double().sum().item(), param.grad.data.double().sum().item())
# torch.cuda.synchronize()
torch.cuda.nvtx.range_push("step")
optimizer.step()
torch.cuda.nvtx.range_pop()
torch.cuda.synchronize()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
input, target = prefetcher.next()
if args.local_rank == 0 and i % args.print_freq == 0 and i > 1:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader),
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
run_info_dict["Iteration"].append(i)
run_info_dict["Loss"].append(losses.val)
run_info_dict["Speed"].append(args.world_size * args.batch_size / batch_time.val)
if len(run_info_dict["Loss"]) == args.prints_to_process:
torch.save(run_info_dict,
str(args.has_ext) + "_" + str(args.opt_level) + "_" +
str(args.loss_scale) + "_" + str(args.keep_batchnorm_fp32))
quit()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
prefetcher = data_prefetcher(val_loader)
input, target = prefetcher.next()
i = -1
while input is not None:
i += 1
# compute output
with torch.no_grad():
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader),
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
input, target = prefetcher.next()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""LR schedule that should yield 76% converged accuracy with batch size 256"""
factor = epoch // 30
if epoch >= 80:
factor = factor + 1
lr = args.lr*(0.1**factor)
"""Warmup"""
if epoch < 5:
lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch)
# if(args.local_rank == 0):
# print("epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def reduce_tensor(tensor):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
#!/bin/bash
DATADIR="/home/mcarilli/Desktop/pt18data/apex/examples/imagenet/bare_metal_train_val/"
BASE_CMD="python main_amp.py -a resnet50 --b 128 --workers 4 --deterministic --prints-to-process 5"
print_banner() {
printf "\n\n\n\e[30m\e[42m$1\e[0m\n\n\n\n"
}
keep_batchnorms=(
""
"--keep-batchnorm-fp32 True"
"--keep-batchnorm-fp32 False"
)
loss_scales=(
""
"--loss-scale 1.0"
"--loss-scale 128.0"
"--loss-scale dynamic"
)
opt_levels=(
"O0"
"O1"
"O2"
"O3"
)
rm -f True* False*
set -e
pushd ../../..
python setup.py install --cuda_ext --cpp_ext
popd
for opt_level in "${opt_levels[@]}"
do
for loss_scale in "${loss_scales[@]}"
do
for keep_batchnorm in "${keep_batchnorms[@]}"
do
print_banner "$BASE_CMD --opt-level $opt_level ${loss_scale} ${keep_batchnorm} --has-ext $DATADIR"
set -x
$BASE_CMD --opt-level $opt_level ${loss_scale} ${keep_batchnorm} --has-ext $DATADIR
set +x
done
done
done
pushd ../../..
python setup.py install
popd
for opt_level in "${opt_levels[@]}"
do
for loss_scale in "${loss_scales[@]}"
do
for keep_batchnorm in "${keep_batchnorms[@]}"
do
print_banner "$BASE_CMD --opt-level $opt_level ${loss_scale} ${keep_batchnorm} $DATADIR"
set -x
$BASE_CMD --opt-level $opt_level ${loss_scale} ${keep_batchnorm} $DATADIR
set +x
done
done
done
for opt_level in "${opt_levels[@]}"
do
for loss_scale in "${loss_scales[@]}"
do
for keep_batchnorm in "${keep_batchnorms[@]}"
do
set -x
python compare.py --opt-level $opt_level ${loss_scale} ${keep_batchnorm}
set +x
done
done
done
pushd ../../..
python setup.py install --cuda_ext --cpp_ext
popd