Unverified commit e08c9e31 authored by Dmytro, committed by GitHub

Replaced all 'no_grad()' instances with 'inference_mode()' (#4629)

parent fba4f42e
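
For context: torch.inference_mode(), available since PyTorch 1.9, is a stricter counterpart to torch.no_grad(). Both disable gradient tracking, but inference_mode() also skips view tracking and version-counter bumps, which makes inference-only code a bit faster; the trade-off is that tensors created under it cannot be used in autograd later. A minimal sketch (not part of the diff, using a hypothetical toy model) of the two usage patterns this commit touches, context manager and decorator:

    # Illustrative sketch only; the model and inputs are made up for the example.
    import torch

    model = torch.nn.Linear(4, 2)   # hypothetical toy model
    x = torch.randn(8, 4)

    # Context-manager form, mirroring the `with torch.no_grad():` call sites below
    with torch.inference_mode():
        y = model(x)
    print(y.requires_grad)  # False

    # Decorator form, mirroring the `@torch.no_grad()` call sites below
    @torch.inference_mode()
    def evaluate(model, x):
        return model(x)
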
@@ -57,7 +57,7 @@ def evaluate(model, criterion, data_loader, device, print_freq=100, log_suffix="
     header = f"Test: {log_suffix}"

     num_processed_samples = 0
-    with torch.no_grad():
+    with torch.inference_mode():
         for image, target in metric_logger.log_every(data_loader, print_freq, header):
             image = image.to(device, non_blocking=True)
             target = target.to(device, non_blocking=True)
@@ -112,7 +112,7 @@ def main(args):
         print("Starting training for epoch", epoch)
         train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args.print_freq)
         lr_scheduler.step()
-        with torch.no_grad():
+        with torch.inference_mode():
             if epoch >= args.num_observer_update_epochs:
                 print("Disabling observer for subseq epochs, epoch = ", epoch)
                 model.apply(torch.quantization.disable_observer)
@@ -181,7 +181,7 @@ class ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel):

 def accuracy(output, target, topk=(1,)):
     """Computes the accuracy over the k top predictions for the specified values of k"""
-    with torch.no_grad():
+    with torch.inference_mode():
         maxk = max(topk)
         batch_size = target.size(0)
         if target.ndim == 2:
@@ -68,7 +68,7 @@ def _get_iou_types(model):
     return iou_types


-@torch.no_grad()
+@torch.inference_mode()
 def evaluate(model, data_loader, device):
     n_threads = torch.get_num_threads()
     # FIXME remove this and make paste_masks_in_image run on the GPU
@@ -95,7 +95,7 @@ def reduce_dict(input_dict, average=True):
     world_size = get_world_size()
     if world_size < 2:
         return input_dict
-    with torch.no_grad():
+    with torch.inference_mode():
         names = []
         values = []
         # sort the keys so that they are consistent across processes
@@ -49,7 +49,7 @@ def evaluate(model, data_loader, device, num_classes):
     confmat = utils.ConfusionMatrix(num_classes)
     metric_logger = utils.MetricLogger(delimiter=" ")
     header = "Test:"
-    with torch.no_grad():
+    with torch.inference_mode():
         for image, target in metric_logger.log_every(data_loader, 100, header):
             image, target = image.to(device), target.to(device)
             output = model(image)
@@ -76,7 +76,7 @@ class ConfusionMatrix(object):
         n = self.num_classes
         if self.mat is None:
             self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
-        with torch.no_grad():
+        with torch.inference_mode():
             k = (a >= 0) & (a < n)
             inds = n * a[k].to(torch.int64) + b[k]
             self.mat += torch.bincount(inds, minlength=n ** 2).reshape(n, n)
@@ -51,7 +51,7 @@ def find_best_threshold(dists, targets, device):
     return best_thresh, accuracy


-@torch.no_grad()
+@torch.inference_mode()
 def evaluate(model, loader, device):
     model.eval()
     embeds, labels = [], []
@@ -52,7 +52,7 @@ def evaluate(model, criterion, data_loader, device):
     model.eval()
     metric_logger = utils.MetricLogger(delimiter=" ")
     header = "Test:"
-    with torch.no_grad():
+    with torch.inference_mode():
         for video, target in metric_logger.log_every(data_loader, 100, header):
             video = video.to(device, non_blocking=True)
             target = target.to(device, non_blocking=True)
@@ -159,7 +159,7 @@ class MetricLogger(object):

 def accuracy(output, target, topk=(1,)):
     """Computes the accuracy over the k top predictions for the specified values of k"""
-    with torch.no_grad():
+    with torch.inference_mode():
         maxk = max(topk)
         batch_size = target.size(0)