OpenDAS / vision — Commit e08c9e31 (Unverified)
Authored Oct 17, 2021 by Dmytro; committed by GitHub on Oct 17, 2021

Replaced all 'no_grad()' instances with 'inference_mode()' (#4629)

Parent: fba4f42e
Showing 10 changed files with 10 additions and 10 deletions (+10 -10)

references/classification/train.py               +1 -1
references/classification/train_quantization.py  +1 -1
references/classification/utils.py               +1 -1
references/detection/engine.py                   +1 -1
references/detection/utils.py                    +1 -1
references/segmentation/train.py                 +1 -1
references/segmentation/utils.py                 +1 -1
references/similarity/train.py                   +1 -1
references/video_classification/train.py         +1 -1
references/video_classification/utils.py         +1 -1
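
Before the per-file hunks, a brief note on what the swap actually changes: torch.inference_mode() (available since PyTorch 1.9) disables gradient tracking like torch.no_grad(), and additionally skips view and version-counter bookkeeping, so evaluation runs slightly faster; the trade-off is that tensors created inside it cannot be used in autograd afterwards. A minimal sketch of the before/after pattern, using a toy nn.Linear stand-in rather than any of the reference models:

    import torch
    from torch import nn

    model = nn.Linear(8, 2)  # toy stand-in for the reference models
    model.eval()
    x = torch.randn(4, 8)

    # old pattern used throughout the reference scripts
    with torch.no_grad():
        out_old = model(x)

    # new pattern introduced by this commit (needs PyTorch >= 1.9)
    with torch.inference_mode():
        out_new = model(x)

    print(out_old.requires_grad, out_new.requires_grad)  # False False
    # caveat: out_new is an "inference tensor"; calling out_new.requires_grad_(True)
    # later would raise a RuntimeError, which the no_grad() output does not.
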
references/classification/train.py
@@ -57,7 +57,7 @@ def evaluate(model, criterion, data_loader, device, print_freq=100, log_suffix=""):
     header = f"Test: {log_suffix}"
     num_processed_samples = 0
-    with torch.no_grad():
+    with torch.inference_mode():
         for image, target in metric_logger.log_every(data_loader, print_freq, header):
             image = image.to(device, non_blocking=True)
             target = target.to(device, non_blocking=True)

references/classification/train_quantization.py
@@ -112,7 +112,7 @@ def main(args):
         print("Starting training for epoch", epoch)
         train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args.print_freq)
         lr_scheduler.step()
-        with torch.no_grad():
+        with torch.inference_mode():
             if epoch >= args.num_observer_update_epochs:
                 print("Disabling observer for subseq epochs, epoch = ", epoch)
                 model.apply(torch.quantization.disable_observer)

references/classification/utils.py
@@ -181,7 +181,7 @@ class ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel):
 def accuracy(output, target, topk=(1,)):
     """Computes the accuracy over the k top predictions for the specified values of k"""
-    with torch.no_grad():
+    with torch.inference_mode():
         maxk = max(topk)
         batch_size = target.size(0)
         if target.ndim == 2:

references/detection/engine.py
@@ -68,7 +68,7 @@ def _get_iou_types(model):
     return iou_types


-@torch.no_grad()
+@torch.inference_mode()
 def evaluate(model, data_loader, device):
     n_threads = torch.get_num_threads()
     # FIXME remove this and make paste_masks_in_image run on the GPU

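This file (and references/similarity/train.py below) uses the decorator form rather than the context manager: torch.inference_mode(), like torch.no_grad(), can wrap a whole function. A small sketch with a made-up helper, not the torchvision evaluate():

    import torch
    from torch import nn

    @torch.inference_mode()  # same decorator usage as in evaluate() above
    def param_l1_norm(model):
        # the entire body runs with autograd disabled
        return sum(p.abs().sum() for p in model.parameters())

    print(param_l1_norm(nn.Linear(3, 3)))
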
references/detection/utils.py
@@ -95,7 +95,7 @@ def reduce_dict(input_dict, average=True):
     world_size = get_world_size()
     if world_size < 2:
         return input_dict
-    with torch.no_grad():
+    with torch.inference_mode():
         names = []
         values = []
         # sort the keys so that they are consistent across processes

references/segmentation/train.py
@@ -49,7 +49,7 @@ def evaluate(model, data_loader, device, num_classes):
     confmat = utils.ConfusionMatrix(num_classes)
     metric_logger = utils.MetricLogger(delimiter="  ")
     header = "Test:"
-    with torch.no_grad():
+    with torch.inference_mode():
         for image, target in metric_logger.log_every(data_loader, 100, header):
             image, target = image.to(device), target.to(device)
             output = model(image)

references/segmentation/utils.py
@@ -76,7 +76,7 @@ class ConfusionMatrix(object):
         n = self.num_classes
         if self.mat is None:
             self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device)
-        with torch.no_grad():
+        with torch.inference_mode():
             k = (a >= 0) & (a < n)
             inds = n * a[k].to(torch.int64) + b[k]
             self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n)

references/similarity/train.py
@@ -51,7 +51,7 @@ def find_best_threshold(dists, targets, device):
     return best_thresh, accuracy


-@torch.no_grad()
+@torch.inference_mode()
 def evaluate(model, loader, device):
     model.eval()
     embeds, labels = [], []

references/video_classification/train.py
@@ -52,7 +52,7 @@ def evaluate(model, criterion, data_loader, device):
     model.eval()
     metric_logger = utils.MetricLogger(delimiter="  ")
     header = "Test:"
-    with torch.no_grad():
+    with torch.inference_mode():
         for video, target in metric_logger.log_every(data_loader, 100, header):
             video = video.to(device, non_blocking=True)
             target = target.to(device, non_blocking=True)

references/video_classification/utils.py
@@ -159,7 +159,7 @@ class MetricLogger(object):
 def accuracy(output, target, topk=(1,)):
     """Computes the accuracy over the k top predictions for the specified values of k"""
-    with torch.no_grad():
+    with torch.inference_mode():
         maxk = max(topk)
         batch_size = target.size(0)

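One practical consequence of the commit is a hard requirement on PyTorch 1.9 or newer, since torch.inference_mode() does not exist in earlier releases. A script that also has to run on older installations could fall back to no_grad(); this guard is a sketch only and is not part of the commit:

    import torch

    # use inference_mode when the installed torch provides it, otherwise no_grad
    inference_ctx = getattr(torch, "inference_mode", torch.no_grad)

    with inference_ctx():
        pass  # evaluation / metric code would go here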