OpenDAS / apex / Commits / 03b0eeb8

Commit 03b0eeb8
authored Feb 04, 2019 by Michael Carilli

Only warn once in LossScaler constructor

parent a153c41a
Showing 1 changed file with 13 additions and 11 deletions.

apex/amp/scaler.py  (+13, -11)
...

@@ -31,13 +31,15 @@ class LossScaler(object):
         try:
             import amp_C
             LossScaler.has_fused_kernel = True
-            LossScaler.scale_check_overflow = amp_C.scale_check_overflow
+            LossScaler.scale_check_overflow_cuda = amp_C.scale_check_overflow
             self._overflow_buf = torch.cuda.ByteTensor(1024,)
         except ImportError as err:
-            print("Warning: Amp fused downscale kernel is unavailable, possibly because apex "
-                  "was installed without --cuda_ext. Using Python fallback. ImportError was: ", err)
+            if not LossScaler.warned_no_fused_kernel:
+                print("Warning: Amp fused downscale kernel is unavailable, possibly because apex "
+                      "was installed without --cuda_ext. Using Python fallback. ImportError was: ", err)
             LossScaler.has_fused_kernel = False
             LossScaler.scale_check_overflow = scale_check_overflow_python
+            LossScaler.warned_no_fused_kernel = True

     def loss_scale(self):
         return self._loss_scale

...
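Note on the hunk above: warned_no_fused_kernel is read and written on the class (LossScaler.warned_no_fused_kernel), not on self, so the flag is shared by every instance and the ImportError warning is printed at most once per process no matter how many LossScaler objects are constructed. A minimal, self-contained sketch of the same warn-once idiom (the names Worker and fast_ext are hypothetical, not from apex):

class Worker(object):
    # Class-level flag, shared by every instance
    # (same role as LossScaler.warned_no_fused_kernel).
    warned_no_fast_path = False

    def __init__(self):
        try:
            import fast_ext  # hypothetical optional compiled extension
            self._impl = fast_ext.run
        except ImportError as err:
            if not Worker.warned_no_fast_path:
                print("Warning: fast_ext unavailable, using Python fallback:", err)
            Worker.warned_no_fast_path = True
            self._impl = lambda: None

# Constructing many instances prints the warning only for the first one.
workers = [Worker() for _ in range(10)]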
@@ -49,19 +51,19 @@ class LossScaler(object):
         for p in iter_params(param_groups):
             if p.grad is not None:
                 if LossScaler.has_fused_kernel and p.grad.data.type() == "torch.cuda.FloatTensor":
-                    LossScaler.scale_check_overflow(p.grad.data,
-                                                    1./scale,
-                                                    self._overflow_buf)
+                    LossScaler.scale_check_overflow_cuda(p.grad.data,
+                                                         1./scale,
+                                                         self._overflow_buf)
                 else:
-                    if p.grad.data.type() != "torch.cuda.FloatTensor" and not LossScaler.warned_fp16_grad:
+                    if (p.grad.data.type() != "torch.cuda.FloatTensor"
+                            and not LossScaler.warned_fp16_grad):
                         logger = logging.getLogger("apex.amp")
                         logger.warning("Incoming grads are not fp32 (not master grads). "
                                        "Downscaling non-fp32 grads may indicate an error. "
                                        "When using Amp, you don't need to call .half() on your model.")
                         LossScaler.warned_fp16_grad = True
-                    self._has_overflow = LossScaler.scale_check_overflow(p.grad.data,
-                                                                         1./scale)
+                    self._has_overflow = scale_check_overflow_python(p.grad.data,
+                                                                     1./scale)
                     if self._has_overflow:
                         break

...
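In the second hunk, the Python fallback scale_check_overflow_python is called with p.grad.data and 1./scale (the reciprocal of the loss scale) and its return value becomes self._has_overflow; the fused amp_C path instead receives the preallocated self._overflow_buf and its Python return value is not used. The fallback's body is not part of this diff, so the following is only a rough sketch of a pure-PyTorch downscale-and-check, an assumption rather than the code from scaler.py:

import torch

def scale_check_overflow_python(grad_data, scale):
    # Sketch (assumed behavior): report overflow, otherwise downscale in place.
    cpu_sum = float(grad_data.float().sum())
    if cpu_sum in (float('inf'), -float('inf')) or cpu_sum != cpu_sum:  # inf or NaN
        return True
    grad_data.mul_(scale)
    return False

# Example: a finite gradient is downscaled and reported as not overflowed,
# while an inf gradient is reported as overflowed.
g = torch.full((4,), 2.0)
print(scale_check_overflow_python(g, 1. / 128))                              # False
print(scale_check_overflow_python(torch.tensor([float('inf')]), 1. / 128))   # True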