Commit 8d6665f2 authored by Myle Ott's avatar Myle Ott
Browse files

Warn when using FP16 on pre-Volta GPUs

parent e7422192
...@@ -42,6 +42,9 @@ def main(args): ...@@ -42,6 +42,9 @@ def main(args):
# Build trainer # Build trainer
if args.fp16: if args.fp16:
if torch.cuda.get_device_capability(0)[0] < 7:
print('| WARNING: your device does NOT support faster training with --fp16,'
' please switch to FP32 which is likely to be faster')
trainer = FP16Trainer(args, task, model, criterion) trainer = FP16Trainer(args, task, model, criterion)
else: else:
if torch.cuda.get_device_capability(0)[0] >= 7: if torch.cuda.get_device_capability(0)[0] >= 7:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment