Unverified Commit 3c0c699f authored by Lahfa Samy's avatar Lahfa Samy Committed by GitHub
Browse files

Raise ValueError instead of asserts in src/transformers/benchmark/benchmark.py (#13951)

* Raise ValueError exception instead of assert

* Remove unnecessary f-strings

* Remove unused f-strings
parent 91758e39
...@@ -111,7 +111,8 @@ class PyTorchBenchmark(Benchmark): ...@@ -111,7 +111,8 @@ class PyTorchBenchmark(Benchmark):
if self.args.fp16: if self.args.fp16:
logger.info("Running training in Mixed Precision...") logger.info("Running training in Mixed Precision...")
assert self.args.is_gpu, "Mixed precision is possible only for GPU." if not self.args.is_gpu:
raise ValueError("Mixed precision is possible only for GPU.")
# amp seems to have memory leaks so that memory usage # amp seems to have memory leaks so that memory usage
# is measured using .half() for now https://github.com/NVIDIA/apex/issues/439 # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
model.half() model.half()
...@@ -170,7 +171,8 @@ class PyTorchBenchmark(Benchmark): ...@@ -170,7 +171,8 @@ class PyTorchBenchmark(Benchmark):
if self.args.fp16: if self.args.fp16:
logger.info("Running training in Mixed Precision...") logger.info("Running training in Mixed Precision...")
assert self.args.is_gpu, "Mixed precision is possible only for GPU." if not self.args.is_gpu:
raise ValueError("Mixed precision is possible only for GPU.")
# amp seems to have memory leaks so that memory usage # amp seems to have memory leaks so that memory usage
# is measured using .half() for now https://github.com/NVIDIA/apex/issues/439 # is measured using .half() for now https://github.com/NVIDIA/apex/issues/439
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment