Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
chenpangpang
transformers
Commits
9dc965bb
Unverified
Commit
9dc965bb
authored
Jul 17, 2023
by
Marc Sun
Committed by
GitHub
Jul 17, 2023
Browse files
deprecate no_cuda (#24863)
* deprecate no_cuda * style * remove doc * remove doc 2 * fix style
parent
0f4502d3
Changes
1
Show whitespace changes
Inline
Side-by-side
Showing
1 changed file
with
23 additions
and
9 deletions
+23
-9
src/transformers/training_args.py
src/transformers/training_args.py
+23
-9
No files found.
src/transformers/training_args.py
View file @
9dc965bb
...
@@ -297,8 +297,8 @@ class TrainingArguments:
...
@@ -297,8 +297,8 @@ class TrainingArguments:
This should not be activated when the different nodes use the same storage as the files will be saved with
This should not be activated when the different nodes use the same storage as the files will be saved with
the same names for each node.
the same names for each node.
no_cuda
(`bool`, *optional*, defaults to `False`):
use_cpu
(`bool`, *optional*, defaults to `False`):
Whether to not use CUDA even when it is available or not.
Whether or not to use cpu. If set to False, we will use cuda or mps device if available.
seed (`int`, *optional*, defaults to 42):
seed (`int`, *optional*, defaults to 42):
Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the
Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the
[`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized parameters.
[`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized parameters.
...
@@ -313,7 +313,7 @@ class TrainingArguments:
...
@@ -313,7 +313,7 @@ class TrainingArguments:
installation](https://github.com/intel/intel-extension-for-pytorch).
installation](https://github.com/intel/intel-extension-for-pytorch).
bf16 (`bool`, *optional*, defaults to `False`):
bf16 (`bool`, *optional*, defaults to `False`):
Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher
Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher
NVIDIA architecture or using CPU (no_cuda). This is an experimental API and it may change.
NVIDIA architecture or using CPU (use_cpu). This is an experimental API and it may change.
fp16 (`bool`, *optional*, defaults to `False`):
fp16 (`bool`, *optional*, defaults to `False`):
Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.
Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.
fp16_opt_level (`str`, *optional*, defaults to 'O1'):
fp16_opt_level (`str`, *optional*, defaults to 'O1'):
...
@@ -793,7 +793,14 @@ class TrainingArguments:
...
@@ -793,7 +793,14 @@ class TrainingArguments:
)
)
},
},
)
)
no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
no_cuda: bool = field(
    default=False,
    metadata={"help": "This argument is deprecated. It will be removed in version 5.0 of 🤗 Transformers."},
)
use_cpu: bool = field(
    default=False,
    metadata={"help": " Whether or not to use cpu. If set to False, we will use cuda or mps device if available."},
)
use_mps_device
:
bool
=
field
(
use_mps_device
:
bool
=
field
(
default
=
False
,
default
=
False
,
metadata
=
{
metadata
=
{
...
@@ -820,7 +827,7 @@ class TrainingArguments:
...
@@ -820,7 +827,7 @@ class TrainingArguments:
metadata
=
{
metadata
=
{
"help"
:
(
"help"
:
(
"Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA"
"Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA"
" architecture or using CPU (
no_cuda
). This is an experimental API and it may change."
" architecture or using CPU (
use_cpu
). This is an experimental API and it may change."
)
)
},
},
)
)
...
@@ -1211,6 +1218,13 @@ class TrainingArguments:
...
@@ -1211,6 +1218,13 @@ class TrainingArguments:
)
)
# Go back to the underlying string or we won't be able to instantiate `IntervalStrategy` on it.
# Go back to the underlying string or we won't be able to instantiate `IntervalStrategy` on it.
self
.
evaluation_strategy
=
self
.
evaluation_strategy
.
value
self
.
evaluation_strategy
=
self
.
evaluation_strategy
.
value
if self.no_cuda:
    warnings.warn(
        "using `no_cuda` is deprecated and will be removed in version 5.0 of 🤗 Transformers. "
        "Use `use_cpu` instead",
        FutureWarning,
    )
    self.use_cpu = self.no_cuda
self
.
evaluation_strategy
=
IntervalStrategy
(
self
.
evaluation_strategy
)
self
.
evaluation_strategy
=
IntervalStrategy
(
self
.
evaluation_strategy
)
self
.
logging_strategy
=
IntervalStrategy
(
self
.
logging_strategy
)
self
.
logging_strategy
=
IntervalStrategy
(
self
.
logging_strategy
)
...
@@ -1305,10 +1319,10 @@ class TrainingArguments:
...
@@ -1305,10 +1319,10 @@ class TrainingArguments:
self
.
half_precision_backend
=
self
.
fp16_backend
self
.
half_precision_backend
=
self
.
fp16_backend
if
self
.
bf16
or
self
.
bf16_full_eval
:
if
self
.
bf16
or
self
.
bf16_full_eval
:
if self.no_cuda and not is_torch_bf16_cpu_available() and not is_torch_tpu_available():
if self.use_cpu and not is_torch_bf16_cpu_available() and not is_torch_tpu_available():
# cpu
# cpu
raise
ValueError
(
"Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10"
)
raise
ValueError
(
"Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10"
)
elif not self.no_cuda and torch.cuda.is_available() and not is_torch_bf16_gpu_available():
elif not self.use_cpu and torch.cuda.is_available() and not is_torch_bf16_gpu_available():
# gpu
# gpu
raise
ValueError
(
raise
ValueError
(
"Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0"
"Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0"
...
@@ -1702,7 +1716,7 @@ class TrainingArguments:
...
@@ -1702,7 +1716,7 @@ class TrainingArguments:
)
)
AcceleratorState
.
_reset_state
(
reset_partial_state
=
True
)
AcceleratorState
.
_reset_state
(
reset_partial_state
=
True
)
self
.
distributed_state
=
None
self
.
distributed_state
=
None
if self.no_cuda:
if self.use_cpu:
self
.
distributed_state
=
PartialState
(
cpu
=
True
,
backend
=
self
.
ddp_backend
)
self
.
distributed_state
=
PartialState
(
cpu
=
True
,
backend
=
self
.
ddp_backend
)
self
.
_n_gpu
=
0
self
.
_n_gpu
=
0
elif
is_sagemaker_mp_enabled
():
elif
is_sagemaker_mp_enabled
():
...
@@ -1752,7 +1766,7 @@ class TrainingArguments:
...
@@ -1752,7 +1766,7 @@ class TrainingArguments:
)
)
if
device
.
type
==
"mps"
:
if
device
.
type
==
"mps"
:
self
.
_n_gpu
=
1
self
.
_n_gpu
=
1
elif self.no_cuda:
elif self.use_cpu:
device
=
torch
.
device
(
"cpu"
)
device
=
torch
.
device
(
"cpu"
)
self
.
_n_gpu
=
0
self
.
_n_gpu
=
0
else
:
else
:
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment