chenpangpang / transformers · Commits · 35d48db8

Unverified commit 35d48db8, authored May 02, 2022 by Zachary Mueller; committed by GitHub on May 02, 2022.

Update no_trainer examples to use new logger (#17044)

* Propagate and fix imports

Parent: daecae1f
Changes: 13 changed files with 38 additions and 76 deletions (+38, −76).
examples/pytorch/README.md (+2, −2)
examples/pytorch/image-classification/run_image_classification_no_trainer.py (+3, −6)
examples/pytorch/language-modeling/run_clm_no_trainer.py (+3, −6)
examples/pytorch/language-modeling/run_mlm_no_trainer.py (+3, −6)
examples/pytorch/multiple-choice/run_swag_no_trainer.py (+3, −6)
examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py (+3, −6)
examples/pytorch/question-answering/run_qa_no_trainer.py (+3, −6)
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py (+3, −7)
examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py (+3, −7)
examples/pytorch/summarization/run_summarization_no_trainer.py (+3, −6)
examples/pytorch/text-classification/run_glue_no_trainer.py (+3, −6)
examples/pytorch/token-classification/run_ner_no_trainer.py (+3, −6)
examples/pytorch/translation/run_translation_no_trainer.py (+3, −6)
examples/pytorch/README.md

@@ -167,10 +167,10 @@ python xla_spawn.py --num_cores 8 \
 Most PyTorch example scripts have a version using the [🤗 Accelerate](https://github.com/huggingface/accelerate) library
 that exposes the training loop so it's easy for you to customize or tweak them to your needs. They all require you to
-install `accelerate` with
+install `accelerate` with the latest development version
 ```bash
-pip install accelerate
+pip install git+https://github.com/huggingface/accelerate
 ```
 Then you can easily launch any of the scripts by running
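The README hunk pins the development build because `accelerate.logging` does not exist in the older releases on PyPI. A hedged, hypothetical guard (not part of this commit) that fails fast with the same install hint:

```python
# Hypothetical snippet, not from the commit: verify the installed accelerate
# is new enough to expose the module the no_trainer scripts now import.
try:
    from accelerate.logging import get_logger  # noqa: F401
except ImportError as err:
    raise ImportError(
        "the no_trainer examples require a development build of accelerate: "
        "pip install git+https://github.com/huggingface/accelerate"
    ) from err
```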
examples/pytorch/image-classification/run_image_classification_no_trainer.py

@@ -37,6 +37,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -50,7 +51,7 @@ from transformers.utils import get_full_repo_name
 from transformers.utils.versions import require_version

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")

@@ -188,11 +189,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
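The three hunks above are the template every remaining script follows: import `get_logger`, swap it in for `logging.getLogger`, and delete the hand-rolled per-process guard. A minimal sketch of the resulting pattern, assuming an accelerate build that ships `accelerate.logging` (its logger drops records on non-main processes by default):

```python
import logging

from accelerate import Accelerator
from accelerate.logging import get_logger

# get_logger wraps logging.getLogger(__name__) in a multi-process adapter,
# which is what makes the deleted
# `logger.setLevel(... if accelerator.is_local_main_process ...)` guard
# unnecessary.
logger = get_logger(__name__)


def main():
    accelerator = Accelerator()
    # basicConfig still shapes the underlying stdlib handlers and format.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    # main_process_only=False emits on every rank, e.g. to show each
    # process's device placement; omit it and only the main process logs.
    logger.info(accelerator.state, main_process_only=False)
    logger.info("only the main process prints this line")


if __name__ == "__main__":
    main()
```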
examples/pytorch/language-modeling/run_clm_no_trainer.py

@@ -39,6 +39,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator, DistributedType
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -56,7 +57,7 @@ from transformers.utils import get_full_repo_name
 from transformers.utils.versions import require_version

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

@@ -234,11 +235,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
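`run_clm_no_trainer.py` and `run_mlm_no_trainer.py` are the only scripts here that also import `DistributedType` alongside `Accelerator`; that import is untouched by this commit but explains the longer `from accelerate import ...` context line. A hedged sketch of the usual reason for it (the padding detail is an assumption about the surrounding scripts, not something shown in these hunks):

```python
from accelerate import Accelerator, DistributedType

accelerator = Accelerator()

# XLA recompiles for every new tensor shape, so TPU runs prefer static,
# padded batch shapes; other backends can keep dynamic padding.
pad_to_multiple_of = 8 if accelerator.distributed_type == DistributedType.TPU else None
```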
examples/pytorch/language-modeling/run_mlm_no_trainer.py

@@ -39,6 +39,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator, DistributedType
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -56,7 +57,7 @@ from transformers.utils import get_full_repo_name
 from transformers.utils.versions import require_version

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
 MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
 MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)

@@ -245,11 +246,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
examples/pytorch/multiple-choice/run_swag_no_trainer.py

@@ -37,6 +37,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -54,7 +55,7 @@ from transformers import (
 from transformers.utils import PaddingStrategy, get_full_repo_name

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)
 # You should update this to your particular problem to have better documentation of `model_type`
 MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
 MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)

@@ -272,11 +273,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
examples/pytorch/question-answering/run_qa_beam_search_no_trainer.py

@@ -35,6 +35,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -58,7 +59,7 @@ check_min_version("4.19.0.dev0")
 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)


 def save_prefixed_metrics(results, output_dir, file_name: str = "all_results.json", metric_key_prefix: str = "eval"):

@@ -289,11 +290,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
examples/pytorch/question-answering/run_qa_no_trainer.py

@@ -35,6 +35,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -60,7 +61,7 @@ check_min_version("4.19.0.dev0")
 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)
 # You should update this to your particular problem to have better documentation of `model_type`
 MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
 MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)

@@ -318,11 +319,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py

@@ -16,7 +16,6 @@
 import argparse
 import json
-import logging
 import math
 import os
 import random

@@ -34,6 +33,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository, hf_hub_download
 from transformers import (

@@ -48,7 +48,7 @@ from transformers.utils import get_full_repo_name
 from transformers.utils.versions import require_version

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt")

@@ -308,11 +308,7 @@ def main():
     # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
     # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment
     accelerator = Accelerator(log_with="all", logging_dir=args.output_dir) if args.with_tracking else Accelerator()
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
examples/pytorch/speech-pretraining/run_wav2vec2_pretraining_no_trainer.py

@@ -16,7 +16,6 @@
 """ Pre-Training a 🤗 Wav2Vec2 model on unlabeled audio data """
 import argparse
-import logging
 import math
 import os
 from dataclasses import dataclass

@@ -31,6 +30,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from huggingface_hub import Repository
 from transformers import (
     AdamW,

@@ -46,7 +46,7 @@ from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
 from transformers.utils import get_full_repo_name

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)


 def parse_args():

@@ -362,11 +362,7 @@ def main():
     # Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
     accelerator = Accelerator()
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
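The two hunk sets above (semantic segmentation and wav2vec2 pretraining) also delete `import logging` outright, since after the switch nothing left in those files references the stdlib module directly. Every script keeps the per-machine verbosity gate for the libraries themselves; a sketch, with the non-main branch filled in as an assumption (the hunks only show the `if` side as context):

```python
import datasets
import transformers
from accelerate import Accelerator

accelerator = Accelerator()

if accelerator.is_local_main_process:
    # One process per machine surfaces dataset warnings and model info.
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    # Assumed, not shown in the diff: quiet the remaining ranks.
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()
```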
examples/pytorch/summarization/run_summarization_no_trainer.py

@@ -36,6 +36,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from filelock import FileLock
 from huggingface_hub import Repository

@@ -54,7 +55,7 @@ from transformers.utils import get_full_repo_name, is_offline_mode
 from transformers.utils.versions import require_version

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
 # You should update this to your particular problem to have better documentation of `model_type`

@@ -322,11 +323,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
examples/pytorch/text-classification/run_glue_no_trainer.py

@@ -29,6 +29,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -46,7 +47,7 @@ from transformers.utils import get_full_repo_name
 from transformers.utils.versions import require_version

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

@@ -200,11 +201,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
examples/pytorch/token-classification/run_ner_no_trainer.py

@@ -34,6 +34,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -53,7 +54,7 @@ from transformers.utils import get_full_repo_name
 from transformers.utils.versions import require_version

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt")
 # You should update this to your particular problem to have better documentation of `model_type`

@@ -253,11 +254,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()
examples/pytorch/translation/run_translation_no_trainer.py

@@ -35,6 +35,7 @@ from tqdm.auto import tqdm

 import transformers
 from accelerate import Accelerator
+from accelerate.logging import get_logger
 from accelerate.utils import set_seed
 from huggingface_hub import Repository
 from transformers import (

@@ -55,7 +56,7 @@ from transformers.utils import get_full_repo_name
 from transformers.utils.versions import require_version

-logger = logging.getLogger(__name__)
+logger = get_logger(__name__)

 require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/translation/requirements.txt")
 # You should update this to your particular problem to have better documentation of `model_type`

@@ -295,11 +296,7 @@ def main():
         datefmt="%m/%d/%Y %H:%M:%S",
         level=logging.INFO,
     )
-    logger.info(accelerator.state)
-
-    # Setup logging, we only want one process per machine to log things on the screen.
-    # accelerator.is_local_main_process is only True for one process per machine.
-    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
+    logger.info(accelerator.state, main_process_only=False)
     if accelerator.is_local_main_process:
         datasets.utils.logging.set_verbosity_warning()
         transformers.utils.logging.set_verbosity_info()