chenpangpang / transformers · Commits · 9681f052
"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "06a6fea7820dc3e89d09430a49bce1c72b173647"
Commit 9681f052 (Unverified)
Authored by Jiahao Li on Nov 16, 2022; committed by GitHub on Nov 16, 2022
Fix result saving errors of pytorch examples (#20276)
Parent: e627e9b5
Showing 8 changed files with 26 additions and 27 deletions.
+3  -3   examples/pytorch/image-classification/run_image_classification_no_trainer.py
+2  -2   examples/pytorch/language-modeling/run_clm_no_trainer.py
+2  -2   examples/pytorch/language-modeling/run_mlm_no_trainer.py
+6  -4   examples/pytorch/multiple-choice/run_swag_no_trainer.py
+2  -1   examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
+4  -10  examples/pytorch/summarization/run_summarization_no_trainer.py
+2  -1   examples/pytorch/text-classification/run_glue_no_trainer.py
+5  -4   examples/pytorch/token-classification/run_ner_no_trainer.py
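Taken together, the hunks below converge on one pattern: build an all_results dict by prefixing every computed metric key with "eval_", then dump the whole dict to all_results.json, instead of hand-picking individual keys. A minimal sketch of that pattern (the save_all_results helper is hypothetical, not part of the commit):

import json
import os

def save_all_results(output_dir, eval_metric):
    # Prefix each metric key with "eval_" and write the full dict,
    # rather than selecting a single key such as "accuracy".
    all_results = {f"eval_{k}": v for k, v in eval_metric.items()}
    with open(os.path.join(output_dir, "all_results.json"), "w") as f:
        json.dump(all_results, f)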
examples/pytorch/image-classification/run_image_classification_no_trainer.py

@@ -571,9 +571,9 @@ def main():
         if args.push_to_hub:
             repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)

-        if args.output_dir is not None:
-            with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
-                json.dump({"eval_accuracy": eval_metric["accuracy"]}, f)
+        all_results = {f"eval_{k}": v for k, v in eval_metric.items()}
+        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+            json.dump(all_results, f)

 if __name__ == "__main__":
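In run_image_classification_no_trainer.py, eval_metric is the dict returned by metric.compute() (the evaluate accuracy metric), so the comprehension reproduces the old eval_accuracy key while keeping any other metrics the dict may carry. An illustration with a made-up value:

# Hypothetical value; accuracy.compute() returns a dict of this shape.
eval_metric = {"accuracy": 0.912}
all_results = {f"eval_{k}": v for k, v in eval_metric.items()}
print(all_results)  # {'eval_accuracy': 0.912}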
examples/pytorch/language-modeling/run_clm_no_trainer.py

@@ -666,8 +666,8 @@ def main():
             if args.push_to_hub:
                 repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)

-                with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
-                    json.dump({"perplexity": perplexity}, f)
+        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+            json.dump({"perplexity": perplexity}, f)

 if __name__ == "__main__":
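The change here appears to be indentation-only: the save block moves out of the if args.push_to_hub: branch, so all_results.json is written whether or not the run pushes to the Hub. A self-contained sketch of the fixed control flow (hypothetical helper, made-up values):

import json
import os
import tempfile

def save_results(push_to_hub, output_dir, perplexity):
    # Mirrors the fixed flow: the dump is no longer nested inside
    # the push_to_hub branch.
    if push_to_hub:
        pass  # the real script calls repo.push_to_hub(...) here
    with open(os.path.join(output_dir, "all_results.json"), "w") as f:
        json.dump({"perplexity": perplexity}, f)

with tempfile.TemporaryDirectory() as tmp:
    save_results(push_to_hub=False, output_dir=tmp, perplexity=12.3)
    # all_results.json exists even though nothing was pushed
    print(os.path.exists(os.path.join(tmp, "all_results.json")))  # True

run_mlm_no_trainer.py below gets the identical fix.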
examples/pytorch/language-modeling/run_mlm_no_trainer.py

@@ -711,8 +711,8 @@ def main():
             if args.push_to_hub:
                 repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)

-                with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
-                    json.dump({"perplexity": perplexity}, f)
+        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+            json.dump({"perplexity": perplexity}, f)

 if __name__ == "__main__":
examples/pytorch/multiple-choice/run_swag_no_trainer.py

@@ -85,7 +85,7 @@ def parse_args():
         "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
     )
     parser.add_argument(
-        "--max_length",
+        "--max_seq_length",
         type=int,
         default=128,
         help=(

@@ -424,7 +424,7 @@ def main():
         tokenized_examples = tokenizer(
             first_sentences,
             second_sentences,
-            max_length=args.max_length,
+            max_length=args.max_seq_length,
             padding=padding,
             truncation=True,
         )

@@ -654,8 +654,10 @@ def main():
             tokenizer.save_pretrained(args.output_dir)
             if args.push_to_hub:
                 repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
-        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
-            json.dump({"eval_accuracy": eval_metric["accuracy"]}, f)
+
+        all_results = {f"eval_{k}": v for k, v in eval_metric.items()}
+        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+            json.dump(all_results, f)

 if __name__ == "__main__":
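Besides the result-saving change in its last hunk, run_swag_no_trainer.py renames --max_length to --max_seq_length and updates the one call site to match, since argparse derives the attribute name from the flag. A self-contained sketch of that behavior:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--max_seq_length", type=int, default=128)
args = parser.parse_args(["--max_seq_length", "256"])
assert args.max_seq_length == 256  # attribute name tracks the renamed flag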
examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py

@@ -681,8 +681,9 @@ def main():
         if args.push_to_hub:
             repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)

+        all_results = {f"eval_{k}": v for k, v in eval_metrics.items()}
         with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
-            json.dump({"eval_overall_accuracy": eval_metrics["overall_accuracy"]}, f)
+            json.dump(all_results, f)

 if __name__ == "__main__":
examples/pytorch/summarization/run_summarization_no_trainer.py

@@ -747,16 +747,10 @@ def main():
             tokenizer.save_pretrained(args.output_dir)
             if args.push_to_hub:
                 repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)
-        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
-            json.dump(
-                {
-                    "eval_rouge1": result["rouge1"],
-                    "eval_rouge2": result["rouge2"],
-                    "eval_rougeL": result["rougeL"],
-                    "eval_rougeLsum": result["rougeLsum"],
-                },
-                f,
-            )
+
+        all_results = {f"eval_{k}": v for k, v in result.items()}
+        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+            json.dump(all_results, f)

 if __name__ == "__main__":
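Here the dict comprehension replaces four hand-enumerated ROUGE keys, and it also picks up any extra keys that result may carry. With illustrative (made-up) scores:

result = {"rouge1": 42.1, "rouge2": 19.7, "rougeL": 29.0, "rougeLsum": 29.2}
all_results = {f"eval_{k}": v for k, v in result.items()}
# -> {'eval_rouge1': 42.1, 'eval_rouge2': 19.7, 'eval_rougeL': 29.0, 'eval_rougeLsum': 29.2}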
examples/pytorch/text-classification/run_glue_no_trainer.py

@@ -625,8 +625,9 @@ def main():
             logger.info(f"mnli-mm: {eval_metric}")

     if args.output_dir is not None:
+        all_results = {f"eval_{k}": v for k, v in eval_metric.items()}
         with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
-            json.dump({"eval_accuracy": eval_metric["accuracy"]}, f)
+            json.dump(all_results, f)

 if __name__ == "__main__":
examples/pytorch/token-classification/run_ner_no_trainer.py

@@ -766,10 +766,11 @@ def main():
         if args.push_to_hub:
             repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)

-        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
-            json.dump(
-                {"eval_accuracy": eval_metric["accuracy"], "train_loss": total_loss.item() / len(train_dataloader)}, f
-            )
+        all_results = {f"eval_{k}": v for k, v in eval_metric.items()}
+        if args.with_tracking:
+            all_results.update({"train_loss": total_loss.item() / len(train_dataloader)})
+        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
+            json.dump(all_results, f)

 if __name__ == "__main__":
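The NER fix additionally guards the train_loss entry behind args.with_tracking, presumably because total_loss is only accumulated when tracking is enabled, so the old unconditional dump would fail without --with_tracking. A self-contained illustration of the guard (all values hypothetical):

with_tracking = False
eval_metric = {"accuracy": 0.88, "f1": 0.91}

all_results = {f"eval_{k}": v for k, v in eval_metric.items()}
if with_tracking:
    # In the real script total_loss exists only when tracking is on;
    # guarding keeps the dump from touching an undefined name.
    all_results.update({"train_loss": 0.42})
print(all_results)  # train_loss is omitted when tracking is off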