"git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "00620583993714f10351943818e95e00f10f9f36"
Unverified Commit 38580455 authored by Gunjan Chhablani's avatar Gunjan Chhablani Committed by GitHub
Browse files

Add model card creation snippet to example scripts (#13730)

* Update run_glue.py

* Update run_glue.py

* Add model card creation snippet to other scripts

* Fix style
parent 66b01ce8
...@@ -500,7 +500,6 @@ def main(): ...@@ -500,7 +500,6 @@ def main():
trainer.log_metrics("eval", metrics) trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics) trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
if data_args.dataset_name is not None: if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name kwargs["dataset_tags"] = data_args.dataset_name
...@@ -510,7 +509,10 @@ def main(): ...@@ -510,7 +509,10 @@ def main():
else: else:
kwargs["dataset"] = data_args.dataset_name kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index): def _mp_fn(index):
......
...@@ -528,7 +528,6 @@ def main(): ...@@ -528,7 +528,6 @@ def main():
trainer.log_metrics("eval", metrics) trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics) trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
if data_args.dataset_name is not None: if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name kwargs["dataset_tags"] = data_args.dataset_name
...@@ -538,7 +537,10 @@ def main(): ...@@ -538,7 +537,10 @@ def main():
else: else:
kwargs["dataset"] = data_args.dataset_name kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index): def _mp_fn(index):
......
...@@ -499,7 +499,6 @@ def main(): ...@@ -499,7 +499,6 @@ def main():
trainer.log_metrics("eval", metrics) trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics) trainer.save_metrics("eval", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "language-modeling"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "language-modeling"}
if data_args.dataset_name is not None: if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name kwargs["dataset_tags"] = data_args.dataset_name
...@@ -509,7 +508,10 @@ def main(): ...@@ -509,7 +508,10 @@ def main():
else: else:
kwargs["dataset"] = data_args.dataset_name kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index): def _mp_fn(index):
......
...@@ -430,8 +430,7 @@ def main(): ...@@ -430,8 +430,7 @@ def main():
trainer.log_metrics("eval", metrics) trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics) trainer.save_metrics("eval", metrics)
if training_args.push_to_hub: kwargs = dict(
trainer.push_to_hub(
finetuned_from=model_args.model_name_or_path, finetuned_from=model_args.model_name_or_path,
tasks="multiple-choice", tasks="multiple-choice",
dataset_tags="swag", dataset_tags="swag",
...@@ -440,6 +439,11 @@ def main(): ...@@ -440,6 +439,11 @@ def main():
language="en", language="en",
) )
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index): def _mp_fn(index):
# For xla_spawn (TPUs) # For xla_spawn (TPUs)
......
...@@ -623,7 +623,6 @@ def main(): ...@@ -623,7 +623,6 @@ def main():
trainer.log_metrics("predict", metrics) trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics) trainer.save_metrics("predict", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
if data_args.dataset_name is not None: if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name kwargs["dataset_tags"] = data_args.dataset_name
...@@ -633,7 +632,10 @@ def main(): ...@@ -633,7 +632,10 @@ def main():
else: else:
kwargs["dataset"] = data_args.dataset_name kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index): def _mp_fn(index):
......
...@@ -656,7 +656,6 @@ def main(): ...@@ -656,7 +656,6 @@ def main():
trainer.log_metrics("predict", metrics) trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics) trainer.save_metrics("predict", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
if data_args.dataset_name is not None: if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name kwargs["dataset_tags"] = data_args.dataset_name
...@@ -666,7 +665,10 @@ def main(): ...@@ -666,7 +665,10 @@ def main():
else: else:
kwargs["dataset"] = data_args.dataset_name kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index): def _mp_fn(index):
......
...@@ -622,7 +622,6 @@ def main(): ...@@ -622,7 +622,6 @@ def main():
with open(output_prediction_file, "w") as writer: with open(output_prediction_file, "w") as writer:
writer.write("\n".join(predictions)) writer.write("\n".join(predictions))
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"}
if data_args.dataset_name is not None: if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name kwargs["dataset_tags"] = data_args.dataset_name
...@@ -632,7 +631,10 @@ def main(): ...@@ -632,7 +631,10 @@ def main():
else: else:
kwargs["dataset"] = data_args.dataset_name kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results return results
......
...@@ -546,7 +546,6 @@ def main(): ...@@ -546,7 +546,6 @@ def main():
item = label_list[item] item = label_list[item]
writer.write(f"{index}\t{item}\n") writer.write(f"{index}\t{item}\n")
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if data_args.task_name is not None: if data_args.task_name is not None:
kwargs["language"] = "en" kwargs["language"] = "en"
...@@ -554,7 +553,10 @@ def main(): ...@@ -554,7 +553,10 @@ def main():
kwargs["dataset_args"] = data_args.task_name kwargs["dataset_args"] = data_args.task_name
kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}" kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index): def _mp_fn(index):
......
...@@ -542,7 +542,6 @@ def main(): ...@@ -542,7 +542,6 @@ def main():
for prediction in true_predictions: for prediction in true_predictions:
writer.write(" ".join(prediction) + "\n") writer.write(" ".join(prediction) + "\n")
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"}
if data_args.dataset_name is not None: if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name kwargs["dataset_tags"] = data_args.dataset_name
...@@ -552,7 +551,10 @@ def main(): ...@@ -552,7 +551,10 @@ def main():
else: else:
kwargs["dataset"] = data_args.dataset_name kwargs["dataset"] = data_args.dataset_name
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index): def _mp_fn(index):
......
...@@ -590,7 +590,6 @@ def main(): ...@@ -590,7 +590,6 @@ def main():
with open(output_prediction_file, "w", encoding="utf-8") as writer: with open(output_prediction_file, "w", encoding="utf-8") as writer:
writer.write("\n".join(predictions)) writer.write("\n".join(predictions))
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"} kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"}
if data_args.dataset_name is not None: if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name kwargs["dataset_tags"] = data_args.dataset_name
...@@ -604,7 +603,10 @@ def main(): ...@@ -604,7 +603,10 @@ def main():
if len(languages) > 0: if len(languages) > 0:
kwargs["language"] = languages kwargs["language"] = languages
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs) trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results return results
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment