Unverified Commit 38580455 authored by Gunjan Chhablani, committed by GitHub

Add model card creation snippet to example scripts (#13730)

* Update run_glue.py

* Update run_glue.py

* Add model creation snippet to other scripts

* Fix style
parent 66b01ce8
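Every example script gets the same two-step change: the model-card metadata (`kwargs`) is now built unconditionally, and when `--push_to_hub` is not passed the trainer still writes a model card locally via `trainer.create_model_card`. A minimal sketch of the resulting behaviour, assuming a `transformers.Trainer` that has already been trained and saved; the `trainer`/`training_args` objects and the metadata values below are placeholder examples, not taken from the diff:

    # `trainer` is a trained transformers.Trainer; `training_args` is its TrainingArguments.
    kwargs = {"finetuned_from": "bert-base-cased", "tasks": "text-classification", "dataset": "GLUE MRPC"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)        # upload the weights and an auto-generated model card to the Hub
    else:
        trainer.create_model_card(**kwargs)  # only write README.md into training_args.output_dir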
@@ -500,17 +500,19 @@ def main():
         trainer.log_metrics("eval", metrics)
         trainer.save_metrics("eval", metrics)

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
-        if data_args.dataset_name is not None:
-            kwargs["dataset_tags"] = data_args.dataset_name
-            if data_args.dataset_config_name is not None:
-                kwargs["dataset_args"] = data_args.dataset_config_name
-                kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-            else:
-                kwargs["dataset"] = data_args.dataset_name
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset_args"] = data_args.dataset_config_name
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)


 def _mp_fn(index):
@@ -528,17 +528,19 @@ def main():
         trainer.log_metrics("eval", metrics)
         trainer.save_metrics("eval", metrics)

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
-        if data_args.dataset_name is not None:
-            kwargs["dataset_tags"] = data_args.dataset_name
-            if data_args.dataset_config_name is not None:
-                kwargs["dataset_args"] = data_args.dataset_config_name
-                kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-            else:
-                kwargs["dataset"] = data_args.dataset_name
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "fill-mask"}
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset_args"] = data_args.dataset_config_name
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)


 def _mp_fn(index):
@@ -499,17 +499,19 @@ def main():
         trainer.log_metrics("eval", metrics)
         trainer.save_metrics("eval", metrics)

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "language-modeling"}
-        if data_args.dataset_name is not None:
-            kwargs["dataset_tags"] = data_args.dataset_name
-            if data_args.dataset_config_name is not None:
-                kwargs["dataset_args"] = data_args.dataset_config_name
-                kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-            else:
-                kwargs["dataset"] = data_args.dataset_name
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "language-modeling"}
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset_args"] = data_args.dataset_config_name
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)


 def _mp_fn(index):
@@ -430,15 +430,19 @@ def main():
         trainer.log_metrics("eval", metrics)
         trainer.save_metrics("eval", metrics)

+    kwargs = dict(
+        finetuned_from=model_args.model_name_or_path,
+        tasks="multiple-choice",
+        dataset_tags="swag",
+        dataset_args="regular",
+        dataset="SWAG",
+        language="en",
+    )
+
     if training_args.push_to_hub:
-        trainer.push_to_hub(
-            finetuned_from=model_args.model_name_or_path,
-            tasks="multiple-choice",
-            dataset_tags="swag",
-            dataset_args="regular",
-            dataset="SWAG",
-            language="en",
-        )
+        trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)


 def _mp_fn(index):
@@ -623,17 +623,19 @@ def main():
         trainer.log_metrics("predict", metrics)
         trainer.save_metrics("predict", metrics)

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
-        if data_args.dataset_name is not None:
-            kwargs["dataset_tags"] = data_args.dataset_name
-            if data_args.dataset_config_name is not None:
-                kwargs["dataset_args"] = data_args.dataset_config_name
-                kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-            else:
-                kwargs["dataset"] = data_args.dataset_name
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset_args"] = data_args.dataset_config_name
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)


 def _mp_fn(index):
@@ -656,17 +656,19 @@ def main():
         trainer.log_metrics("predict", metrics)
         trainer.save_metrics("predict", metrics)

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
-        if data_args.dataset_name is not None:
-            kwargs["dataset_tags"] = data_args.dataset_name
-            if data_args.dataset_config_name is not None:
-                kwargs["dataset_args"] = data_args.dataset_config_name
-                kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-            else:
-                kwargs["dataset"] = data_args.dataset_name
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset_args"] = data_args.dataset_config_name
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)


 def _mp_fn(index):
@@ -622,17 +622,19 @@ def main():
                 with open(output_prediction_file, "w") as writer:
                     writer.write("\n".join(predictions))

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"}
-        if data_args.dataset_name is not None:
-            kwargs["dataset_tags"] = data_args.dataset_name
-            if data_args.dataset_config_name is not None:
-                kwargs["dataset_args"] = data_args.dataset_config_name
-                kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-            else:
-                kwargs["dataset"] = data_args.dataset_name
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"}
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset_args"] = data_args.dataset_config_name
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)

     return results
@@ -546,15 +546,17 @@ def main():
                             item = label_list[item]
                             writer.write(f"{index}\t{item}\n")

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
-        if data_args.task_name is not None:
-            kwargs["language"] = "en"
-            kwargs["dataset_tags"] = "glue"
-            kwargs["dataset_args"] = data_args.task_name
-            kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
+    if data_args.task_name is not None:
+        kwargs["language"] = "en"
+        kwargs["dataset_tags"] = "glue"
+        kwargs["dataset_args"] = data_args.task_name
+        kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}"

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)


 def _mp_fn(index):
@@ -542,17 +542,19 @@ def main():
                 for prediction in true_predictions:
                     writer.write(" ".join(prediction) + "\n")

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"}
-        if data_args.dataset_name is not None:
-            kwargs["dataset_tags"] = data_args.dataset_name
-            if data_args.dataset_config_name is not None:
-                kwargs["dataset_args"] = data_args.dataset_config_name
-                kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-            else:
-                kwargs["dataset"] = data_args.dataset_name
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"}
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset_args"] = data_args.dataset_config_name
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)


 def _mp_fn(index):
@@ -590,21 +590,23 @@ def main():
                 with open(output_prediction_file, "w", encoding="utf-8") as writer:
                     writer.write("\n".join(predictions))

-    if training_args.push_to_hub:
-        kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"}
-        if data_args.dataset_name is not None:
-            kwargs["dataset_tags"] = data_args.dataset_name
-            if data_args.dataset_config_name is not None:
-                kwargs["dataset_args"] = data_args.dataset_config_name
-                kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
-            else:
-                kwargs["dataset"] = data_args.dataset_name
+    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"}
+    if data_args.dataset_name is not None:
+        kwargs["dataset_tags"] = data_args.dataset_name
+        if data_args.dataset_config_name is not None:
+            kwargs["dataset_args"] = data_args.dataset_config_name
+            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
+        else:
+            kwargs["dataset"] = data_args.dataset_name

-        languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
-        if len(languages) > 0:
-            kwargs["language"] = languages
+    languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None]
+    if len(languages) > 0:
+        kwargs["language"] = languages

+    if training_args.push_to_hub:
         trainer.push_to_hub(**kwargs)
+    else:
+        trainer.create_model_card(**kwargs)

     return results