Unverified Commit c6c78733 authored by Qubitium, committed by GitHub

Rename sanity_evaluation to eval_on_start (#31192)

* Rename sanity_evaluation to eval_on_start

* move arg back to last
parent c230504b
@@ -2175,7 +2175,7 @@ class Trainer:
         grad_norm: Optional[float] = None
         self.control = self.callback_handler.on_train_begin(args, self.state, self.control)
-        if args.sanity_evaluation:
+        if args.eval_on_start:
             self._evaluate(trial, ignore_keys_for_eval, skip_scheduler=True)
         total_batched_samples = 0
@@ -772,8 +772,8 @@ class TrainingArguments:
             that takes a boolean argument `compute_result`, which when passed `True`, will trigger the final global
             summary statistics from the batch-level summary statistics you've accumulated over the evaluation set.
-        sanity_evaluation (`bool`, *optional*, defaults to `False`):
-            Whether or not to perform a sanity check to ensure that the validation steps work correctly. It will be performed before training.
+        eval_on_start (`bool`, *optional*, defaults to `False`):
+            Whether to perform an evaluation step (sanity check) before training to ensure the validation steps work correctly.
         """
     framework = "pt"
@@ -1457,7 +1457,7 @@ class TrainingArguments:
         metadata={"help": "Break eval metrics calculation into batches to save memory."},
     )
-    sanity_evaluation: bool = field(
+    eval_on_start: bool = field(
        default=False,
        metadata={
            "help": "Whether to run through the entire `evaluation` step at the very beginning of training as a sanity check."
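For reference, here is a minimal, self-contained sketch of the renamed flag in use. `TinyModel`, the inline dataset, and the `out` output directory are made up for illustration; `eval_on_start` is the actual `TrainingArguments` field introduced by this commit.

```python
import torch
from torch import nn
from datasets import Dataset
from transformers import Trainer, TrainingArguments


class TinyModel(nn.Module):
    """Toy regressor so the example runs without downloading a checkpoint."""

    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 1)

    def forward(self, x=None, labels=None):
        preds = self.linear(x).squeeze(-1)
        loss = nn.functional.mse_loss(preds, labels)
        return {"loss": loss, "logits": preds}


# 32 random samples, reused for both training and evaluation.
data = Dataset.from_dict(
    {"x": torch.randn(32, 4).tolist(), "labels": torch.randn(32).tolist()}
)

args = TrainingArguments(
    output_dir="out",        # placeholder path
    eval_strategy="epoch",   # regular in-training evaluation
    eval_on_start=True,      # one extra evaluation pass before the first step
    num_train_epochs=1,
    report_to="none",
)

trainer = Trainer(model=TinyModel(), args=args, train_dataset=data, eval_dataset=data)
trainer.train()  # evaluation runs once up front, then training proceeds
```

With `eval_on_start=True`, a broken metric function or evaluation dataloader surfaces before any training compute is spent, which is the sanity-check behavior the docstring describes.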