Unverified Commit 95091e15 authored by Alex Hedges, committed by GitHub

Set `cache_dir` for `evaluate.load()` in example scripts (#28422)

While using `run_clm.py`,[^1] I noticed that some files were being added
to my global cache, not the local cache. I set the `cache_dir` parameter
for the one call to `evaluate.load()`, which partially solved the
problem. I figured that while I was fixing the one script upstream, I
might as well fix the problem in all other example scripts that I could.
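
For context, a minimal sketch of the pattern this change applies at each call site (the metric name and cache path below are illustrative placeholders; in the scripts the value comes from the parsed `--cache_dir` argument as `model_args.cache_dir`):

```python
import evaluate

# Illustrative stand-in for the scripts' parsed --cache_dir value (model_args.cache_dir).
cache_dir = "./hf_cache"

# Before: the metric's files are downloaded into the global Hugging Face cache.
# metric = evaluate.load("accuracy")

# After: passing cache_dir stores the downloaded files in the local cache instead.
metric = evaluate.load("accuracy", cache_dir=cache_dir)
```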

There are still some files being added to my global cache, but this
appears to be a bug in `evaluate` itself. This commit at least moves
some of the files into the local cache, which is better than before.

To create this PR, I applied the following regex-based transformation:
`evaluate\.load\((.*?)\)` -> `evaluate\.load\($1,
cache_dir=model_args.cache_dir\)`. After that, I manually cleaned up all
modified files, with `ruff` serving as useful guidance. During the
process, I removed one existing usage of the `cache_dir` parameter in a
script that did not declare a corresponding `--cache_dir` argument.
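
As a sketch, that find-and-replace corresponds roughly to the following Python `re.sub` call (the editor-style `$1` backreference becomes `\1` here; multi-line `evaluate.load(...)` calls are not matched, since `.` does not cross newlines by default):

```python
import re

# Pattern from the commit message; the replacement uses \1 instead of the
# editor-style $1 backreference.
pattern = r"evaluate\.load\((.*?)\)"
replacement = r"evaluate.load(\1, cache_dir=model_args.cache_dir)"

line = 'metric = evaluate.load("seqeval")'
print(re.sub(pattern, replacement, line))
# -> metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)
```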

[^1]: I specifically used `pytorch/language-modeling/run_clm.py` from
v4.34.1 of the library. For the original code, see the following URL:
https://github.com/huggingface/transformers/tree/acc394c4f5e1283c19783581790b3dc3105a3697/examples/pytorch/language-modeling/run_clm.py.
@@ -633,23 +633,23 @@ def main():
     if data_args.metric_name is not None:
         metric = (
-            evaluate.load(data_args.metric_name, config_name="multilabel")
+            evaluate.load(data_args.metric_name, config_name="multilabel", cache_dir=model_args.cache_dir)
             if is_multi_label
-            else evaluate.load(data_args.metric_name)
+            else evaluate.load(data_args.metric_name, cache_dir=model_args.cache_dir)
         )
         logger.info(f"Using metric {data_args.metric_name} for evaluation.")
     else:
         if is_regression:
-            metric = evaluate.load("mse")
+            metric = evaluate.load("mse", cache_dir=model_args.cache_dir)
             logger.info("Using mean squared error (mse) as regression score, you can use --metric_name to overwrite.")
         else:
             if is_multi_label:
-                metric = evaluate.load("f1", config_name="multilabel")
+                metric = evaluate.load("f1", config_name="multilabel", cache_dir=model_args.cache_dir)
                 logger.info(
                     "Using multilabel F1 for multi-label classification task, you can use --metric_name to overwrite."
                 )
             else:
-                metric = evaluate.load("accuracy")
+                metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
                 logger.info("Using accuracy as classification score, you can use --metric_name to overwrite.")
 
     def compute_metrics(p: EvalPrediction):

@@ -514,11 +514,11 @@ def main():
     # Get the metric function
     if data_args.task_name is not None:
-        metric = evaluate.load("glue", data_args.task_name)
+        metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
     elif is_regression:
-        metric = evaluate.load("mse")
+        metric = evaluate.load("mse", cache_dir=model_args.cache_dir)
     else:
-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.

@@ -385,7 +385,7 @@ def main():
         )
 
     # Get the metric function
-    metric = evaluate.load("xnli")
+    metric = evaluate.load("xnli", cache_dir=model_args.cache_dir)
 
     # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.

@@ -539,7 +539,7 @@ def main():
     data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
 
     # Metrics
-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)
 
     def compute_metrics(p):
         predictions, labels = p

@@ -564,7 +564,7 @@ def main():
     )
 
     # Metric
-    metric = evaluate.load("sacrebleu")
+    metric = evaluate.load("sacrebleu", cache_dir=model_args.cache_dir)
 
     def postprocess_text(preds, labels):
         preds = [pred.strip() for pred in preds]

@@ -440,7 +440,7 @@ def main():
     collate_fn = DefaultDataCollator(return_tensors="np")
 
     # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)
 
     # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
     # predictions and label_ids field) and has to return a dictionary string to float.

@@ -631,7 +631,9 @@ def main():
         references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
         return EvalPrediction(predictions=formatted_predictions, label_ids=references)
 
-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )
 
     def compute_metrics(p: EvalPrediction):
         return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -627,7 +627,7 @@ def main():
     # region Metric and KerasMetricCallback
     if training_args.do_eval:
-        metric = evaluate.load("rouge")
+        metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)
 
         if data_args.val_max_target_length is None:
             data_args.val_max_target_length = data_args.max_target_length

@@ -379,7 +379,7 @@ def main():
     # endregion
 
     # region Metric function
-    metric = evaluate.load("glue", data_args.task_name)
+    metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
 
     def compute_metrics(preds, label_ids):
         preds = preds["logits"]

@@ -511,7 +511,7 @@ def main():
     # endregion
 
     # Metrics
-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)
 
     def get_labels(y_pred, y_true):
         # Transform predictions and references tensos to numpy arrays

@@ -589,7 +589,7 @@ def main():
     # region Metric and postprocessing
     if training_args.do_eval:
-        metric = evaluate.load("sacrebleu")
+        metric = evaluate.load("sacrebleu", cache_dir=model_args.cache_dir)
 
         if data_args.val_max_target_length is None:
             data_args.val_max_target_length = data_args.max_target_length