"...git@developer.sourcefind.cn:chenpangpang/transformers.git" did not exist on "7419d807ff3d2ca45757c9e3090388b721e131ce"
Unverified Commit 95091e15 authored by Alex Hedges, committed by GitHub

Set `cache_dir` for `evaluate.load()` in example scripts (#28422)

While using `run_clm.py`,[^1] I noticed that some files were being added
to my global cache, not the local cache. I set the `cache_dir` parameter
for the one call to `evaluate.load()`, which partially solved the
problem. I figured that while I was fixing the one script upstream, I
might as well fix the problem in all other example scripts that I could.

There are still some files being added to my global cache, but this
appears to be a bug in `evaluate` itself. This commit at least moves
some of the files into the local cache, which is better than before.
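In concrete terms, the change applied across the scripts is just the following pattern (a minimal sketch; `/path/to/local/cache` is a placeholder for whatever value a script's `--cache_dir` option receives, and the exact default cache location can vary with your `HF_HOME` settings):

```python
import evaluate

# Before: with no cache_dir, evaluate.load() writes metric files to the global
# Hugging Face cache (typically somewhere under ~/.cache/huggingface/).
metric = evaluate.load("accuracy")

# After: metric files are written under the user-specified directory instead,
# alongside the rest of the files the script caches there.
metric = evaluate.load("accuracy", cache_dir="/path/to/local/cache")
```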

To create this PR, I applied the following regex-based transformation:
search for `evaluate\.load\((.*?)\)` and replace it with
`evaluate.load($1, cache_dir=model_args.cache_dir)`. After that, I
manually fixed up the modified files, with `ruff` serving as useful
guidance. During the process, I also removed one existing usage of the
`cache_dir` parameter in a script that did not have a corresponding
`--cache_dir` argument declared.
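For reference, a rough Python equivalent of that find-and-replace is sketched below (the helper name is only for illustration; the actual edit used an editor-style `$1` backreference, whereas `re.sub` uses `\1`, and multi-line `evaluate.load(...)` calls still needed the manual follow-up described above):

```python
import re

# Append cache_dir=model_args.cache_dir to every single-line evaluate.load(...) call.
PATTERN = re.compile(r"evaluate\.load\((.*?)\)")

def add_cache_dir(source: str) -> str:
    return PATTERN.sub(r"evaluate.load(\1, cache_dir=model_args.cache_dir)", source)

print(add_cache_dir('metric = evaluate.load("rouge")'))
# -> metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)
```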

[^1]: I specifically used `pytorch/language-modeling/run_clm.py` from
v4.34.1 of the library. For the original code, see the following URL:
https://github.com/huggingface/transformers/tree/acc394c4f5e1283c19783581790b3dc3105a3697/examples/pytorch/language-modeling/run_clm.py.
parent 5fd5ef76
@@ -853,7 +853,7 @@ def main():
            yield batch

    # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)

    def postprocess_text(preds, labels):
        preds = [pred.strip() for pred in preds]

@@ -807,7 +807,9 @@ def main():
        references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

    def compute_metrics(p: EvalPrediction):
        return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -577,7 +577,7 @@ def main():
        return

    # 8. Load Metric
-    metric = evaluate.load("wer")
+    metric = evaluate.load("wer", cache_dir=model_args.cache_dir)

    def compute_metrics(preds, labels):
        # replace padded labels by the padding token

@@ -710,7 +710,7 @@ def main():
    )

    # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)

    def postprocess_text(preds, labels):
        preds = [pred.strip() for pred in preds]

@@ -599,9 +599,9 @@ def main():
    p_eval_step = jax.pmap(eval_step, axis_name="batch")

    if data_args.task_name is not None:
-        metric = evaluate.load("glue", data_args.task_name)
+        metric = evaluate.load("glue", data_args.task_name, cache_dir=model_args.cache_dir)
    else:
-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

    logger.info(f"===== Starting training ({num_epochs} epochs) =====")
    train_time = 0

@@ -676,7 +676,7 @@ def main():
    p_eval_step = jax.pmap(eval_step, axis_name="batch")

-    metric = evaluate.load("seqeval")
+    metric = evaluate.load("seqeval", cache_dir=model_args.cache_dir)

    def get_labels(y_pred, y_true):
        # Transform predictions and references tensos to numpy arrays

@@ -349,7 +349,7 @@ def main():
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.

@@ -287,7 +287,7 @@ def main():
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
-    metric = evaluate.load("accuracy")
+    metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.

@@ -282,7 +282,6 @@ def main():
        dataset = load_dataset(
            "imagefolder",
            data_files=data_files,
-            cache_dir=args.cache_dir,
            task="image-classification",
        )
        # See more about loading custom images at

@@ -583,7 +583,7 @@ def main():
                logits = logits[0]
            return logits.argmax(dim=-1)

-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

        def compute_metrics(eval_preds):
            preds, labels = eval_preds

@@ -590,7 +590,7 @@ def main():
                logits = logits[0]
            return logits.argmax(dim=-1)

-        metric = evaluate.load("accuracy")
+        metric = evaluate.load("accuracy", cache_dir=model_args.cache_dir)

        def compute_metrics(eval_preds):
            preds, labels = eval_preds

@@ -627,7 +627,9 @@ def main():
        references = [{"id": str(ex["id"]), "answers": ex[answer_column_name]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

    def compute_metrics(p: EvalPrediction):
        return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -647,7 +647,9 @@ def main():
        references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

    def compute_metrics(p: EvalPrediction):
        return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -631,7 +631,9 @@ def main():
        pad_to_multiple_of=8 if training_args.fp16 else None,
    )

-    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")
+    metric = evaluate.load(
+        "squad_v2" if data_args.version_2_with_negative else "squad", cache_dir=model_args.cache_dir
+    )

    def compute_metrics(p: EvalPrediction):
        return metric.compute(predictions=p.predictions, references=p.label_ids)

@@ -366,7 +366,7 @@ def main():
    label2id = {v: str(k) for k, v in id2label.items()}

    # Load the mean IoU metric from the datasets package
-    metric = evaluate.load("mean_iou")
+    metric = evaluate.load("mean_iou", cache_dir=model_args.cache_dir)

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.

@@ -530,7 +530,7 @@ def main():
        args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # Instantiate metric
-    metric = evaluate.load("mean_iou")
+    metric = evaluate.load("mean_iou", cache_dir=args.cache_dir)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initializes automatically on the main process.

@@ -680,7 +680,7 @@ def main():
    # instantiate a data collator and the trainer

    # Define evaluation metrics during training, *i.e.* word error rate, character error rate
-    eval_metrics = {metric: evaluate.load(metric) for metric in data_args.eval_metrics}
+    eval_metrics = {metric: evaluate.load(metric, cache_dir=model_args.cache_dir) for metric in data_args.eval_metrics}

    # for large datasets it is advised to run the preprocessing on a
    # single machine first with ``args.preprocessing_only`` since there will mostly likely

@@ -702,7 +702,7 @@ def main():
    # instantiate a data collator and the trainer

    # Define evaluation metrics during training, *i.e.* word error rate, character error rate
-    eval_metrics = {metric: evaluate.load(metric) for metric in data_args.eval_metrics}
+    eval_metrics = {metric: evaluate.load(metric, cache_dir=model_args.cache_dir) for metric in data_args.eval_metrics}

    # for large datasets it is advised to run the preprocessing on a
    # single machine first with ``args.preprocessing_only`` since there will mostly likely

@@ -520,7 +520,7 @@ def main():
        return

    # 8. Load Metric
-    metric = evaluate.load("wer")
+    metric = evaluate.load("wer", cache_dir=model_args.cache_dir)

    def compute_metrics(pred):
        pred_ids = pred.predictions

@@ -645,7 +645,7 @@ def main():
    )

    # Metric
-    metric = evaluate.load("rouge")
+    metric = evaluate.load("rouge", cache_dir=model_args.cache_dir)

    def postprocess_text(preds, labels):
        preds = [pred.strip() for pred in preds]