Commit 0919389d authored by William Tambellini's avatar William Tambellini
Browse files

Add speed log to examples/run_squad.py

Add a speed estimate log (time per example)
for evaluation to examples/run_squad.py
parent fd97761c
@@ -22,6 +22,7 @@ import logging
 import os
 import random
 import glob
+import timeit
 import numpy as np
 import torch
@@ -218,6 +219,7 @@ def evaluate(args, model, tokenizer, prefix=""):
     logger.info("  Num examples = %d", len(dataset))
     logger.info("  Batch size = %d", args.eval_batch_size)
     all_results = []
+    start_time = timeit.default_timer()
     for batch in tqdm(eval_dataloader, desc="Evaluating"):
         model.eval()
         batch = tuple(t.to(args.device) for t in batch)
@@ -250,6 +252,9 @@ def evaluate(args, model, tokenizer, prefix=""):
                                end_logits = to_list(outputs[1][i]))
             all_results.append(result)
+    evalTime = timeit.default_timer() - start_time
+    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
     # Compute predictions
     output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
     output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix))
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment