# evaluate_ai2d.py — merge per-rank AI2D result shards and report accuracy.
import argparse
import json

from evaluate_mmmu import get_input_output_paths
from evaluate_vqav2 import compute_vqa_accuracy


def merge_input_files(input_path):
    """Merge per-rank AI2D result files into one JSON file for the evaluator.

    Each input file is expected to be JSON-lines, one result per line, with
    at least the keys "sample_id", "answer", and "gt_answer".

    Args:
        input_path: Path passed to get_input_output_paths to resolve the
            individual input file paths and the merged output path.

    Returns:
        Path to the merged output JSON file (a list of result dicts).
    """
    input_file_paths, output_file_path = get_input_output_paths(input_path, task="AI2D")

    # Keyed by sample_id so duplicate samples across shards are dropped.
    results = dict()

    for input_file_path in input_file_paths:
        with open(input_file_path, "r") as input_file:
            for line in input_file:
                res = json.loads(line)
                sample_id = res["sample_id"]

                # Ignore possible duplicates.
                if sample_id in results:
                    continue

                results[sample_id] = {
                    "question_id": sample_id,
                    "answer": res["answer"],
                    "gt_answer": res["gt_answer"],
                }

    results = list(results.values())

    with open(output_file_path, "w") as output_file:
        json.dump(results, output_file)

    return output_file_path


def ai2d_eval(input_path):
    """Merge AI2D result shards and return the VQA-style accuracy."""
    merged_file_path = merge_input_files(input_path)
    return compute_vqa_accuracy(merged_file_path, task="AI2D")


if __name__ == "__main__":
    # Command-line entry point: evaluate AI2D results under --input-path.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--input-path', type=str, help="Path to input file(s)")
    cli_args = arg_parser.parse_args()

    avg_acc = ai2d_eval(cli_args.input_path)
    print(f"===== AI2D Accuracy {avg_acc:.2f}% =====")