import collections
import itertools
from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Union
from icecream import ic
import re

from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.meteor.meteor import Meteor
import editdistance

"""
this script support:
ANLS for DocVQA

RelaxedAccuracy for ChartQA

ContainAccuracy for MultimodalOCR LLM zero-shot text-recognition


"""



def anls_metric(target: str, prediction: str, theta: float = 0.5):
    """Calculates ANLS for DocVQA.

    There does not seem to be an official evaluation script.
    Public implementation on which this implementation is based:
    https://github.com/herobd/layoutlmv2/blob/main/eval_docvqa.py#L92

    Original paper (see Eq 1): https://arxiv.org/pdf/1907.00490.pdf

    Args:
        target: Target string.
        prediction: Predicted string.
        theta: Filter threshold set to 0.5 for DocVQA.

    Returns:
        ANLS score.
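
    Illustrative example (toy strings, not from DocVQA):
        >>> round(anls_metric("receipt", "reciept"), 3)
        0.714
        >>> anls_metric("invoice", "receipt")
        0.0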
    """

    edit_distance = editdistance.eval(target, prediction)
    normalized_ld = edit_distance / max(len(target), len(prediction))
    return 1.0 - normalized_ld if normalized_ld < theta else 0.0

def relaxed_correctness(target: str,
                        prediction: str,
                        max_relative_change: float = 0.05) -> float:
    """Calculates relaxed correctness.

    The correctness tolerates certain error ratio defined by max_relative_change.
    See https://arxiv.org/pdf/2203.10244.pdf, end of section 5.1:
    “Following Methani et al. (2020), we use a relaxed accuracy measure for the
    numeric answers to allow a minor inaccuracy that may result from the automatic
    data extraction process. We consider an answer to be correct if it is within
    5% of the gold answer. For non-numeric answers, we still need an exact match
    to consider an answer to be correct.”

    Args:
    target: Target string.
    prediction: Predicted string.
    max_relative_change: Maximum relative change.

    Returns:
    Whether the prediction was correct given the specified tolerance.
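
    Illustrative examples (toy values, not from ChartQA):
    >>> relaxed_correctness("100", "104")
    1.0
    >>> relaxed_correctness("100", "106")
    0.0
    >>> relaxed_correctness("25%", "0.25")
    1.0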
    """

    def _to_float(text: str) -> Optional[float]:
        try:
            if text.endswith("%"):
                # Convert percentages to floats.
                return float(text.rstrip("%")) / 100.0
            else:
                return float(text)
        except ValueError:
            return None

    prediction_float = _to_float(prediction)
    target_float = _to_float(target)
    if prediction_float is not None and target_float:
        relative_change = abs(prediction_float - target_float) / abs(target_float)
        return float(relative_change <= max_relative_change)
    else:
        return float(prediction.lower() == target.lower())


def exact_match(target: str, prediction: str):
    return float(target == prediction)


def iou_match(target: list, prediction: list, threshold=0.5):
    """
    target/prediction: normalized bbox (list(float)), xyxy
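
    Illustrative toy examples (hypothetical boxes; the second pair has no overlap):
    >>> iou_match([0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.5])
    1.0
    >>> iou_match([0.0, 0.0, 0.4, 0.4], [0.5, 0.5, 1.0, 1.0])
    0.0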
    """
    g_x1, g_y1, g_x2, g_y2 = target
    p_x1, p_y1, p_x2, p_y2 = prediction
    
    g_w = g_x2 - g_x1
    p_w = p_x2 - p_x1
    g_h = g_y2 - g_y1
    p_h = p_y2 - p_y1

    # Overlap width/height; if either is non-positive the boxes do not intersect.
    # (Checking W and H separately avoids treating two negative extents as a
    # positive intersection area.)
    W = min(g_x2, p_x2) - max(g_x1, p_x1)
    H = min(g_y2, p_y2) - max(g_y1, p_y1)
    if W <= 0 or H <= 0:
        return 0.0

    Intersection = W * H
    Union = g_w * g_h + p_w * p_h - Intersection

    if Intersection / Union >= threshold:
        return 1.0
    else:
        return 0.0


def remove_special_chars_and_lower(s):
    """Strips every character that is not alphanumeric or whitespace, then lowercases."""
    pattern = r"[^a-zA-Z0-9\s]"
    s = re.sub(pattern, "", s)
    return s.lower()

def contain_match(target: str, prediction: str):
    """Returns 1.0 if the target occurs as a whole word in the prediction, else 0.0."""
    def has_word(sentence, word):
        pattern = r"\b" + re.escape(word) + r"\b"
        return re.search(pattern, sentence) is not None
    return float(has_word(prediction, target))
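
# Illustrative behaviour (hypothetical strings): contain_match requires a whole-word
# match of the target inside the prediction, not merely a substring:
#   contain_match("cat", "the cat sat on the mat")  ->  1.0
#   contain_match("cat", "concatenate")             ->  0.0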


def cider(
    targets: Sequence[Sequence[str]],
    predictions: Sequence[str]) -> Tuple[float, List[float]]:
    """Compute CIDEr score."""
    coco_tokenizer = PTBTokenizer()
    scorer = Cider()
    score, scores = scorer.compute_score(
      gts=coco_tokenizer.tokenize({
          str(i): [{"caption": t} for t in target]
          for i, target in enumerate(targets)
      }),
      res=coco_tokenizer.tokenize({
          str(i): [{"caption": prediction}]
          for i, prediction in enumerate(predictions)
      }))
    score = float(score) * 100.0
    scores = [float(s) * 100.0 for s in scores.tolist()]
    return score, scores
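
# The captioning metrics in this file (cider, rouge, meteor, bleu) all expect the
# same input layout, e.g. (hypothetical captions):
#   targets     = [["a cat on a mat", "a cat sits on a mat"], ["a red car"]]
#   predictions = ["a cat on the mat", "a red car parked outside"]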

def rouge(
    targets: Sequence[Sequence[str]],
    predictions: Sequence[str]) -> Tuple[float, List[float]]:
    """Compute ROUGE-L score."""
    coco_tokenizer = PTBTokenizer()
    scorer = Rouge()
    score, scores = scorer.compute_score(
      gts=coco_tokenizer.tokenize({
          str(i): [{"caption": t} for t in target]
          for i, target in enumerate(targets)
      }),
      res=coco_tokenizer.tokenize({
          str(i): [{"caption": prediction}]
          for i, prediction in enumerate(predictions)
      }))
    score = float(score) * 100.0
    scores = [float(s) * 100.0 for s in scores.tolist()]
    return score, scores

def meteor(
    targets: Sequence[Sequence[str]],
    predictions: Sequence[str]) -> Tuple[float, List[float]]:
    """Compute METEOR score."""
    coco_tokenizer = PTBTokenizer()
    scorer = Meteor()
    score, scores = scorer.compute_score(
      gts=coco_tokenizer.tokenize({
          str(i): [{"caption": t} for t in target]
          for i, target in enumerate(targets)
      }),
      res=coco_tokenizer.tokenize({
          str(i): [{"caption": prediction}]
          for i, prediction in enumerate(predictions)
      }))
    score = float(score) * 100.0
    scores = [float(s) * 100.0 for s in scores]
    return score, scores

def bleu(
    ngram: int,
    targets: Sequence[Sequence[str]],
    predictions: Sequence[str]) -> Tuple[float, List[float]]:
    """Compute the BLEU score for the requested n-gram order (1 <= ngram <= 4)."""
    assert 1 <= ngram <= 4
    coco_tokenizer = PTBTokenizer()

    scorer = Bleu(4)
    score, scores = scorer.compute_score(
      gts=coco_tokenizer.tokenize({
          str(i): [{"caption": t} for t in target]
          for i, target in enumerate(targets)
      }),
      res=coco_tokenizer.tokenize({
          str(i): [{"caption": prediction}]
          for i, prediction in enumerate(predictions)
      }))
    
    
    # Bleu(4) returns results for all four n-gram orders; keep only the requested one.
    score = score[ngram - 1]
    scores = scores[ngram - 1]
    score = float(score) * 100.0
    scores = [float(s) * 100.0 for s in scores]
    return score, scores


def metric_calculate(
    targets: Sequence[Sequence[str]],
    predictions: Sequence[str],
    metric_fn: Callable[[str, str], Any],
    normalize_fn: Callable[[str], str] = lambda v: v):
    """Aggregate target-prediction pair metrics over a dataset."""
    assert len(targets) == len(predictions)
    total = 0
    scores = []
    for prediction, target in zip(predictions, targets):
        p = normalize_fn(prediction)
        score = max(metric_fn(normalize_fn(t), p) for t in target)
        scores.append(score)
        total += score
    score = (100.0 * total) / len(targets)
    return score, scores
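
# Example (hypothetical inputs): each prediction is scored against all of its
# references, the best score per example is kept, and the mean is reported as a
# percentage:
#   metric_calculate([["a", "b"]], ["b"], metric_fn=exact_match)  ->  (100.0, [1.0])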

def doc_evaluate(
    metric: str,
    targets: Sequence[Sequence[str]],
    predictions: Sequence[str]):
    """Calculates evaluation metrics.

    Args:
    metrcs: metric names
    targets: list of list of strings.
    predictions: list of strings.

    Returns:
    dictionary with metric names as keys and metric value as values.
    """
    results = {}

    assert metric in ['ExactAccuracy', 'RelaxedAccuracy', 'ANLS', 'ContainAccuracy', 
                        'CIDEr', 'BLEU1', 'BLEU2', 'BLEU3', 'BLEU4', 'RougeL', 'Meteor',
                        'IOU@0.5']
    if metric=='ExactAccuracy': # case sensitive
        score, scores = metric_calculate(targets, predictions, metric_fn=exact_match)
    elif metric=='IOU@0.5': 
        score, scores = metric_calculate(targets, predictions, metric_fn=iou_match)
    elif metric == 'ANLS':
        score, scores = metric_calculate(targets, predictions, metric_fn=anls_metric, normalize_fn=lambda v: v.lower())
    elif metric == 'RelaxedAccuracy':
        score, scores = metric_calculate(targets, predictions, metric_fn=relaxed_correctness)
    elif metric == 'ContainAccuracy':
        score, scores = metric_calculate(targets, predictions, metric_fn=contain_match, normalize_fn=remove_special_chars_and_lower)
    elif metric == 'CIDEr':
        score, scores = cider(targets, predictions)
    elif metric == 'BLEU1':
        score, scores = bleu(1, targets, predictions)
    elif metric == 'BLEU2':
        score, scores = bleu(2, targets, predictions)
    elif metric == 'BLEU3':
        score, scores = bleu(3, targets, predictions)
    elif metric == 'BLEU4':
        score, scores = bleu(4, targets, predictions)
    elif metric == 'RougeL':
        score, scores = rouge(targets, predictions)
    elif metric == 'Meteor':
        score, scores = meteor(targets, predictions)
    return score, scores
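

if __name__ == "__main__":
    # Minimal smoke test on toy data (illustrative only, not from any benchmark).
    # Only the string-based metrics are exercised here; the captioning metrics
    # additionally require the pycocoevalcap tokenizer/scorers at runtime.
    example_targets = [["receipt"], ["12%"], ["Total Amount"]]
    example_predictions = ["reciept", "12.5%", "The total amount is 53.20"]
    for metric_name in ("ANLS", "RelaxedAccuracy", "ContainAccuracy"):
        corpus_score, per_example_scores = doc_evaluate(
            metric_name, example_targets, example_predictions)
        print(f"{metric_name}: {corpus_score:.2f} {per_example_scores}")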