#!/usr/bin/env python

import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

from utils import calculate_bleu, calculate_rouge, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
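    # half precision roughly halves weight memory and can speed up generation; typically only worthwhile on a CUDA device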
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
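        # write and flush each hypothesis immediately so partial output survives an interruption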
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return dict(n_obs=n_obs, runtime=runtime, seconds_per_sample=round(runtime / n_obs, 4))


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """

    Takes input text, generates output, and, if a reference is provided, calculates BLEU (for translation tasks) or ROUGE scores against it.

    The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed.

    Args:
        verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout

    Returns:
        a tuple: ``(scores, params)``
        - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}``
        - ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}``
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn, t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help="use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g. lang=en-ru. If no value is passed, the current datetime string will be used.",
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
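    # keep only as many reference lines as there are generated outputs (e.g. when --n_obs truncated the input)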
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        path = args.score_path
        json.dump(scores, open(path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json  --task translation $@
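    # Usage for summarization (paths are illustrative):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_generations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/rouge.json --task summarization --bs 32 $@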
    run_generate(verbose=True)