#!/usr/bin/env python

import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List

import torch
from tqdm import tqdm

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params


logger = getLogger(__name__)


DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
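    # half-precision inference; mainly useful on CUDA devices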
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
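    # fall back to any prefix defined on the model config (e.g. T5-style task prefixes)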
    if prefix is None:
        prefix = getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
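        # extra generate_kwargs from the command line (e.g. num_beams, length_penalty) are forwarded to model.generate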
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
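        # write and flush one hypothesis per line so partial results survive an interruption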
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return dict(n_obs=n_obs, runtime=runtime, seconds_per_sample=round(runtime / n_obs, 4))


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    """

    Takes input text, generates model output, and scores it against the reference (BLEU for translation tasks, ROUGE otherwise).

    The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed.

    Args:
        verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout

    Returns:
        a dict of scores, e.g. ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}``.
        If ``--dump-args`` is passed, the custom generate params (e.g. ``{'num_beams': 5, 'length_penalty': 0.8}``) are merged in.
        If ``--reference_path`` is not given, an empty dict is returned.
    """

    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help="use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g. lang=en-ru. If no value is passed, the current datetime string will be used.",
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
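    # ensure the immediate parent directory of save_path exists (only one level; parents are not created)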
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
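    # score only as many reference lines as there are generated lines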
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores


if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json  --task translation $@
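    # An analogous summarization run (illustrative sketch; the data/file names below are assumptions):
    # python run_eval.py facebook/bart-large-cnn $DATA_DIR/test.source $save_dir/test_summaries.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_rouge.json --task summarization --bs 8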
    run_generate(verbose=True)