import argparse
from collections import namedtuple
import logging
import os
import re
import sys

import torch
from torch.utils.data import DataLoader, SequentialSampler
from tqdm import tqdm

from transformers import BertTokenizer

from modeling_bertabs import BertAbs, build_predictor

from utils_summarization import (
    SummarizationDataset,
    encode_for_summarization,
    build_mask,
    fit_to_block_size,
    compute_token_type_ids,
)

logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)


Batch = namedtuple(
    "Batch", ["document_names", "batch_size", "src", "segs", "mask_src", "tgt_str"]
)
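# The `Batch` namedtuple mirrors the input structure the original BertAbs
# code expects: `src` holds the encoded source tokens, `segs` the token type
# ids, `mask_src` the padding mask and `tgt_str` the reference summaries.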


def evaluate(args):
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", do_lower_case=True)
    model = BertAbs.from_pretrained("bertabs-finetuned-cnndm")
    model.to(args.device)
    model.eval()

    symbols = {
        "BOS": tokenizer.vocab["[unused0]"],
        "EOS": tokenizer.vocab["[unused1]"],
        "PAD": tokenizer.vocab["[PAD]"],
    }
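    # The [unusedN] entries above are spare slots in BERT's vocabulary that
    # BertAbs repurposes as control symbols: [unused0] marks the beginning of
    # a summary, [unused1] its end, and [unused2] a sentence separator (see
    # `format_summary` below).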

    if args.compute_rouge:
        reference_summaries = []
        generated_summaries = []

        # Imported locally since they are only needed when computing ROUGE.
        import nltk
        import rouge

        nltk.download("punkt")
        rouge_evaluator = rouge.Rouge(
            metrics=["rouge-n", "rouge-l"],
            max_n=2,
            limit_length=True,
            # The limit is counted in words; cap it at the longest summary
            # that beam search is allowed to generate.
            length_limit=args.max_length,
            length_limit_type="words",
            apply_avg=True,
            apply_best=False,
            alpha=0.5,  # Default F1_score
            weight_factor=1.2,
            stemming=True,
        )

    # These (unused) arguments are defined to keep compatibility with the
    # legacy code and will be deleted in a future iteration.
    args.result_path = ""
    args.temp_dir = ""

    data_iterator = build_data_iterator(args, tokenizer)
    predictor = build_predictor(args, tokenizer, symbols, model)

    logger.info("***** Running evaluation *****")
    logger.info("  Number examples = %d", len(data_iterator.dataset))
    logger.info("  Batch size = %d", args.batch_size)
    logger.info("")
    logger.info("***** Beam Search parameters *****")
    logger.info("  Beam size = %d", args.beam_size)
    logger.info("  Minimum length = %d", args.min_length)
    logger.info("  Maximum length = %d", args.max_length)
    logger.info("  Alpha (length penalty) = %.2f", args.alpha)
    logger.info("  Trigrams %s be blocked", ("will" if args.block_trigram else "will NOT"))

    for batch in tqdm(data_iterator):
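        # `translate_batch` runs the beam search; `from_batch` unpacks the
        # best hypothesis for every document in the batch.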
        batch_data = predictor.translate_batch(batch)
        translations = predictor.from_batch(batch_data)
        summaries = [format_summary(t) for t in translations]
        save_summaries(summaries, args.summaries_output_dir, batch.document_names)

        if args.compute_rouge:
            reference_summaries += batch.tgt_str
            generated_summaries += summaries

    if args.compute_rouge:
        scores = rouge_evaluator.get_scores(generated_summaries, reference_summaries)
        str_scores = format_rouge_scores(scores)
        save_rouge_scores(str_scores)
        print(str_scores)


def format_summary(translation):
    """ Transforms the output of the `from_batch` function
    into nicely formatted summaries.
    """
    raw_summary, _, _ = translation
    summary = (
        raw_summary.replace("[unused0]", "")
        .replace("[unused3]", "")
        .replace("[PAD]", "")
        .replace("[unused1]", "")
        .replace(" [unused2] ", ". ")
        .replace("[unused2]", "")
        .strip()
    )
    # Collapse the runs of spaces left behind by the token removals;
    # `str.replace` cannot do this since it does not interpret regular
    # expressions.
    summary = re.sub(r" +", " ", summary)

    return summary


def format_rouge_scores(scores):
    return """\n
****** ROUGE SCORES ******

** ROUGE 1
F1        >> {:.3f}
Precision >> {:.3f}
Recall    >> {:.3f}

** ROUGE 2
F1        >> {:.3f}
Precision >> {:.3f}
Recall    >> {:.3f}

** ROUGE L
F1        >> {:.3f}
Precision >> {:.3f}
Recall    >> {:.3f}""".format(
        scores["rouge-1"]["f"],
        scores["rouge-1"]["p"],
        scores["rouge-1"]["r"],
        scores["rouge-2"]["f"],
        scores["rouge-2"]["p"],
        scores["rouge-2"]["r"],
        scores["rouge-l"]["f"],
        scores["rouge-l"]["p"],
        scores["rouge-l"]["r"],
    )


def save_rouge_scores(str_scores):
    with open("rouge_scores.txt", "w") as output:
        output.write(str_scores)


def save_summaries(summaries, path, original_document_names):
    """ Write the summaries in files whose names are the original
    documents' names with `_summary` appended before the extension.

    Args:
        summaries: List[string]
            The summaries that we produced.
        path: string
            Path where the summaries will be written.
        original_document_names: List[string]
            Names of the documents that were summarized.
    """
    for summary, document_name in zip(summaries, original_document_names):
        # Prepare the summary file's name
        if "." in document_name:
            bare_document_name = ".".join(document_name.split(".")[:-1])
            extension = document_name.split(".")[-1]
            name = bare_document_name + "_summary." + extension
        else:
            name = document_name + "_summary"

        file_path = os.path.join(path, name)
        with open(file_path, "w") as output:
            output.write(summary)


#
# LOAD the dataset
#


def build_data_iterator(args, tokenizer):
    dataset = load_and_cache_examples(args, tokenizer)
    sampler = SequentialSampler(dataset)
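    # 512 is BERT's maximum input length; `collate` truncates or pads every
    # story to this block size before batching.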
    collate_fn = lambda data: collate(data, tokenizer, block_size=512)
    iterator = DataLoader(
        dataset, sampler=sampler, batch_size=args.batch_size, collate_fn=collate_fn,
    )

    return iterator


def load_and_cache_examples(args, tokenizer):
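    # Despite its name this function performs no caching: tokenization is
    # deferred to `collate` so documents are only encoded batch by batch.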
    dataset = SummarizationDataset(args.documents_dir)
    return dataset


def collate(data, tokenizer, block_size):
    """ Collate formats the data passed to the data loader.

    In particular we tokenize the data batch after batch to avoid keeping them
    all in memory. We output the data as a namedtuple to fit the original BertAbs's
    API.
    """
    data = [x for x in data if len(x[1]) != 0]  # remove empty files
    names = [name for name, _, _ in data]
    summaries = [" ".join(summary_list) for _, _, summary_list in data]

    encoded_text = [
        encode_for_summarization(story, summary, tokenizer) for _, story, summary in data
    ]
    encoded_stories = torch.tensor(
        [
            fit_to_block_size(story, block_size, tokenizer.pad_token_id)
            for story, _ in encoded_text
        ]
    )
    encoder_token_type_ids = compute_token_type_ids(encoded_stories, tokenizer.cls_token_id)
    encoder_mask = build_mask(encoded_stories, tokenizer.pad_token_id)
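    # `compute_token_type_ids` follows BertSum in alternating the token type
    # id at each [CLS] token so the encoder can tell successive sentences
    # apart; `build_mask` zeroes out the padding positions.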

    batch = Batch(
        document_names=names,
        batch_size=len(encoded_stories),
        src=encoded_stories,
        segs=encoder_token_type_ids,
        mask_src=encoder_mask,
        tgt_str=summaries,
    )

    return batch


def decode_summary(summary_tokens, tokenizer):
    """ Decode the summary and return it in a format
    suitable for evaluation.
    """
    summary_tokens = summary_tokens.to("cpu").numpy()
    summary = tokenizer.decode(summary_tokens)
    # Drop the empty strings that `split` produces for leading or trailing
    # periods so that no spurious "." sentences are emitted.
    sentences = [s + "." for s in summary.split(".") if s]
    return sentences


def main():
    """ The main function defines the interface with the users.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--documents_dir",
        default=None,
        type=str,
        required=True,
        help="The folder where the documents to summarize are located.",
    )
    parser.add_argument(
        "--summaries_output_dir",
        default=None,
        type=str,
        required=False,
        help="The folder in which the summaries should be written. Defaults to the folder where the documents are located.",
    )
    parser.add_argument(
        "--compute_rouge",
        action="store_true",
        help="Compute the ROUGE metrics during evaluation. Only available for the CNN/DailyMail dataset.",
    )
    # EVALUATION options
    parser.add_argument(
        "--visible_gpus",
        default=-1,
        type=int,
        help="Number of GPUs with which to do the training.",
    )
    parser.add_argument(
        "--batch_size", default=4, type=int, help="Batch size per GPU/CPU for training.",
    )
    # BEAM SEARCH arguments
    parser.add_argument(
        "--min_length",
        default=50,
        type=int,
        help="Minimum number of tokens for the summaries.",
    )
    parser.add_argument(
        "--max_length",
        default=200,
        type=int,
        help="Maixmum number of tokens for the summaries.",
    )
    parser.add_argument(
        "--beam_size",
        default=5,
        type=int,
        help="The number of beams to start with for each example.",
    )
    parser.add_argument(
        "--alpha",
        default=0.95,
        type=float,
        help="The value of alpha for the length penalty in the beam search.",
    )
    parser.add_argument(
        "--block_trigram",
        default=True,
        # `type=bool` would turn every non-empty string into True; parse the
        # common false-y spellings explicitly so the flag can be disabled.
        type=lambda s: s.lower() not in ("false", "0", "no"),
        help="Whether to block the existence of repeating trigrams in the text generated by beam search.",
    )
    args = parser.parse_args()
    args.device = torch.device("cpu") if args.visible_gpus == -1 else torch.device("cuda")

    if not args.summaries_output_dir:
        args.summaries_output_dir = args.documents_dir

    if not documents_dir_is_valid(args.documents_dir):
        raise FileNotFoundError(
            "We could not find the directory you specified for the documents to summarize, or it was empty. Please specify a valid path."
        )
    maybe_create_output_dir(args.summaries_output_dir)

    evaluate(args)


def documents_dir_is_valid(path):
    if not os.path.exists(path):
        return False

    file_list = os.listdir(path)
    if len(file_list) == 0:
        return False

    return True


def maybe_create_output_dir(path):
    if not os.path.exists(path):
        os.makedirs(path)


if __name__ == "__main__":
    main()
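
# Example invocation (paths and flag values are illustrative):
#
#   python run_summarization.py \
#       --documents_dir ./stories \
#       --summaries_output_dir ./summaries \
#       --batch_size 4 \
#       --beam_size 5 \
#       --compute_rouge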