# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script.
    Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
    which is itself adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py

    With its default values, this script fine-tunes and evaluates a pretrained OpenAI GPT on the RocStories dataset:
        python run_openai_gpt.py \
          --model_name openai-gpt \
          --do_train \
          --do_eval \
          --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016 - cloze_test_ALL_val.csv" \
          --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016 - cloze_test_ALL_test.csv" \
          --output_dir ../log \
          --train_batch_size 16
"""
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """ Output a list of tuples(story, 1st continuation, 2nd continuation, label) """
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    into Transformer inputs of shape (n_batch, n_alternative, length), comprising, for each batch and continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
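        # lm_labels uses -100, PyTorch's default cross-entropy ignore_index, so padded positions are excluded from the LM loss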
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
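            # The multiple-choice head reads the hidden state at the classification token, i.e. the last token of each sequence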
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of update steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)

    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
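    # Resize the embedding matrix so the newly added special tokens get trainable (randomly initialized) embeddings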
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """ Tokenize and encode a nested object """
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
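    # Each example is packed as [start] story [delimiter] continuation [classify], so story and
    # continuation are each capped to a bit less than half of the model's position budget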
    max_length = model.config.n_positions // 2 - 2
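    # The "+ 3" below accounts for the start, delimiter and classification special tokens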
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare input tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
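                # With both lm_labels and mc_labels provided, losses[0] is the LM loss and losses[1] the multiple-choice loss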
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                # Clip gradients to --max_grad_norm before the optimizer step
                torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
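                # Exponentially smoothed loss, used only for the progress-bar display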
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
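                # In this version, the outputs unpack as (lm_loss, mc_loss, lm_logits, mc_logits); only the multiple-choice parts are used here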
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()