#!/usr/bin/env python
# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script.
    Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
    Itself adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py

    This script with default values fine-tunes and evaluates a pretrained OpenAI GPT on the RocStories dataset:
        python run_openai_gpt.py \
          --model_name openai-gpt \
          --do_train \
          --do_eval \
          --train_dataset "$ROC_STORIES_DIR/cloze_test_val__spring2016 - cloze_test_ALL_val.csv" \
          --eval_dataset "$ROC_STORIES_DIR/cloze_test_test__spring2016 - cloze_test_ALL_test.csv" \
          --output_dir ../log \
          --train_batch_size 16
"""
import argparse
import csv
import logging
import os
import random

import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange

from transformers import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    AdamW,
    OpenAIGPTDoubleHeadsModel,
    OpenAIGPTTokenizer,
    get_linear_schedule_with_warmup,
)


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)


def accuracy(out, labels):
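    """Return the number of correct multiple-choice predictions in a batch (the caller normalizes by the example count)."""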
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)


def load_rocstories_dataset(dataset_path):
    """Output a list of tuples(story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding="utf_8") as f:
        f = csv.reader(f)
        output = []
        next(f)  # skip the first line
        for line in tqdm(f):
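            # CSV columns: story id, the four context sentences, the two candidate endings,
            # and the 1-based index of the correct ending (converted here to a 0-based label).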
            output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output


def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

    into Transformer inputs of shape (n_batch, n_alternative, length), comprising for each batch and continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
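        # Build one row per example with the two stacked alternatives:
        #   input_ids    - token ids for [start] story [delim] continuation [clf]
        #   mc_token_ids - position of the [clf] token used by the multiple-choice head
        #   lm_labels    - same tokens for the LM loss, -100 elsewhere so padding positions are ignored
        #   mc_labels    - index (0 or 1) of the correct continuation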
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, default="openai-gpt", help="pretrained model name")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument("--train_dataset", type=str, default="")
    parser.add_argument("--eval_dataset", type=str, default="")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--num_train_epochs", type=int, default=3)
    parser.add_argument("--train_batch_size", type=int, default=8)
    parser.add_argument("--eval_batch_size", type=int, default=16)
    parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", type=int, default=1)
    parser.add_argument(
        "--max_steps",
        default=-1,
        type=int,
        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before                        performing a backward/update pass.",
    )
    parser.add_argument("--learning_rate", type=float, default=6.25e-5)
    parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
    parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--lm_coef", type=float, default=0.9)
    parser.add_argument("--n_valid", type=int, default=374)

    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # These loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ["_start_", "_delimiter_", "_classify_"]
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
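    # Grow the model's (tied) token embedding matrix so the newly added special tokens get trainable vectors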
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
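    # Cap story and continuation so that [start] + story + [delim] + continuation + [clf] still fits in n_positions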
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
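        # Total number of optimizer updates: either fixed by --max_steps or derived from epochs and accumulation steps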
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
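        # Split parameters into two groups: biases and LayerNorm weights are excluded from weight decay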
        no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                "weight_decay": args.weight_decay,
            },
            {"params": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
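                # losses[0] is the language-modeling loss, losses[1] the multiple-choice loss;
                # lm_coef weights the auxiliary LM objective relative to the classification objective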
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
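                # Exponentially smoothed loss, used only for the progress-bar display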
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, "module") else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
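                # With both label sets provided, the model returns (lm_loss, mc_loss, lm_logits, mc_logits, ...)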
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to("cpu").numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy, "train_loss": train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))


if __name__ == "__main__":
    main()