# coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" OpenAI GPT model fine-tuning script.
    Adapted from https://github.com/huggingface/pytorch-openai-transformer-lm/blob/master/train.py
    It self adapted from https://github.com/openai/finetune-transformer-lm/blob/master/train.py

    This script with default values fine-tunes and evaluates a pretrained OpenAI GPT on the RocStories dataset:
        python run_openai_gpt.py \
          --model_name openai-gpt \
          --do_train \
          --do_eval \
          --train_dataset $ROC_STORIES_DIR/cloze_test_val__spring2016\ -\ cloze_test_ALL_val.csv \
          --eval_dataset $ROC_STORIES_DIR/cloze_test_test__spring2016\ -\ cloze_test_ALL_test.csv \
          --output_dir ../log \
          --train_batch_size 16 \
"""
import argparse
import os
import csv
import random
import logging
from tqdm import tqdm, trange

import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)

from pytorch_pretrained_bert import OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, OpenAIAdam, cached_path

ROCSTORIES_URL = "https://s3.amazonaws.com/datasets.huggingface.co/ROCStories.tar.gz"

logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
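# Sketch: with out = np.array([[0.2, 0.8], [0.9, 0.1]]) and labels = np.array([1, 1]),
# the row-wise argmax is [1, 0], so accuracy(out, labels) returns 1. Note this is a raw
# count of correct predictions; main() divides it by the number of examples.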

def load_rocstories_dataset(dataset_path):
    """ Output a list of tuples (story, 1st continuation, 2nd continuation, label) """
    with open(dataset_path, encoding='utf_8') as f:
        reader = csv.reader(f)
        output = []
        next(reader)  # skip the header line
        for line in tqdm(reader):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
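# Each row of the cloze-test CSV holds a story id, the four story sentences, the two
# candidate endings, and the right-ending index (1 or 2); the label stored above is
# therefore 0-based (0 = first continuation is correct, 1 = second).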

def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """ Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label)

        To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation:
        input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-1, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for i, (story, cont1, cont2, mc_label) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, :len(with_cont1)] = with_cont1
            input_ids[i, 1, :len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, :len(with_cont1)-1] = with_cont1[1:]
            lm_labels[i, 1, :len(with_cont2)-1] = with_cont2[1:]
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
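# Layout sketch for one example i (token ids shown symbolically, zero-padded to input_len):
#   input_ids[i, 0]    = [_start_] story [_delimiter_] cont1 [_classify_] 0 0 ...
#   input_ids[i, 1]    = [_start_] story [_delimiter_] cont2 [_classify_] 0 0 ...
#   mc_token_ids[i, k] = position of the [_classify_] token whose hidden state feeds the multiple-choice head
#   lm_labels[i, k]    = input_ids[i, k] shifted left by one, with -1 marking positions the LM loss ignores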

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt',
                        help='pretrained model name')
    parser.add_argument("--do_train", action='store_true', help="Whether to run training.")
    parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--max_grad_norm', type=float, default=1.0)
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_proportion', type=float, default=0.002)
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)

    parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
    parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    n_gpu = torch.cuda.device_count()
    logger.info("device: {}, n_gpu {}".format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load the tokenizer and model.
    # These loading functions also add new tokens and embeddings, called `special tokens`;
    # the new embeddings will be fine-tuned on the RocStories dataset.
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name, special_tokens=special_tokens)
    special_tokens_ids = [tokenizer.convert_tokens_to_ids(token) for token in special_tokens]
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name, num_special_tokens=len(special_tokens))
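    # Passing num_special_tokens makes from_pretrained() resize the token-embedding matrix,
    # giving the three new special tokens trainable (randomly initialized) embeddings.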
    model.to(device)

    # Load and encode the datasets
    if not args.train_dataset and not args.eval_dataset:
        # Download (and cache) the RocStories archive. Note the tar.gz still has to be
        # extracted and its CSV files passed in via --train_dataset / --eval_dataset.
        roc_stories = cached_path(ROCSTORIES_URL)
    def tokenize_and_encode(obj):
        """ Tokenize and encode a nested object """
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]
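    # Sketch of tokenize_and_encode: strings become lists of token ids, ints (the labels)
    # pass through unchanged, and tuples/lists are mapped recursively, e.g.
    #   ("a story", "ending one", "ending two", 0) -> [[...ids...], [...ids...], [...ids...], 0]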
    logger.info("Encoding dataset...")
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
                           for dataset in encoded_datasets for story, cont1, cont2, _ in dataset)
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model
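    # Why this fits: capping the story and each continuation at n_positions // 2 - 2 tokens
    # bounds the packed sequence at 2 * (n_positions // 2 - 2) + 3 = n_positions - 1 tokens,
    # so every example fits within the model's position embeddings.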

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
        ]
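    # Standard decay split: biases and LayerNorm parameters are exempt from weight decay;
    # only the remaining weight matrices are regularized.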
    num_train_optimization_steps = len(train_data) * args.num_train_epochs // args.train_batch_size
    optimizer = OpenAIAdam(optimizer_grouped_parameters,
                           lr=args.learning_rate,
                           warmup=args.warmup_proportion,
                           max_grad_norm=args.max_grad_norm,
                           weight_decay=args.weight_decay,
                           t_total=num_train_optimization_steps)
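    # warmup_proportion is the fraction of t_total spent linearly ramping the learning rate
    # up from 0; the warmup_linear schedule then decays it over the remaining steps.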

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc="Training")
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids, lm_labels, mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()  # reset gradients so they don't accumulate across steps
                tr_loss += loss.item()
                exp_average_loss = loss.item() if exp_average_loss is None else 0.7*exp_average_loss+0.3*loss.item()
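                # (The 0.7/0.3 mix above is a display-only exponential moving average of the batch loss.)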
                nb_tr_steps += 1
                tqdm_bar.desc = "Training loss: {:.2e} lr: {:.2e}".format(exp_average_loss, optimizer.get_lr()[0])

    # Save a trained model
    if args.do_train:
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself
        output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
        config = model.config
        torch.save(model_to_save.state_dict(), output_model_file)

        # Load a trained model that you have fine-tuned
        model_state_dict = torch.load(output_model_file)
        model = OpenAIGPTDoubleHeadsModel(config)
        model.load_state_dict(model_state_dict)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
                _, mc_logits = model(input_ids, mc_token_ids)
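                # Two forward passes: with labels the model returns the (lm, mc) losses;
                # without labels it returns the (lm, mc) logits used for accuracy below.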

            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss/nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

if __name__ == '__main__':
    main()