run_pplm.py 29.3 KB
Newer Older
Piero Molino's avatar
Piero Molino committed
1
#! /usr/bin/env python3
Julien Chaumond's avatar
Julien Chaumond committed
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
# coding=utf-8
# Copyright 2018 The Uber AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Example command with bag of words:
python examples/run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95

Example command with discriminator:
22
python examples/run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95
Julien Chaumond's avatar
Julien Chaumond committed
23
24
25
"""

import argparse
26
import json
Julien Chaumond's avatar
Julien Chaumond committed
27
28
29
30
31
32
33
34
35
from operator import add
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from tqdm import trange

36
from examples.run_pplm_discrim_train import ClassificationHead
Julien Chaumond's avatar
Julien Chaumond committed
37
38
39
40
41
42
43
44
from transformers import GPT2Tokenizer
from transformers.file_utils import cached_path
from transformers.modeling_gpt2 import GPT2LMHeadModel

PPLM_BOW = 1
PPLM_DISCRIM = 2
PPLM_BOW_DISCRIM = 3
SMALL_CONST = 1e-15
45
BIG_CONST = 1e10
Julien Chaumond's avatar
Julien Chaumond committed
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66

BAG_OF_WORDS_ARCHIVE_MAP = {
    'kitchen': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/kitchen.txt",
    'legal': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt",
    'military': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt",
    'monsters': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/monsters.txt",
    'politics': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt",
    'positive_words': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/positive_words.txt",
    'religion': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt",
    'science': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt",
    'space': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt",
    'technology': "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt",
}

DISCRIMINATOR_MODELS_PARAMS = {
    "clickbait": {
        "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifierhead.pt",
        "class_size": 2,
        "embed_size": 1024,
        "class_vocab": {"non_clickbait": 0, "clickbait": 1},
        "default_class": 1,
67
        "pretrained_model": "gpt2-medium",
Julien Chaumond's avatar
Julien Chaumond committed
68
69
    },
    "sentiment": {
Piero Molino's avatar
Piero Molino committed
70
        "url": "http://s.yosinski.com/SST_classifier_head.pt",
Julien Chaumond's avatar
Julien Chaumond committed
71
72
73
74
        "class_size": 5,
        "embed_size": 1024,
        "class_vocab": {"very_positive": 2, "very_negative": 3},
        "default_class": 3,
75
        "pretrained_model": "gpt2-medium",
Julien Chaumond's avatar
Julien Chaumond committed
76
77
78
79
80
81
82
    },
    "toxicity": {
        "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/toxicity_classifierhead.pt",
        "class_size": 2,
        "embed_size": 1024,
        "class_vocab": {"non_toxic": 0, "toxic": 1},
        "default_class": 0,
83
        "pretrained_model": "gpt2-medium",
Julien Chaumond's avatar
Julien Chaumond committed
84
85
86
87
    },
}


88
89
def to_var(x, requires_grad=False, volatile=False, device='cuda'):
    if torch.cuda.is_available() and device == 'cuda':
Piero Molino's avatar
Piero Molino committed
90
        x = x.cuda()
91
92
    elif device != 'cuda':
        x = x.to(device)
Piero Molino's avatar
Piero Molino committed
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
    return Variable(x, requires_grad=requires_grad, volatile=volatile)


def top_k_filter(logits, k, probs=False):
    """
    Masks everything but the k top entries as -infinity (1e10).
    Used to mask logits such that e^-infinity -> 0 won't contribute to the
    sum of the denominator.
    """
    if k == 0:
        return logits
    else:
        values = torch.topk(logits, k)[0]
        batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
        if probs:
            return torch.where(logits < batch_mins,
                               torch.ones_like(logits) * 0.0, logits)
110
111
        return torch.where(logits < batch_mins,
                           torch.ones_like(logits) * -BIG_CONST,
Piero Molino's avatar
Piero Molino committed
112
113
114
                           logits)


115
116
117
def perturb_past(
        past,
        model,
118
        last,
119
120
121
122
123
        unpert_past=None,
        unpert_logits=None,
        accumulated_hidden=None,
        grad_norms=None,
        stepsize=0.01,
124
        one_hot_bows_vectors=None,
125
        classifier=None,
126
        class_label=None,
127
128
129
        loss_type=0,
        num_iterations=3,
        horizon_length=1,
130
        window_length=0,
131
132
        decay=False,
        gamma=1.5,
133
134
        kl_scale=0.01,
        device='cuda',
135
):
Piero Molino's avatar
Piero Molino committed
136
    # Generate inital perturbed past
137
138
139
140
    grad_accumulator = [
        (np.zeros(p.shape).astype("float32"))
        for p in past
    ]
Julien Chaumond's avatar
Julien Chaumond committed
141
142
143
144

    if accumulated_hidden is None:
        accumulated_hidden = 0

145
    if decay:
146
147
148
149
150
        decay_mask = torch.arange(
            0.,
            1.0 + SMALL_CONST,
            1.0 / (window_length)
        )[1:]
Julien Chaumond's avatar
Julien Chaumond committed
151
152
153
    else:
        decay_mask = 1.0

154
    # TODO fix this comment (SUMANTH)
Piero Molino's avatar
Piero Molino committed
155
    # Generate a mask is gradient perturbated is based on a past window
156
    _, _, _, curr_length, _ = past[0].shape
Piero Molino's avatar
Piero Molino committed
157

158
159
160
161
162
163
    if curr_length > window_length and window_length > 0:
        ones_key_val_shape = (
                tuple(past[0].shape[:-2])
                + tuple([window_length])
                + tuple(past[0].shape[-1:])
        )
Piero Molino's avatar
Piero Molino committed
164

165
166
167
168
169
        zeros_key_val_shape = (
                tuple(past[0].shape[:-2])
                + tuple([curr_length - window_length])
                + tuple(past[0].shape[-1:])
        )
Julien Chaumond's avatar
Julien Chaumond committed
170
171
172
173
174

        ones_mask = torch.ones(ones_key_val_shape)
        ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3)
        ones_mask = ones_mask.permute(0, 1, 2, 4, 3)

175
176
177
178
        window_mask = torch.cat(
            (ones_mask, torch.zeros(zeros_key_val_shape)),
            dim=-2
        ).to(device)
Julien Chaumond's avatar
Julien Chaumond committed
179
    else:
180
        window_mask = torch.ones_like(past[0]).to(device)
Julien Chaumond's avatar
Julien Chaumond committed
181

182
    # accumulate perturbations for num_iterations
Julien Chaumond's avatar
Julien Chaumond committed
183
    loss_per_iter = []
184
    new_accumulated_hidden = None
185
    for i in range(num_iterations):
Julien Chaumond's avatar
Julien Chaumond committed
186
        print("Iteration ", i + 1)
187
        curr_perturbation = [
188
            to_var(torch.from_numpy(p_), requires_grad=True, device=device)
189
190
191
192
193
194
195
            for p_ in grad_accumulator
        ]

        # Compute hidden using perturbed past
        perturbed_past = list(map(add, past, curr_perturbation))
        _, _, _, curr_length, _ = curr_perturbation[0].shape
        all_logits, _, all_hidden = model(last, past=perturbed_past)
Piero Molino's avatar
Piero Molino committed
196
        hidden = all_hidden[-1]
197
198
199
200
201
202
203
        new_accumulated_hidden = accumulated_hidden + torch.sum(
            hidden,
            dim=1
        ).detach()
        # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth)
        logits = all_logits[:, -1, :]
        probs = F.softmax(logits, dim=-1)
Piero Molino's avatar
Piero Molino committed
204
205
206

        loss = 0.0
        loss_list = []
207
208
209
210
211
212
        if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM:
            for one_hot_bow in one_hot_bows_vectors:
                bow_logits = torch.mm(probs, torch.t(one_hot_bow))
                bow_loss = -torch.log(torch.sum(bow_logits))
                loss += bow_loss
                loss_list.append(bow_loss)
Piero Molino's avatar
Piero Molino committed
213
214
            print(" pplm_bow_loss:", loss.data.cpu().numpy())

215
        if loss_type == 2 or loss_type == 3:
Julien Chaumond's avatar
Julien Chaumond committed
216
            ce_loss = torch.nn.CrossEntropyLoss()
217
218
219
220
221
222
223
224
            # TODO why we need to do this assignment and not just using unpert_past? (Sumanth)
            curr_unpert_past = unpert_past
            curr_probs = torch.unsqueeze(probs, dim=1)
            wte = model.resize_token_embeddings()
            for _ in range(horizon_length):
                inputs_embeds = torch.matmul(curr_probs, wte.weight.data)
                _, curr_unpert_past, curr_all_hidden = model(
                    past=curr_unpert_past,
Julien Chaumond's avatar
Julien Chaumond committed
225
226
                    inputs_embeds=inputs_embeds
                )
227
                curr_hidden = curr_all_hidden[-1]
Piero Molino's avatar
Piero Molino committed
228
                new_accumulated_hidden = new_accumulated_hidden + torch.sum(
229
                    curr_hidden, dim=1)
Julien Chaumond's avatar
Julien Chaumond committed
230

231
232
            prediction = classifier(new_accumulated_hidden /
                                    (curr_length + 1 + horizon_length))
Julien Chaumond's avatar
Julien Chaumond committed
233

234
            label = torch.tensor([class_label], device=device,
Piero Molino's avatar
Piero Molino committed
235
                                 dtype=torch.long)
236
            discrim_loss = ce_loss(prediction, label)
Julien Chaumond's avatar
Julien Chaumond committed
237
            print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy())
Piero Molino's avatar
Piero Molino committed
238
239
            loss += discrim_loss
            loss_list.append(discrim_loss)
Julien Chaumond's avatar
Julien Chaumond committed
240

Piero Molino's avatar
Piero Molino committed
241
242
        kl_loss = 0.0
        if kl_scale > 0.0:
243
244
245
246
247
            unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1)
            unpert_probs = (
                    unpert_probs + SMALL_CONST *
                    (unpert_probs <= SMALL_CONST).float().to(device).detach()
            )
248
249
            correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(
                device).detach()
250
            corrected_probs = probs + correction.detach()
Rosanne Liu's avatar
Rosanne Liu committed
251
            kl_loss = kl_scale * (
252
253
254
255
                (corrected_probs * (corrected_probs / unpert_probs).log()).sum()
            )
            print(' kl_loss', kl_loss.data.cpu().numpy())
            loss += kl_loss
Julien Chaumond's avatar
Julien Chaumond committed
256
257
258
259

        loss_per_iter.append(loss.data.cpu().numpy())
        print(' pplm_loss', (loss - kl_loss).data.cpu().numpy())

260
        # compute gradients
Rosanne Liu's avatar
Rosanne Liu committed
261
        loss.backward()
262
263
264

        # calculate gradient norms
        if grad_norms is not None and loss_type == PPLM_BOW:
Julien Chaumond's avatar
Julien Chaumond committed
265
266
            grad_norms = [
                torch.max(grad_norms[index], torch.norm(p_.grad * window_mask))
267
268
                for index, p_ in enumerate(curr_perturbation)
            ]
Julien Chaumond's avatar
Julien Chaumond committed
269
        else:
270
271
272
273
            grad_norms = [
                (torch.norm(p_.grad * window_mask) + SMALL_CONST)
                for index, p_ in enumerate(curr_perturbation)
            ]
Julien Chaumond's avatar
Julien Chaumond committed
274

275
        # normalize gradients
Julien Chaumond's avatar
Julien Chaumond committed
276
        grad = [
277
            -stepsize *
278
279
            (p_.grad * window_mask / grad_norms[
                index] ** gamma).data.cpu().numpy()
280
281
            for index, p_ in enumerate(curr_perturbation)
        ]
Julien Chaumond's avatar
Julien Chaumond committed
282

283
284
285
286
287
        # accumulate gradient
        grad_accumulator = list(map(add, grad, grad_accumulator))

        # reset gradients, just to make sure
        for p_ in curr_perturbation:
Julien Chaumond's avatar
Julien Chaumond committed
288
289
            p_.grad.data.zero_()

290
        # removing past from the graph
Julien Chaumond's avatar
Julien Chaumond committed
291
        new_past = []
292
293
        for p_ in past:
            new_past.append(p_.detach())
Julien Chaumond's avatar
Julien Chaumond committed
294
295
        past = new_past

296
297
    # apply the accumulated perturbations to the past
    grad_accumulator = [
298
        to_var(torch.from_numpy(p_), requires_grad=True, device=device)
299
300
301
        for p_ in grad_accumulator
    ]
    pert_past = list(map(add, past, grad_accumulator))
Julien Chaumond's avatar
Julien Chaumond committed
302

303
    return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter
Julien Chaumond's avatar
Julien Chaumond committed
304
305
306


def get_classifier(
307
        name: Optional[str], class_label: Union[str, int],
308
        device: str
Julien Chaumond's avatar
Julien Chaumond committed
309
310
311
312
313
314
315
316
317
) -> Tuple[Optional[ClassificationHead], Optional[int]]:
    if name is None:
        return None, None

    params = DISCRIMINATOR_MODELS_PARAMS[name]
    classifier = ClassificationHead(
        class_size=params['class_size'],
        embed_size=params['embed_size']
    ).to(device)
318
319
    if "url" in params:
        resolved_archive_file = cached_path(params["url"])
320
    elif "path" in params:
321
        resolved_archive_file = params["path"]
322
323
324
    else:
        raise ValueError("Either url or path have to be specified "
                         "in the discriminator model parameters")
Piero Molino's avatar
Piero Molino committed
325
326
    classifier.load_state_dict(
        torch.load(resolved_archive_file, map_location=device))
Julien Chaumond's avatar
Julien Chaumond committed
327
328
    classifier.eval()

329
330
331
    if isinstance(class_label, str):
        if class_label in params["class_vocab"]:
            label_id = params["class_vocab"][class_label]
Julien Chaumond's avatar
Julien Chaumond committed
332
333
        else:
            label_id = params["default_class"]
334
            print("class_label {} not in class_vocab".format(class_label))
Julien Chaumond's avatar
Julien Chaumond committed
335
336
337
            print("available values are: {}".format(params["class_vocab"]))
            print("using default class {}".format(label_id))

338
339
340
    elif isinstance(class_label, int):
        if class_label in set(params["class_vocab"].values()):
            label_id = class_label
Julien Chaumond's avatar
Julien Chaumond committed
341
342
        else:
            label_id = params["default_class"]
343
            print("class_label {} not in class_vocab".format(class_label))
Julien Chaumond's avatar
Julien Chaumond committed
344
345
346
347
348
349
350
351
352
            print("available values are: {}".format(params["class_vocab"]))
            print("using default class {}".format(label_id))

    else:
        label_id = params["default_class"]

    return classifier, label_id


353
def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> \
354
        List[List[List[int]]]:
Julien Chaumond's avatar
Julien Chaumond committed
355
356
357
358
359
360
361
    bow_indices = []
    for id_or_path in bag_of_words_ids_or_paths:
        if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP:
            filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path])
        else:
            filepath = id_or_path
        with open(filepath, "r") as f:
Piero Molino's avatar
Piero Molino committed
362
363
            words = f.read().strip().split("\n")
        bow_indices.append(
364
            [tokenizer.encode(word.strip(), add_prefix_space=True) for word in
Piero Molino's avatar
Piero Molino committed
365
             words])
Julien Chaumond's avatar
Julien Chaumond committed
366
367
368
    return bow_indices


369
def build_bows_one_hot_vectors(bow_indices, tokenizer, device='cuda'):
Julien Chaumond's avatar
Julien Chaumond committed
370
371
372
373
374
375
    if bow_indices is None:
        return None

    one_hot_bows_vectors = []
    for single_bow in bow_indices:
        single_bow = list(filter(lambda x: len(x) <= 1, single_bow))
376
        single_bow = torch.tensor(single_bow).to(device)
Julien Chaumond's avatar
Julien Chaumond committed
377
        num_words = single_bow.shape[0]
378
        one_hot_bow = torch.zeros(num_words, tokenizer.vocab_size).to(device)
Julien Chaumond's avatar
Julien Chaumond committed
379
380
381
382
383
        one_hot_bow.scatter_(1, single_bow, 1)
        one_hot_bows_vectors.append(one_hot_bow)
    return one_hot_bows_vectors


384
def full_text_generation(
385
        model,
386
        tokenizer,
387
388
389
        context=None,
        num_samples=1,
        device="cuda",
390
        bag_of_words=None,
391
        discrim=None,
392
        class_label=None,
393
394
395
396
        length=100,
        stepsize=0.02,
        temperature=1.0,
        top_k=10,
397
398
399
        sample=False,
        num_iterations=3,
        grad_length=10000,
400
        horizon_length=1,
401
        window_length=0,
402
403
        decay=False,
        gamma=1.5,
404
405
        gm_scale=0.9,
        kl_scale=0.01,
406
407
        **kwargs
):
Julien Chaumond's avatar
Julien Chaumond committed
408
    classifier, class_id = get_classifier(
409
        discrim,
410
        class_label,
Julien Chaumond's avatar
Julien Chaumond committed
411
412
413
        device
    )

414
415
    bow_indices = []
    if bag_of_words:
416
417
        bow_indices = get_bag_of_words_indices(bag_of_words.split(";"),
                                               tokenizer)
Piero Molino's avatar
Piero Molino committed
418

419
    if bag_of_words and classifier:
Julien Chaumond's avatar
Julien Chaumond committed
420
        print("Both PPLM-BoW and PPLM-Discrim are on. This is not optimized.")
421
        loss_type = PPLM_BOW_DISCRIM
Julien Chaumond's avatar
Julien Chaumond committed
422

423
424
    elif bag_of_words:
        loss_type = PPLM_BOW
Julien Chaumond's avatar
Julien Chaumond committed
425
426
427
        print("Using PPLM-BoW")

    elif classifier is not None:
428
        loss_type = PPLM_DISCRIM
Julien Chaumond's avatar
Julien Chaumond committed
429
430
431
        print("Using PPLM-Discrim")

    else:
432
        raise Exception("Specify either a bag of words or a discriminator")
Julien Chaumond's avatar
Julien Chaumond committed
433

434
    unpert_gen_tok_text, _, _ = generate_text_pplm(
435
        model=model,
436
        tokenizer=tokenizer,
437
438
439
        context=context,
        device=device,
        length=length,
440
        sample=sample,
441
442
        perturb=False
    )
443
444
    if device == 'cuda':
        torch.cuda.empty_cache()
Julien Chaumond's avatar
Julien Chaumond committed
445

446
447
448
    pert_gen_tok_texts = []
    discrim_losses = []
    losses_in_time = []
Piero Molino's avatar
Piero Molino committed
449

450
    for i in range(num_samples):
451
        pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm(
452
            model=model,
453
            tokenizer=tokenizer,
454
455
456
457
458
            context=context,
            device=device,
            perturb=True,
            bow_indices=bow_indices,
            classifier=classifier,
459
            class_label=class_id,
460
461
462
463
464
            loss_type=loss_type,
            length=length,
            stepsize=stepsize,
            temperature=temperature,
            top_k=top_k,
465
466
467
            sample=sample,
            num_iterations=num_iterations,
            grad_length=grad_length,
468
            horizon_length=horizon_length,
469
            window_length=window_length,
470
471
            decay=decay,
            gamma=gamma,
472
473
            gm_scale=gm_scale,
            kl_scale=kl_scale,
474
        )
475
        pert_gen_tok_texts.append(pert_gen_tok_text)
Julien Chaumond's avatar
Julien Chaumond committed
476
        if classifier is not None:
477
478
            discrim_losses.append(discrim_loss.data.cpu().numpy())
        losses_in_time.append(loss_in_time)
Julien Chaumond's avatar
Julien Chaumond committed
479

480
481
    if device == 'cuda':
        torch.cuda.empty_cache()
Julien Chaumond's avatar
Julien Chaumond committed
482

483
    return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time
Julien Chaumond's avatar
Julien Chaumond committed
484

485
486
487

def generate_text_pplm(
        model,
488
        tokenizer,
489
490
491
492
        context=None,
        past=None,
        device="cuda",
        perturb=True,
493
        bow_indices=None,
494
        classifier=None,
495
        class_label=None,
496
497
498
499
500
        loss_type=0,
        length=100,
        stepsize=0.02,
        temperature=1.0,
        top_k=10,
501
502
503
        sample=False,
        num_iterations=3,
        grad_length=10000,
504
        horizon_length=1,
505
        window_length=0,
506
507
        decay=False,
        gamma=1.5,
508
509
        gm_scale=0.9,
        kl_scale=0.01,
510
):
511
512
513
514
515
    output_so_far = (
        torch.tensor(context, device=device, dtype=torch.long).unsqueeze(0)
        if context
        else None
    )
Julien Chaumond's avatar
Julien Chaumond committed
516

517
    # collect one hot vectors for bags of words
518
519
    one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer,
                                                      device)
520

Julien Chaumond's avatar
Julien Chaumond committed
521
    grad_norms = None
522
    last = None
523
    unpert_discrim_loss = 0
Julien Chaumond's avatar
Julien Chaumond committed
524
    loss_in_time = []
525
    for i in trange(length, ascii=True):
Julien Chaumond's avatar
Julien Chaumond committed
526
527

        # Get past/probs for current output, except for last word
528
        # Note that GPT takes 2 inputs: past + current_token
Julien Chaumond's avatar
Julien Chaumond committed
529

530
531
532
        # run model forward to obtain unperturbed
        if past is None and output_so_far is not None:
            last = output_so_far[:, -1:]
533
534
            if output_so_far.shape[1] > 1:
                _, past, _ = model(output_so_far[:, :-1])
Piero Molino's avatar
Piero Molino committed
535

536
537
        unpert_logits, unpert_past, unpert_all_hidden = model(output_so_far)
        unpert_last_hidden = unpert_all_hidden[-1]
Piero Molino's avatar
Piero Molino committed
538

539
        # check if we are abowe grad max length
540
541
        if i >= grad_length:
            current_stepsize = stepsize * 0
Julien Chaumond's avatar
Julien Chaumond committed
542
        else:
543
            current_stepsize = stepsize
Julien Chaumond's avatar
Julien Chaumond committed
544

545
        # modify the past if necessary
546
        if not perturb or num_iterations == 0:
547
            pert_past = past
Julien Chaumond's avatar
Julien Chaumond committed
548
549

        else:
550
            accumulated_hidden = unpert_last_hidden[:, :-1, :]
Julien Chaumond's avatar
Julien Chaumond committed
551
552
            accumulated_hidden = torch.sum(accumulated_hidden, dim=1)

553
554
555
556
557
558
559
560
561
562
            if past is not None:
                pert_past, _, grad_norms, loss_this_iter = perturb_past(
                    past,
                    model,
                    last,
                    unpert_past=unpert_past,
                    unpert_logits=unpert_logits,
                    accumulated_hidden=accumulated_hidden,
                    grad_norms=grad_norms,
                    stepsize=current_stepsize,
563
                    one_hot_bows_vectors=one_hot_bows_vectors,
564
                    classifier=classifier,
565
                    class_label=class_label,
566
567
568
                    loss_type=loss_type,
                    num_iterations=num_iterations,
                    horizon_length=horizon_length,
569
                    window_length=window_length,
570
571
                    decay=decay,
                    gamma=gamma,
572
573
                    kl_scale=kl_scale,
                    device=device,
574
575
576
577
                )
                loss_in_time.append(loss_this_iter)
            else:
                pert_past = past
Piero Molino's avatar
Piero Molino committed
578

579
580
581
        pert_logits, past, pert_all_hidden = model(last, past=pert_past)
        pert_logits = pert_logits[:, -1, :] / temperature  # + SMALL_CONST
        pert_probs = F.softmax(pert_logits, dim=-1)
Julien Chaumond's avatar
Julien Chaumond committed
582
583

        if classifier is not None:
Piero Molino's avatar
Piero Molino committed
584
            ce_loss = torch.nn.CrossEntropyLoss()
585
            prediction = classifier(torch.mean(unpert_last_hidden, dim=1))
586
            label = torch.tensor([class_label], device=device,
Piero Molino's avatar
Piero Molino committed
587
                                 dtype=torch.long)
588
589
590
591
592
            unpert_discrim_loss = ce_loss(prediction, label)
            print(
                "unperturbed discrim loss",
                unpert_discrim_loss.data.cpu().numpy()
            )
Julien Chaumond's avatar
Julien Chaumond committed
593
        else:
594
            unpert_discrim_loss = 0
Piero Molino's avatar
Piero Molino committed
595
596

        # Fuse the modified model and original model
Julien Chaumond's avatar
Julien Chaumond committed
597
598
        if perturb:

599
            unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1)
Piero Molino's avatar
Piero Molino committed
600

601
602
603
            pert_probs = ((pert_probs ** gm_scale) * (
                    unpert_probs ** (1 - gm_scale)))  # + SMALL_CONST
            pert_probs = top_k_filter(pert_probs, k=top_k,
604
                                      probs=True)  # + SMALL_CONST
Julien Chaumond's avatar
Julien Chaumond committed
605

606
607
608
            # rescale
            if torch.sum(pert_probs) <= 1:
                pert_probs = pert_probs / torch.sum(pert_probs)
Julien Chaumond's avatar
Julien Chaumond committed
609
610

        else:
611
612
            pert_logits = top_k_filter(pert_logits, k=top_k)  # + SMALL_CONST
            pert_probs = F.softmax(pert_logits, dim=-1)
Julien Chaumond's avatar
Julien Chaumond committed
613

614
        # sample or greedy
Julien Chaumond's avatar
Julien Chaumond committed
615
        if sample:
616
617
            last = torch.multinomial(pert_probs, num_samples=1)

Julien Chaumond's avatar
Julien Chaumond committed
618
        else:
619
620
621
622
623
624
625
626
            _, last = torch.topk(pert_probs, k=1, dim=-1)

        # update context/output_so_far appending the new token
        output_so_far = (
            last if output_so_far is None
            else torch.cat((output_so_far, last), dim=1)
        )

627
        print(tokenizer.decode(output_so_far.tolist()[0]))
628
629

    return output_so_far, unpert_discrim_loss, loss_in_time
Julien Chaumond's avatar
Julien Chaumond committed
630
631


632
633
634
635
636
637
638
639
640
641
642
643
644
645
def set_generic_model_params(discrim_weights, discrim_meta):
    if discrim_weights is None:
        raise ValueError('When using a generic discriminator, '
                         'discrim_weights need to be specified')
    if discrim_meta is None:
        raise ValueError('When using a generic discriminator, '
                         'discrim_meta need to be specified')

    with open(discrim_meta, 'r') as discrim_meta_file:
        meta = json.load(discrim_meta_file)
    meta['path'] = discrim_weights
    DISCRIMINATOR_MODELS_PARAMS['generic'] = meta


646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
def run_pplm_example(
        pretrained_model="gpt2-medium",
        cond_text="",
        uncond=False,
        num_samples=1,
        bag_of_words=None,
        discrim=None,
        discrim_weights=None,
        discrim_meta=None,
        class_label=-1,
        length=100,
        stepsize=0.02,
        temperature=1.0,
        top_k=10,
        sample=False,
        num_iterations=3,
        grad_length=10000,
        horizon_length=1,
        window_length=0,
        decay=False,
        gamma=1.5,
        gm_scale=0.9,
        kl_scale=0.01,
        seed=0,
        no_cuda=False,
        colorama=False
):
673
    # set Random seed
674
675
    torch.manual_seed(seed)
    np.random.seed(seed)
Julien Chaumond's avatar
Julien Chaumond committed
676

677
    # set the device
678
679
680
681
    device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu"

    if discrim == 'generic':
        set_generic_model_params(discrim_weights, discrim_meta)
Julien Chaumond's avatar
Julien Chaumond committed
682

683
684
685
686
    if discrim is not None:
        pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim][
            "pretrained_model"
        ]
687
        print("discrim = {}, pretrained_model set "
688
              "to discriminator's = {}".format(discrim, pretrained_model))
689

690
    # load pretrained model
Julien Chaumond's avatar
Julien Chaumond committed
691
    model = GPT2LMHeadModel.from_pretrained(
692
        pretrained_model,
Julien Chaumond's avatar
Julien Chaumond committed
693
694
695
696
697
        output_hidden_states=True
    )
    model.to(device)
    model.eval()

698
699
700
    # load tokenizer
    tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)

Piero Molino's avatar
Piero Molino committed
701
    # Freeze GPT-2 weights
Julien Chaumond's avatar
Julien Chaumond committed
702
703
704
    for param in model.parameters():
        param.requires_grad = False

705
    # figure out conditioning text
706
707
708
    if uncond:
        tokenized_cond_text = tokenizer.encode(
            [tokenizer.bos_token]
709
        )
Julien Chaumond's avatar
Julien Chaumond committed
710
    else:
711
        raw_text = cond_text
Julien Chaumond's avatar
Julien Chaumond committed
712
        while not raw_text:
713
            print("Did you forget to add `--cond_text`? ")
Julien Chaumond's avatar
Julien Chaumond committed
714
            raw_text = input("Model prompt >>> ")
715
        tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text)
Piero Molino's avatar
Piero Molino committed
716

717
    print("= Prefix of sentence =")
718
    print(tokenizer.decode(tokenized_cond_text))
719
    print()
Piero Molino's avatar
Piero Molino committed
720

721
    # generate unperturbed and perturbed texts
Piero Molino's avatar
Piero Molino committed
722

723
724
725
    # full_text_generation returns:
    # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time
    unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation(
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
        model=model,
        tokenizer=tokenizer,
        context=tokenized_cond_text,
        device=device,
        num_samples=num_samples,
        bag_of_words=bag_of_words,
        discrim=discrim,
        class_label=class_label,
        length=length,
        stepsize=stepsize,
        temperature=temperature,
        top_k=top_k,
        sample=sample,
        num_iterations=num_iterations,
        grad_length=grad_length,
        horizon_length=horizon_length,
        window_length=window_length,
        decay=decay,
        gamma=gamma,
        gm_scale=gm_scale,
        kl_scale=kl_scale,
747
748
749
    )

    # untokenize unperturbed text
750
    unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0])
Piero Molino's avatar
Piero Molino committed
751

752
753
754
755
    print("=" * 80)
    print("= Unperturbed generated text =")
    print(unpert_gen_text)
    print()
Piero Molino's avatar
Piero Molino committed
756

757
758
    generated_texts = []

759
    bow_word_ids = set()
760
761
762
    if bag_of_words and colorama:
        bow_indices = get_bag_of_words_indices(bag_of_words.split(";"),
                                               tokenizer)
763
764
765
766
767
        for single_bow_list in bow_indices:
            # filtering all words in the list composed of more than 1 token
            filtered = list(filter(lambda x: len(x) <= 1, single_bow_list))
            # w[0] because we are sure w has only 1 item because previous fitler
            bow_word_ids.update(w[0] for w in filtered)
768
769
770
771
772

    # iterate through the perturbed texts
    for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts):
        try:
            # untokenize unperturbed text
773
            if colorama:
Piero Molino's avatar
Piero Molino committed
774
775
                import colorama

776
777
                pert_gen_text = ''
                for word_id in pert_gen_tok_text.tolist()[0]:
778
                    if word_id in bow_word_ids:
779
780
                        pert_gen_text += '{}{}{}'.format(
                            colorama.Fore.RED,
781
                            tokenizer.decode([word_id]),
782
783
                            colorama.Style.RESET_ALL
                        )
Piero Molino's avatar
Piero Molino committed
784
                    else:
785
                        pert_gen_text += tokenizer.decode([word_id])
Piero Molino's avatar
Piero Molino committed
786
            else:
787
                pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0])
Julien Chaumond's avatar
Julien Chaumond committed
788

789
790
791
792
793
            print("= Perturbed generated text {} =".format(i + 1))
            print(pert_gen_text)
            print()
        except:
            pass
Julien Chaumond's avatar
Julien Chaumond committed
794

795
796
797
798
        # keep the prefix, perturbed seq, original seq for each index
        generated_texts.append(
            (tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text)
        )
Julien Chaumond's avatar
Julien Chaumond committed
799

Piero Molino's avatar
Piero Molino committed
800
    return
Julien Chaumond's avatar
Julien Chaumond committed
801
802


Piero Molino's avatar
Piero Molino committed
803
if __name__ == '__main__':
804
805
806
807
808
809
810
811
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pretrained_model",
        "-M",
        type=str,
        default="gpt2-medium",
        help="pretrained model name or path to local checkpoint",
    )
812
813
814
815
816
817
818
819
820
821
822
823
824
825
    parser.add_argument(
        "--cond_text", type=str, default="The lake",
        help="Prefix texts to condition on"
    )
    parser.add_argument(
        "--uncond", action="store_true",
        help="Generate from end-of-text as prefix"
    )
    parser.add_argument(
        "--num_samples",
        type=int,
        default=1,
        help="Number of samples to generate from the modified latents",
    )
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
    parser.add_argument(
        "--bag_of_words",
        "-B",
        type=str,
        default=None,
        help="Bags of words used for PPLM-BoW. "
             "Either a BOW id (see list in code) or a filepath. "
             "Multiple BoWs separated by ;",
    )
    parser.add_argument(
        "--discrim",
        "-D",
        type=str,
        default=None,
        choices=("clickbait", "sentiment", "toxicity", "generic"),
        help="Discriminator to use",
    )
    parser.add_argument('--discrim_weights', type=str, default=None,
                        help='Weights for the generic discriminator')
    parser.add_argument('--discrim_meta', type=str, default=None,
                        help='Meta information for the generic discriminator')
    parser.add_argument(
        "--class_label",
        type=int,
        default=-1,
        help="Class label used for the discriminator",
    )
    parser.add_argument("--length", type=int, default=100)
854
    parser.add_argument("--stepsize", type=float, default=0.02)
855
856
857
858
859
860
861
862
863
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_k", type=int, default=10)
    parser.add_argument(
        "--sample", action="store_true",
        help="Generate from end-of-text as prefix"
    )
    parser.add_argument("--num_iterations", type=int, default=3)
    parser.add_argument("--grad_length", type=int, default=10000)
    parser.add_argument(
864
        "--window_length",
865
        type=int,
866
867
868
        default=0,
        help="Length of past which is being optimized; "
             "0 corresponds to infinite window length",
869
870
871
872
873
874
875
876
877
878
    )
    parser.add_argument(
        "--horizon_length",
        type=int,
        default=1,
        help="Length of future to optimize over",
    )
    parser.add_argument("--decay", action="store_true",
                        help="whether to decay or not")
    parser.add_argument("--gamma", type=float, default=1.5)
879
880
881
882
    parser.add_argument("--gm_scale", type=float, default=0.9)
    parser.add_argument("--kl_scale", type=float, default=0.01)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--no_cuda", action="store_true", help="no cuda")
883
884
885
886
887
    parser.add_argument("--colorama", action="store_true",
                        help="colors keywords")

    args = parser.parse_args()
    run_pplm_example(**vars(args))