run_pplm.py 27.6 KB
Newer Older
Piero Molino's avatar
Piero Molino committed
1
#! /usr/bin/env python3
Julien Chaumond's avatar
Julien Chaumond committed
2
# coding=utf-8
Rosanne Liu's avatar
Rosanne Liu committed
3

4
# Copyright (c) 2019 Uber Technologies, Inc.
Julien Chaumond's avatar
Julien Chaumond committed
5
#
6
7
8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
Julien Chaumond's avatar
Julien Chaumond committed
9
#
10
# http://www.apache.org/licenses/LICENSE-2.0
Julien Chaumond's avatar
Julien Chaumond committed
11
#
12
13
14
15
16
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Julien Chaumond's avatar
Julien Chaumond committed
17
18
19

"""
Example command with bag of words:
20
python run_pplm.py -B space --cond_text "The president" --length 100 --gamma 1.5 --num_iterations 3 --num_samples 10 --stepsize 0.01 --window_length 5 --kl_scale 0.01 --gm_scale 0.95
Julien Chaumond's avatar
Julien Chaumond committed
21
22

Example command with discriminator:
23
python run_pplm.py -D sentiment --class_label 3 --cond_text "The lake" --length 10 --gamma 1.0 --num_iterations 30 --num_samples 10 --stepsize 0.01 --kl_scale 0.01 --gm_scale 0.95
Julien Chaumond's avatar
Julien Chaumond committed
24
25
26
"""

import argparse
27
import json
Julien Chaumond's avatar
Julien Chaumond committed
28
29
30
31
32
33
34
35
from operator import add
from typing import List, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from tqdm import trange

Aymeric Augustin's avatar
Aymeric Augustin committed
36
from pplm_classification_head import ClassificationHead
Sylvain Gugger's avatar
Sylvain Gugger committed
37
from transformers import GPT2LMHeadModel, GPT2Tokenizer
Julien Chaumond's avatar
Julien Chaumond committed
38
from transformers.file_utils import cached_path
Aymeric Augustin's avatar
Aymeric Augustin committed
39

Julien Chaumond's avatar
Julien Chaumond committed
40
41
42
43
44

PPLM_BOW = 1
PPLM_DISCRIM = 2
PPLM_BOW_DISCRIM = 3
SMALL_CONST = 1e-15
45
BIG_CONST = 1e10
Julien Chaumond's avatar
Julien Chaumond committed
46
47

BAG_OF_WORDS_ARCHIVE_MAP = {
48
49
50
51
52
53
54
    "legal": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/legal.txt",
    "military": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/military.txt",
    "politics": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/politics.txt",
    "religion": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/religion.txt",
    "science": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/science.txt",
    "space": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/space.txt",
    "technology": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/bow/technology.txt",
Julien Chaumond's avatar
Julien Chaumond committed
55
56
57
58
}

DISCRIMINATOR_MODELS_PARAMS = {
    "clickbait": {
Julien Chaumond's avatar
Julien Chaumond committed
59
        "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/clickbait_classifier_head.pt",
Julien Chaumond's avatar
Julien Chaumond committed
60
61
62
63
        "class_size": 2,
        "embed_size": 1024,
        "class_vocab": {"non_clickbait": 0, "clickbait": 1},
        "default_class": 1,
64
        "pretrained_model": "gpt2-medium",
Julien Chaumond's avatar
Julien Chaumond committed
65
66
    },
    "sentiment": {
Julien Chaumond's avatar
Julien Chaumond committed
67
        "url": "https://s3.amazonaws.com/models.huggingface.co/bert/pplm/discriminators/SST_classifier_head.pt",
Julien Chaumond's avatar
Julien Chaumond committed
68
69
70
71
        "class_size": 5,
        "embed_size": 1024,
        "class_vocab": {"very_positive": 2, "very_negative": 3},
        "default_class": 3,
72
        "pretrained_model": "gpt2-medium",
Julien Chaumond's avatar
Julien Chaumond committed
73
74
75
76
    },
}


Piero Molino's avatar
Piero Molino committed
77
78
79
80
81
82
83
84
85
86
87
88
def top_k_filter(logits, k, probs=False):
    """
    Masks everything but the k top entries as -infinity (1e10).
    Used to mask logits such that e^-infinity -> 0 won't contribute to the
    sum of the denominator.
    """
    if k == 0:
        return logits
    else:
        values = torch.topk(logits, k)[0]
        batch_mins = values[:, -1].view(-1, 1).expand_as(logits)
        if probs:
89
90
            return torch.where(logits < batch_mins, torch.ones_like(logits) * 0.0, logits)
        return torch.where(logits < batch_mins, torch.ones_like(logits) * -BIG_CONST, logits)
Piero Molino's avatar
Piero Molino committed
91
92


93
def perturb_past(
    past,
    model,
    last,
    unpert_past=None,
    unpert_logits=None,
    accumulated_hidden=None,
    grad_norms=None,
    stepsize=0.01,
    one_hot_bows_vectors=None,
    classifier=None,
    class_label=None,
    loss_type=0,
    num_iterations=3,
    horizon_length=1,
    window_length=0,
    decay=False,
    gamma=1.5,
    kl_scale=0.01,
    device="cuda",
):
    """Perturb the model's key/value cache ("past") by gradient ascent on the PPLM loss.

    Runs ``num_iterations`` steps: each step forwards the model with the
    perturbed past, computes the attribute loss (bag-of-words and/or
    discriminator) plus a KL term against the unperturbed distribution,
    backprops into the perturbation, and accumulates the normalized
    gradient into ``grad_accumulator``.

    Returns a tuple ``(pert_past, new_accumulated_hidden, grad_norms,
    loss_per_iter)``.

    NOTE(review): ``past`` entries are 5-D tensors — dim 3 is the sequence
    length (see the unpacking below); presumably the leading dims stack
    key/value, batch and heads (legacy GPT-2 cache layout) — confirm
    against the transformers version in use.
    """
    # Generate initial (zero) perturbation, one float32 array per past tensor.
    grad_accumulator = [(np.zeros(p.shape).astype("float32")) for p in past]

    if accumulated_hidden is None:
        accumulated_hidden = 0

    # Optional linear ramp over the window so that older positions within
    # the window are perturbed less than recent ones.
    if decay:
        decay_mask = torch.arange(0.0, 1.0 + SMALL_CONST, 1.0 / (window_length))[1:]
    else:
        decay_mask = 1.0

    # Build a mask restricting the perturbation to the last `window_length`
    # positions of the past (all positions when window_length == 0).
    _, _, _, curr_length, _ = past[0].shape

    if curr_length > window_length and window_length > 0:
        ones_key_val_shape = tuple(past[0].shape[:-2]) + tuple([window_length]) + tuple(past[0].shape[-1:])

        zeros_key_val_shape = (
            tuple(past[0].shape[:-2]) + tuple([curr_length - window_length]) + tuple(past[0].shape[-1:])
        )

        # Permute so the decay ramp broadcasts along the sequence axis,
        # then permute back.
        ones_mask = torch.ones(ones_key_val_shape)
        ones_mask = decay_mask * ones_mask.permute(0, 1, 2, 4, 3)
        ones_mask = ones_mask.permute(0, 1, 2, 4, 3)

        window_mask = torch.cat((ones_mask, torch.zeros(zeros_key_val_shape)), dim=-2).to(device)
    else:
        window_mask = torch.ones_like(past[0]).to(device)

    # accumulate perturbations for num_iterations
    loss_per_iter = []
    new_accumulated_hidden = None
    for i in range(num_iterations):
        print("Iteration ", i + 1)
        # Fresh leaf tensors each iteration so gradients don't accumulate
        # across iterations in the perturbation itself.
        curr_perturbation = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator]

        # make sure p_.grad is not None
        for p_ in curr_perturbation:
            p_.retain_grad()

        # Compute hidden using perturbed past
        perturbed_past = list(map(add, past, curr_perturbation))
        _, _, _, curr_length, _ = curr_perturbation[0].shape
        all_logits, _, all_hidden = model(last, past=perturbed_past)
        hidden = all_hidden[-1]
        # Running sum of last-layer hidden states; detached so the BoW/KL
        # graph does not flow through it.
        new_accumulated_hidden = accumulated_hidden + torch.sum(hidden, dim=1).detach()
        # TODO: Check the layer-norm consistency of this with trained discriminator (Sumanth)
        logits = all_logits[:, -1, :]
        probs = F.softmax(logits, dim=-1)

        loss = 0.0
        loss_list = []
        # Bag-of-words loss: -log of total probability mass assigned to the
        # bag's (single-token) words.
        if loss_type == PPLM_BOW or loss_type == PPLM_BOW_DISCRIM:
            for one_hot_bow in one_hot_bows_vectors:
                bow_logits = torch.mm(probs, torch.t(one_hot_bow))
                bow_loss = -torch.log(torch.sum(bow_logits))
                loss += bow_loss
                loss_list.append(bow_loss)
            print(" pplm_bow_loss:", loss.data.cpu().numpy())

        # Discriminator loss (loss_type PPLM_DISCRIM or PPLM_BOW_DISCRIM):
        # roll the model forward `horizon_length` soft steps, then classify
        # the mean hidden state.
        if loss_type == 2 or loss_type == 3:
            ce_loss = torch.nn.CrossEntropyLoss()
            # TODO why we need to do this assignment and not just using unpert_past? (Sumanth)
            curr_unpert_past = unpert_past
            curr_probs = torch.unsqueeze(probs, dim=1)
            # resize_token_embeddings() with no argument returns the input
            # embedding matrix; used here to embed the soft distribution.
            wte = model.resize_token_embeddings()
            for _ in range(horizon_length):
                inputs_embeds = torch.matmul(curr_probs, wte.weight.data)
                _, curr_unpert_past, curr_all_hidden = model(past=curr_unpert_past, inputs_embeds=inputs_embeds)
                curr_hidden = curr_all_hidden[-1]
                new_accumulated_hidden = new_accumulated_hidden + torch.sum(curr_hidden, dim=1)

            # Average hidden state over all positions seen so far plus horizon.
            prediction = classifier(new_accumulated_hidden / (curr_length + 1 + horizon_length))

            label = torch.tensor(prediction.shape[0] * [class_label], device=device, dtype=torch.long)
            discrim_loss = ce_loss(prediction, label)
            print(" pplm_discrim_loss:", discrim_loss.data.cpu().numpy())
            loss += discrim_loss
            loss_list.append(discrim_loss)

        # KL regularizer keeping the perturbed next-token distribution close
        # to the unperturbed one; SMALL_CONST guards against log(0).
        kl_loss = 0.0
        if kl_scale > 0.0:
            unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1)
            unpert_probs = unpert_probs + SMALL_CONST * (unpert_probs <= SMALL_CONST).float().to(device).detach()
            correction = SMALL_CONST * (probs <= SMALL_CONST).float().to(device).detach()
            corrected_probs = probs + correction.detach()
            kl_loss = kl_scale * ((corrected_probs * (corrected_probs / unpert_probs).log()).sum())
            print(" kl_loss", kl_loss.data.cpu().numpy())
            loss += kl_loss

        loss_per_iter.append(loss.data.cpu().numpy())
        print(" pplm_loss", (loss - kl_loss).data.cpu().numpy())

        # compute gradients
        loss.backward()

        # calculate gradient norms; for BoW the norm is the running max over
        # iterations, otherwise the current (masked) norm plus SMALL_CONST.
        if grad_norms is not None and loss_type == PPLM_BOW:
            grad_norms = [
                torch.max(grad_norms[index], torch.norm(p_.grad * window_mask))
                for index, p_ in enumerate(curr_perturbation)
            ]
        else:
            grad_norms = [
                (torch.norm(p_.grad * window_mask) + SMALL_CONST) for index, p_ in enumerate(curr_perturbation)
            ]

        # normalize gradients and take a (negative, i.e. descent on the loss)
        # step of size `stepsize`, damped by gamma.
        grad = [
            -stepsize * (p_.grad * window_mask / grad_norms[index] ** gamma).data.cpu().numpy()
            for index, p_ in enumerate(curr_perturbation)
        ]

        # accumulate gradient
        grad_accumulator = list(map(add, grad, grad_accumulator))

        # reset gradients, just to make sure
        for p_ in curr_perturbation:
            p_.grad.data.zero_()

        # removing past from the graph
        new_past = []
        for p_ in past:
            new_past.append(p_.detach())
        past = new_past

    # apply the accumulated perturbations to the past
    grad_accumulator = [torch.from_numpy(p_).requires_grad_(True).to(device=device) for p_ in grad_accumulator]
    pert_past = list(map(add, past, grad_accumulator))

    return pert_past, new_accumulated_hidden, grad_norms, loss_per_iter
Julien Chaumond's avatar
Julien Chaumond committed
245
246
247


def get_classifier(
    name: Optional[str], class_label: Union[str, int], device: str
) -> Tuple[Optional[ClassificationHead], Optional[int]]:
    """Load a pretrained discriminator head and resolve the target class id.

    ``name`` selects an entry of DISCRIMINATOR_MODELS_PARAMS (None disables
    the discriminator entirely).  ``class_label`` may be a class name or a
    numeric id; unknown values fall back to the entry's default class with
    a warning printed to stdout.
    """
    if name is None:
        return None, None

    discrim_params = DISCRIMINATOR_MODELS_PARAMS[name]
    classifier = ClassificationHead(
        class_size=discrim_params["class_size"], embed_size=discrim_params["embed_size"]
    ).to(device)

    # Head weights come either from a remote archive or a local file path.
    if "url" in discrim_params:
        weights_file = cached_path(discrim_params["url"])
    elif "path" in discrim_params:
        weights_file = discrim_params["path"]
    else:
        raise ValueError("Either url or path have to be specified in the discriminator model parameters")
    classifier.load_state_dict(torch.load(weights_file, map_location=device))
    classifier.eval()

    def _fall_back_to_default():
        # Warn and use the configured default class id.
        default_id = discrim_params["default_class"]
        print("class_label {} not in class_vocab".format(class_label))
        print("available values are: {}".format(discrim_params["class_vocab"]))
        print("using default class {}".format(default_id))
        return default_id

    if isinstance(class_label, str):
        vocab = discrim_params["class_vocab"]
        label_id = vocab[class_label] if class_label in vocab else _fall_back_to_default()
    elif isinstance(class_label, int):
        known_ids = set(discrim_params["class_vocab"].values())
        label_id = class_label if class_label in known_ids else _fall_back_to_default()
    else:
        # Neither str nor int: silently use the default (no warning, as before).
        label_id = discrim_params["default_class"]

    return classifier, label_id


288
def get_bag_of_words_indices(bag_of_words_ids_or_paths: List[str], tokenizer) -> List[List[List[int]]]:
    """Tokenize each bag-of-words list into token ids.

    Each entry is either a key of BAG_OF_WORDS_ARCHIVE_MAP (fetched and
    cached) or a path to a local word list with one word per line.
    Returns, per bag, a list of token-id lists — one per word.
    """
    all_bags = []
    for id_or_path in bag_of_words_ids_or_paths:
        # Known archive ids are downloaded; anything else is a local path.
        if id_or_path in BAG_OF_WORDS_ARCHIVE_MAP:
            filepath = cached_path(BAG_OF_WORDS_ARCHIVE_MAP[id_or_path])
        else:
            filepath = id_or_path
        with open(filepath, "r") as word_file:
            words = word_file.read().strip().split("\n")
        encoded_words = [tokenizer.encode(word.strip(), add_prefix_space=True) for word in words]
        all_bags.append(encoded_words)
    return all_bags


301
def build_bows_one_hot_vectors(bow_indices, tokenizer, device="cuda"):
    """Turn tokenized bags of words into one-hot matrices over the vocabulary.

    Words that encode to more than one token are discarded (the BoW loss
    only handles single-token words).  Returns one tensor of shape
    (num_words, vocab_size) per bag, or None when no bag is supplied.
    """
    if bow_indices is None:
        return None

    one_hot_vectors = []
    for bag in bow_indices:
        # Keep only words that map to at most one token id.
        single_token_ids = [ids for ids in bag if len(ids) <= 1]
        id_tensor = torch.tensor(single_token_ids).to(device)
        one_hot = torch.zeros(id_tensor.shape[0], tokenizer.vocab_size).to(device)
        one_hot.scatter_(1, id_tensor, 1)
        one_hot_vectors.append(one_hot)
    return one_hot_vectors


316
def full_text_generation(
    model,
    tokenizer,
    context=None,
    num_samples=1,
    device="cuda",
    bag_of_words=None,
    discrim=None,
    class_label=None,
    length=100,
    stepsize=0.02,
    temperature=1.0,
    top_k=10,
    sample=False,
    num_iterations=3,
    grad_length=10000,
    horizon_length=1,
    window_length=0,
    decay=False,
    gamma=1.5,
    gm_scale=0.9,
    kl_scale=0.01,
    repetition_penalty=1.0,
    **kwargs
):
    """Generate one unperturbed and ``num_samples`` perturbed sequences.

    Resolves the attribute controllers (bag-of-words file(s) and/or a
    discriminator head), derives the loss type, then calls
    ``generate_text_pplm`` once without perturbation (baseline) and
    ``num_samples`` times with perturbation.

    Returns ``(unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses,
    losses_in_time)``.  Raises when neither a bag of words nor a
    discriminator is specified.  Extra ``**kwargs`` are ignored.
    """
    classifier, class_id = get_classifier(discrim, class_label, device)

    # Multiple bags can be passed separated by ";".
    bow_indices = []
    if bag_of_words:
        bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer)

    if bag_of_words and classifier:
        print("Both PPLM-BoW and PPLM-Discrim are on. This is not optimized.")
        loss_type = PPLM_BOW_DISCRIM

    elif bag_of_words:
        loss_type = PPLM_BOW
        print("Using PPLM-BoW")

    elif classifier is not None:
        loss_type = PPLM_DISCRIM
        print("Using PPLM-Discrim")

    else:
        raise Exception("Specify either a bag of words or a discriminator")

    # Baseline generation without any perturbation, for comparison.
    unpert_gen_tok_text, _, _ = generate_text_pplm(
        model=model,
        tokenizer=tokenizer,
        context=context,
        device=device,
        length=length,
        sample=sample,
        perturb=False,
        repetition_penalty=repetition_penalty,
    )
    if device == "cuda":
        torch.cuda.empty_cache()

    pert_gen_tok_texts = []
    discrim_losses = []
    losses_in_time = []

    for i in range(num_samples):
        pert_gen_tok_text, discrim_loss, loss_in_time = generate_text_pplm(
            model=model,
            tokenizer=tokenizer,
            context=context,
            device=device,
            perturb=True,
            bow_indices=bow_indices,
            classifier=classifier,
            class_label=class_id,
            loss_type=loss_type,
            length=length,
            stepsize=stepsize,
            temperature=temperature,
            top_k=top_k,
            sample=sample,
            num_iterations=num_iterations,
            grad_length=grad_length,
            horizon_length=horizon_length,
            window_length=window_length,
            decay=decay,
            gamma=gamma,
            gm_scale=gm_scale,
            kl_scale=kl_scale,
            repetition_penalty=repetition_penalty,
        )
        pert_gen_tok_texts.append(pert_gen_tok_text)
        # discrim_loss is only a tensor when a discriminator was used.
        if classifier is not None:
            discrim_losses.append(discrim_loss.data.cpu().numpy())
        losses_in_time.append(loss_in_time)

    if device == "cuda":
        torch.cuda.empty_cache()

    return unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time
Julien Chaumond's avatar
Julien Chaumond committed
414

415
416

def generate_text_pplm(
    model,
    tokenizer,
    context=None,
    past=None,
    device="cuda",
    perturb=True,
    bow_indices=None,
    classifier=None,
    class_label=None,
    loss_type=0,
    length=100,
    stepsize=0.02,
    temperature=1.0,
    top_k=10,
    sample=False,
    num_iterations=3,
    grad_length=10000,
    horizon_length=1,
    window_length=0,
    decay=False,
    gamma=1.5,
    gm_scale=0.9,
    kl_scale=0.01,
    repetition_penalty=1.0,
):
    """Generate ``length`` tokens, optionally steering each step with PPLM.

    For each step: run the unperturbed model, optionally perturb the
    key/value cache via ``perturb_past``, fuse the perturbed and
    unperturbed next-token distributions (geometric mean weighted by
    ``gm_scale``), top-k filter, then sample or take the argmax.

    Returns ``(output_so_far, unpert_discrim_loss, loss_in_time)`` where
    ``output_so_far`` is a (1, seq_len) tensor of token ids including the
    context.

    NOTE(review): relies on the legacy transformers GPT-2 interface —
    ``model(last, past=...)`` returning (logits, past, hidden_states);
    confirm against the installed transformers version.
    """
    # Promote the context token list to a (1, seq_len) tensor.
    output_so_far = None
    if context:
        context_t = torch.tensor(context, device=device, dtype=torch.long)
        while len(context_t.shape) < 2:
            context_t = context_t.unsqueeze(0)
        output_so_far = context_t

    # collect one hot vectors for bags of words
    one_hot_bows_vectors = build_bows_one_hot_vectors(bow_indices, tokenizer, device)

    grad_norms = None
    last = None
    unpert_discrim_loss = 0
    loss_in_time = []
    for i in trange(length, ascii=True):

        # Get past/probs for current output, except for last word
        # Note that GPT takes 2 inputs: past + current_token

        # run model forward to obtain unperturbed past for everything but
        # the latest token (only needed on the first step, when past is None)
        if past is None and output_so_far is not None:
            last = output_so_far[:, -1:]
            if output_so_far.shape[1] > 1:
                _, past, _ = model(output_so_far[:, :-1])

        unpert_logits, unpert_past, unpert_all_hidden = model(output_so_far)
        unpert_last_hidden = unpert_all_hidden[-1]

        # check if we are above the max perturbation length: beyond
        # grad_length the stepsize is zeroed, disabling further steering
        if i >= grad_length:
            current_stepsize = stepsize * 0
        else:
            current_stepsize = stepsize

        # modify the past if necessary
        if not perturb or num_iterations == 0:
            pert_past = past

        else:
            # Sum of last-layer hidden states over all but the latest position.
            accumulated_hidden = unpert_last_hidden[:, :-1, :]
            accumulated_hidden = torch.sum(accumulated_hidden, dim=1)

            if past is not None:
                pert_past, _, grad_norms, loss_this_iter = perturb_past(
                    past,
                    model,
                    last,
                    unpert_past=unpert_past,
                    unpert_logits=unpert_logits,
                    accumulated_hidden=accumulated_hidden,
                    grad_norms=grad_norms,
                    stepsize=current_stepsize,
                    one_hot_bows_vectors=one_hot_bows_vectors,
                    classifier=classifier,
                    class_label=class_label,
                    loss_type=loss_type,
                    num_iterations=num_iterations,
                    horizon_length=horizon_length,
                    window_length=window_length,
                    decay=decay,
                    gamma=gamma,
                    kl_scale=kl_scale,
                    device=device,
                )
                loss_in_time.append(loss_this_iter)
            else:
                pert_past = past

        pert_logits, past, pert_all_hidden = model(last, past=pert_past)
        pert_logits = pert_logits[:, -1, :] / temperature  # + SMALL_CONST

        # Repetition penalty (CTRL-style): dampen logits of already-emitted
        # tokens; direction of the adjustment depends on the logit's sign.
        for token_idx in set(output_so_far[0].tolist()):
            if pert_logits[0, token_idx] < 0:
                pert_logits[0, token_idx] *= repetition_penalty
            else:
                pert_logits[0, token_idx] /= repetition_penalty

        pert_probs = F.softmax(pert_logits, dim=-1)

        # Report the discriminator loss of the *unperturbed* model, as a
        # diagnostic of how on-attribute the base generation already is.
        if classifier is not None:
            ce_loss = torch.nn.CrossEntropyLoss()
            prediction = classifier(torch.mean(unpert_last_hidden, dim=1))
            label = torch.tensor([class_label], device=device, dtype=torch.long)
            unpert_discrim_loss = ce_loss(prediction, label)
            print("unperturbed discrim loss", unpert_discrim_loss.data.cpu().numpy())
        else:
            unpert_discrim_loss = 0

        # Fuse the modified model and original model
        if perturb:

            unpert_probs = F.softmax(unpert_logits[:, -1, :], dim=-1)

            # Weighted geometric mean of perturbed and unperturbed distributions.
            pert_probs = (pert_probs ** gm_scale) * (unpert_probs ** (1 - gm_scale))  # + SMALL_CONST
            pert_probs = top_k_filter(pert_probs, k=top_k, probs=True)  # + SMALL_CONST

            # rescale back to a proper distribution after fusing/filtering
            if torch.sum(pert_probs) <= 1:
                pert_probs = pert_probs / torch.sum(pert_probs)

        else:
            pert_logits = top_k_filter(pert_logits, k=top_k)  # + SMALL_CONST
            pert_probs = F.softmax(pert_logits, dim=-1)

        # sample or greedy
        if sample:
            last = torch.multinomial(pert_probs, num_samples=1)

        else:
            _, last = torch.topk(pert_probs, k=1, dim=-1)

        # update context/output_so_far appending the new token
        output_so_far = last if output_so_far is None else torch.cat((output_so_far, last), dim=1)

        print(tokenizer.decode(output_so_far.tolist()[0]))

    return output_so_far, unpert_discrim_loss, loss_in_time
Julien Chaumond's avatar
Julien Chaumond committed
559
560


561
562
def set_generic_model_params(discrim_weights, discrim_meta):
    """Register a user-supplied ("generic") discriminator.

    Loads the JSON meta description, attaches the local weights path under
    the "path" key, and stores the result as the "generic" entry of
    DISCRIMINATOR_MODELS_PARAMS so get_classifier can load it like any
    built-in discriminator.  Raises ValueError when either argument is
    missing.
    """
    if discrim_weights is None:
        raise ValueError("When using a generic discriminator, discrim_weights need to be specified")
    if discrim_meta is None:
        raise ValueError("When using a generic discriminator, discrim_meta need to be specified")

    with open(discrim_meta, "r") as meta_file:
        generic_params = json.load(meta_file)
    generic_params["path"] = discrim_weights
    DISCRIMINATOR_MODELS_PARAMS["generic"] = generic_params
571
572


573
def run_pplm_example(
    pretrained_model="gpt2-medium",
    cond_text="",
    uncond=False,
    num_samples=1,
    bag_of_words=None,
    discrim=None,
    discrim_weights=None,
    discrim_meta=None,
    class_label=-1,
    length=100,
    stepsize=0.02,
    temperature=1.0,
    top_k=10,
    sample=False,
    num_iterations=3,
    grad_length=10000,
    horizon_length=1,
    window_length=0,
    decay=False,
    gamma=1.5,
    gm_scale=0.9,
    kl_scale=0.01,
    seed=0,
    no_cuda=False,
    colorama=False,
    repetition_penalty=1.0,
):
    """Run one PPLM generation session and print the results.

    Loads the (frozen) GPT-2 language model and tokenizer, builds the
    conditioning prefix, generates one unperturbed sample plus ``num_samples``
    perturbed samples via ``full_text_generation``, and prints all of them.

    Args:
        pretrained_model: GPT-2 model name or local checkpoint path; overridden
            by the discriminator's own base model when ``discrim`` is set.
        cond_text: prefix text to condition on (prompted interactively if empty
            and ``uncond`` is False).
        uncond: if True, condition only on the BOS token.
        num_samples: number of perturbed samples to generate.
        bag_of_words: BoW id(s) or filepath(s), ";"-separated, for PPLM-BoW.
        discrim / discrim_weights / discrim_meta / class_label: discriminator
            configuration for PPLM-Discrim ("generic" requires weights + meta).
        length, stepsize, temperature, top_k, sample, num_iterations,
        grad_length, horizon_length, window_length, decay, gamma, gm_scale,
        kl_scale, repetition_penalty: generation/perturbation hyperparameters,
        forwarded to ``full_text_generation``.
        seed: random seed for torch and numpy.
        no_cuda: force CPU even if CUDA is available.
        colorama: if True, highlight BoW keywords in red in the printed output.

    Returns:
        List of (tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text)
        tuples, one per perturbed sample.
    """
    # set Random seed
    torch.manual_seed(seed)
    np.random.seed(seed)

    # set the device
    device = "cuda" if torch.cuda.is_available() and not no_cuda else "cpu"

    if discrim == "generic":
        set_generic_model_params(discrim_weights, discrim_meta)

    # A discriminator was trained against a specific base LM; use that one.
    if discrim is not None:
        pretrained_model = DISCRIMINATOR_MODELS_PARAMS[discrim]["pretrained_model"]
        print("discrim = {}, pretrained_model set to discriminator's = {}".format(discrim, pretrained_model))

    # load pretrained model (hidden states are needed for the perturbation)
    model = GPT2LMHeadModel.from_pretrained(pretrained_model, output_hidden_states=True)
    model.to(device)
    model.eval()

    # load tokenizer
    tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model)

    # Freeze GPT-2 weights: PPLM only perturbs the activations, never the model.
    for param in model.parameters():
        param.requires_grad = False

    # figure out conditioning text
    if uncond:
        tokenized_cond_text = tokenizer.encode([tokenizer.bos_token])
    else:
        raw_text = cond_text
        while not raw_text:
            print("Did you forget to add `--cond_text`? ")
            raw_text = input("Model prompt >>> ")
        tokenized_cond_text = tokenizer.encode(tokenizer.bos_token + raw_text)

    print("= Prefix of sentence =")
    print(tokenizer.decode(tokenized_cond_text))
    print()

    # generate unperturbed and perturbed texts

    # full_text_generation returns:
    # unpert_gen_tok_text, pert_gen_tok_texts, discrim_losses, losses_in_time
    unpert_gen_tok_text, pert_gen_tok_texts, _, _ = full_text_generation(
        model=model,
        tokenizer=tokenizer,
        context=tokenized_cond_text,
        device=device,
        num_samples=num_samples,
        bag_of_words=bag_of_words,
        discrim=discrim,
        class_label=class_label,
        length=length,
        stepsize=stepsize,
        temperature=temperature,
        top_k=top_k,
        sample=sample,
        num_iterations=num_iterations,
        grad_length=grad_length,
        horizon_length=horizon_length,
        window_length=window_length,
        decay=decay,
        gamma=gamma,
        gm_scale=gm_scale,
        kl_scale=kl_scale,
        repetition_penalty=repetition_penalty,
    )

    # untokenize unperturbed text
    unpert_gen_text = tokenizer.decode(unpert_gen_tok_text.tolist()[0])

    print("=" * 80)
    print("= Unperturbed generated text =")
    print(unpert_gen_text)
    print()

    generated_texts = []

    # Collect the ids of single-token BoW words so they can be highlighted.
    bow_word_ids = set()
    if bag_of_words and colorama:
        bow_indices = get_bag_of_words_indices(bag_of_words.split(";"), tokenizer)
        for single_bow_list in bow_indices:
            # filtering all words in the list composed of more than 1 token
            filtered = list(filter(lambda x: len(x) <= 1, single_bow_list))
            # w[0] because we are sure w has only 1 item because previous filter
            bow_word_ids.update(w[0] for w in filtered)

    # iterate through the perturbed texts
    for i, pert_gen_tok_text in enumerate(pert_gen_tok_texts):
        try:
            # untokenize perturbed text
            if colorama:
                # BUG FIX: `import colorama` used to shadow the boolean
                # `colorama` parameter; import only the needed names instead.
                from colorama import Fore, Style

                pert_gen_text = ""
                for word_id in pert_gen_tok_text.tolist()[0]:
                    if word_id in bow_word_ids:
                        pert_gen_text += "{}{}{}".format(
                            Fore.RED,
                            tokenizer.decode([word_id]),
                            Style.RESET_ALL,
                        )
                    else:
                        pert_gen_text += tokenizer.decode([word_id])
            else:
                pert_gen_text = tokenizer.decode(pert_gen_tok_text.tolist()[0])

            print("= Perturbed generated text {} =".format(i + 1))
            print(pert_gen_text)
            print()
        except Exception as exc:
            print("Ignoring error while generating perturbed text:", exc)

        # keep the prefix, perturbed seq, original seq for each index
        generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text))

    # BUG FIX: the collected results used to be discarded by a bare `return`;
    # returning them is backward-compatible (the CLI caller ignores the value).
    return generated_texts
Julien Chaumond's avatar
Julien Chaumond committed
719
720


721
if __name__ == "__main__":
    # CLI entry point: parse hyperparameters and forward them to
    # run_pplm_example (argument names match its keyword parameters exactly).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pretrained_model",
        "-M",
        type=str,
        default="gpt2-medium",
        help="pretrained model name or path to local checkpoint",
    )
    parser.add_argument("--cond_text", type=str, default="The lake", help="Prefix texts to condition on")
    parser.add_argument("--uncond", action="store_true", help="Generate from end-of-text as prefix")
    parser.add_argument(
        "--num_samples",
        type=int,
        default=1,
        help="Number of samples to generate from the modified latents",
    )
    parser.add_argument(
        "--bag_of_words",
        "-B",
        type=str,
        default=None,
        help=(
            "Bags of words used for PPLM-BoW. "
            "Either a BOW id (see list in code) or a filepath. "
            "Multiple BoWs separated by ;"
        ),
    )
    parser.add_argument(
        "--discrim",
        "-D",
        type=str,
        default=None,
        choices=("clickbait", "sentiment", "toxicity", "generic"),
        help="Discriminator to use",
    )
    parser.add_argument(
        "--discrim_weights",
        type=str,
        default=None,
        help="Weights for the generic discriminator",
    )
    parser.add_argument(
        "--discrim_meta",
        type=str,
        default=None,
        help="Meta information for the generic discriminator",
    )
    parser.add_argument(
        "--class_label",
        type=int,
        default=-1,
        help="Class label used for the discriminator",
    )
    parser.add_argument("--length", type=int, default=100)
    parser.add_argument("--stepsize", type=float, default=0.02)
    parser.add_argument("--temperature", type=float, default=1.0)
    parser.add_argument("--top_k", type=int, default=10)
    # BUG FIX: help text was copy-pasted from --uncond; --sample actually
    # toggles multinomial sampling instead of greedy top-1 decoding.
    parser.add_argument(
        "--sample", action="store_true", help="Sample tokens from the distribution instead of greedy decoding"
    )
    parser.add_argument("--num_iterations", type=int, default=3)
    parser.add_argument("--grad_length", type=int, default=10000)
    parser.add_argument(
        "--window_length",
        type=int,
        default=0,
        help="Length of past which is being optimized; 0 corresponds to infinite window length",
    )
    parser.add_argument(
        "--horizon_length",
        type=int,
        default=1,
        help="Length of future to optimize over",
    )
    parser.add_argument("--decay", action="store_true", help="whether to decay or not")
    parser.add_argument("--gamma", type=float, default=1.5)
    parser.add_argument("--gm_scale", type=float, default=0.9)
    parser.add_argument("--kl_scale", type=float, default=0.01)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--no_cuda", action="store_true", help="no cuda")
    parser.add_argument("--colorama", action="store_true", help="colors keywords")
    parser.add_argument(
        "--repetition_penalty",
        type=float,
        default=1.0,
        help="Penalize repetition. More than 1.0 -> less repetition",
    )

    args = parser.parse_args()
    run_pplm_example(**vars(args))