import pytest
import torch

from copy import copy
from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLM, CausalLMBatch


@pytest.fixture(scope="session")
def default_causal_lm():
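    # Session scope: loading the GPT-2 weights is expensive, so one model
    # instance is shared across all tests in this module.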
    return CausalLM.fallback("gpt2")


@pytest.fixture(scope="session")
def gpt2_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained("gpt2", padding_side="left")
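    # GPT-2 has no pad token; reuse its EOS token id (50256) for left padding.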
    tokenizer.pad_token_id = 50256
    return tokenizer


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_causal_lm_batch(default_pb_batch, gpt2_tokenizer):
    return CausalLMBatch.from_pb(
        default_pb_batch, gpt2_tokenizer, torch.float32, torch.device("cpu")
    )


@pytest.fixture
def default_multi_requests_causal_lm_batch(default_pb_request, gpt2_tokenizer):
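    # req_0 is a copy; req_1 mutates the shared fixture request in place.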
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=1, requests=[req_0, req_1], size=2)
    return CausalLMBatch.from_pb(
        batch_pb, gpt2_tokenizer, torch.float32, torch.device("cpu")
    )


def test_batch_from_pb(default_pb_batch, default_causal_lm_batch):
    batch = default_causal_lm_batch

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    assert len(batch.input_ids) == default_pb_batch.size
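    # "Test" encodes to a single GPT-2 token (14402); the remaining
    # positions are left padding (50256).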
    assert batch.input_ids[0][-1] == 14402
    assert torch.all(batch.input_ids[0][:-1] == 50256)

    assert batch.attention_mask[0, 0] == 1
    assert torch.all(batch.attention_mask[0, 1:] == 0)

    assert batch.past_key_values is None

    assert all(
        [
            torch.equal(input_ids, all_input_ids[:, 0])
            for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
        ]
    )

    assert batch.input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]


def test_batch_concatenate_no_prefill(default_causal_lm_batch):
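    # Concatenation requires past_key_values from a prefill pass; fresh
    # batches that have not been prefilled must raise.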
    with pytest.raises(ValueError):
        CausalLMBatch.concatenate([default_causal_lm_batch, default_causal_lm_batch])


def test_causal_lm_batch_type(default_causal_lm):
    assert default_causal_lm.batch_type == CausalLMBatch



def test_causal_lm_generate_token(default_causal_lm, default_causal_lm_batch):
    sequence_length = len(default_causal_lm_batch.all_input_ids[0])
    generations, next_batch, _ = default_causal_lm.generate_token(
        default_causal_lm_batch
    )

    assert len(generations) == len(next_batch)
    assert isinstance(next_batch, CausalLMBatch)

    assert len(next_batch.all_input_ids) == len(next_batch)
    assert len(next_batch.all_input_ids[0]) == sequence_length + 1
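    # Mask width stays at max_input_length (1) plus the slots reserved for
    # max_new_tokens decode steps (10), i.e. 11 columns.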
    assert len(next_batch.attention_mask[0]) == 11
    assert next_batch.all_input_ids[0][-1] == 13
    assert next_batch.all_input_ids[0][-2] == 14402
    assert torch.all(next_batch.all_input_ids[0][:-2] == 50256)

    assert torch.all(next_batch.attention_mask[0][0:2] == 1)
    assert torch.all(next_batch.attention_mask[0][2:] == 0)

    assert next_batch.input_ids.shape == (len(next_batch), 1)
    assert next_batch.input_ids[0, 0] == 13

    assert next_batch.input_lengths == [2]
    assert next_batch.max_input_length == next_batch.input_lengths[0]

    assert next_batch.past_key_values is not None
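    # GPT-2 small: one (key, value) pair per layer, each of shape
    # (batch, 12 heads, past_seq_len, 64 head_dim).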
    assert all(
        [p[0].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (1, 12, sequence_length, 64) for p in next_batch.past_key_values]
    )
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 13
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == "."
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0


def test_causal_lm_generate_token_completion(
    default_causal_lm, default_causal_lm_batch
):
    next_batch = default_causal_lm_batch
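    # Decode one step short of the token budget; the final generate_token
    # call below completes the request and returns next_batch=None.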
    for _ in range(default_causal_lm_batch.stopping_criterias[0].max_new_tokens - 1):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert generations[0].request_id == default_causal_lm_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )


def test_causal_lm_generate_token_completion_multi(
    default_causal_lm, default_multi_requests_causal_lm_batch
):
    next_batch = default_multi_requests_causal_lm_batch

    for i in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == ".java:784)"
    assert (
        generations[1].request_id
        == default_multi_requests_causal_lm_batch.requests[1].id
    )
    assert (
        generations[1].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
    )
    # Copy stopping_criterias before filtering
    stopping_criterias = (
        default_multi_requests_causal_lm_batch.stopping_criterias.copy()
    )

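    # The second request hit its 5-token limit; keep only the still-running
    # first request.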
    next_batch = next_batch.filter([next_batch.requests[0].id])

    for _ in range(
        stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert (
        generations[0].request_id
        == default_multi_requests_causal_lm_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )


def test_batch_concatenate(
    default_causal_lm, default_causal_lm_batch, default_multi_requests_causal_lm_batch
):
    next_batch_0 = default_causal_lm_batch
    _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)
    _, next_batch_0, _ = default_causal_lm.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_causal_lm_batch
    _, next_batch_1, _ = default_causal_lm.generate_token(next_batch_1)

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
    ]

    next_batch = CausalLMBatch.concatenate([next_batch_0, next_batch_1])

    assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])

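    # Batch 0 has decoded two tokens (length 3); the batch 1 entries have
    # decoded one (length 2) and therefore carry one leading pad position.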
    assert torch.all(
        next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(
        next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(next_batch.attention_mask[1:, 3:] == 0)

    assert next_batch.batch_id == 0
    assert next_batch.input_ids[0, 0] == 12355
    assert torch.all(next_batch.input_ids[1:] == 13)

    assert next_batch.input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == list(next_batch_1.requests)

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all([p[0].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])
    assert all([p[1].shape == (3, 12, 2, 64) for p in next_batch.past_key_values])

    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:], past[0][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, :, -1:], past[1][1:, :, -1:, :]
        )

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens - 2
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == ".java:784)"
    assert (
        generations[2].request_id
        == default_multi_requests_causal_lm_batch.requests[1].id
    )
    assert (
        generations[2].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
    )

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    for _ in range(
        default_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
        - 2
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert generations[0].request_id == default_causal_lm_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )

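    # Only the first request of the multi-request batch is still running.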
    next_batch = next_batch.filter([next_batch.requests[1].id])

    for _ in range(
        default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_causal_lm_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_causal_lm_batch.stopping_criterias[1].max_new_tokens
        - 4
    ):
        generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_causal_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == ".java:784) at net.minecraft."
    assert (
        generations[0].request_id
        == default_multi_requests_causal_lm_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_causal_lm_batch.stopping_criterias[0].max_new_tokens
    )