import pytest
import torch

from copy import copy

from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.causal_lm import CausalLMBatch
from text_generation_server.utils import weight_hub_files, download_weights
from text_generation_server.models.bloom import BloomCausalLMBatch, BLOOMSharded
from text_generation_server.models.custom_modeling.bloom_modeling import (
    BloomForCausalLM,
)


@pytest.fixture(scope="session")
def default_bloom():
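    # Session-scoped: fetch the bloom-560m safetensors weights once and build
    # the sharded BLOOM model shared by all tests in this module.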
    model_id = "bigscience/bloom-560m"
    revision = "main"
    filenames = weight_hub_files(model_id, revision, ".safetensors")
    download_weights(filenames, model_id, revision)
    return BLOOMSharded(
        model_id,
        model_class=BloomForCausalLM,
    )


@pytest.fixture(scope="session")
def bloom_560m_tokenizer():
    return AutoTokenizer.from_pretrained("bigscience/bloom-560m", padding_side="left")


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
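    # The prompt is carried both as the legacy `inputs` string and as the
    # newer chunked `input_chunks` field; both hold the same "Test" text.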
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_bloom_batch(default_pb_batch, bloom_560m_tokenizer):
    return BloomCausalLMBatch.from_pb(
        default_pb_batch, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
    )


@pytest.fixture
def default_multi_requests_bloom_batch(default_pb_request, bloom_560m_tokenizer):
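    # Two requests built from the default one; req_1 reuses (and mutates) the
    # original and stops after only 5 new tokens, so it completes first.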
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
    return BloomCausalLMBatch.from_pb(
        batch_pb, bloom_560m_tokenizer, torch.float32, torch.device("cpu")
    )


def test_batch_from_pb(default_pb_batch, default_bloom_batch):
    batch = default_bloom_batch

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

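    # "Test" encodes to the single token id 10264; every other position holds
    # the BLOOM pad token id 3 (left padding).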
    assert len(batch.input_ids) == default_pb_batch.size
    assert batch.input_ids[0][-1] == 10264
    assert torch.all(batch.input_ids[0][:-1] == 3)

    assert batch.attention_mask[0][0] == 1
    assert torch.all(batch.attention_mask[0][1:] == 0)

    assert batch.past_key_values is None

    assert all(
        [
            torch.equal(input_ids, all_input_ids[:, 0])
            for input_ids, all_input_ids in zip(batch.input_ids, batch.all_input_ids)
        ]
    )

    assert batch.input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]


def test_batch_concatenate_no_prefill(default_bloom_batch):
    with pytest.raises(ValueError):
        BloomCausalLMBatch.concatenate([default_bloom_batch, default_bloom_batch])


def test_causal_lm_batch_type(default_bloom):
    assert default_bloom.batch_type == BloomCausalLMBatch


def test_causal_lm_generate_token(default_bloom, default_bloom_batch):
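    # A single generate_token step over the one-token prompt: prefill runs,
    # one new token is appended, and the KV cache is populated.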
    sequence_length = len(default_bloom_batch.all_input_ids[0])
    generations, next_batch, _ = default_bloom.generate_token(default_bloom_batch)

    assert len(generations) == len(default_bloom_batch)

    assert isinstance(next_batch, CausalLMBatch)
    assert not next_batch.keys_head_dim_last

    assert len(next_batch.all_input_ids) == len(next_batch)
    assert len(next_batch.all_input_ids[0]) == sequence_length + 1
    assert len(next_batch.attention_mask[0]) == 11
    assert torch.all(next_batch.all_input_ids[0][-2:] == 10264)
    assert torch.all(next_batch.all_input_ids[0][:-2] == 3)

    assert torch.all(next_batch.attention_mask[0][:2] == 1)
    assert torch.all(next_batch.attention_mask[0][2:] == 0)

    assert next_batch.input_ids.shape == (len(next_batch), 1)
    assert next_batch.input_ids[0, 0] == 10264

    assert next_batch.input_lengths == [2]
    assert next_batch.max_input_length == next_batch.input_lengths[0]

    assert next_batch.past_key_values is not None
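    # BLOOM caches keys as (num_heads, head_dim, seq_len) and values as
    # (num_heads, seq_len, head_dim) — hence keys_head_dim_last is False
    # above; bloom-560m has 16 heads with head dim 64.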
    assert all(
        [p[0].shape == (16, 64, sequence_length) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (16, sequence_length, 64) for p in next_batch.past_key_values]
    )

    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 10264
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == "Test"
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0


def test_causal_lm_generate_token_completion(default_bloom, default_bloom_batch):
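    # Decode until max_new_tokens triggers; the final call yields the full
    # generated text and returns no next batch.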
    next_batch = default_bloom_batch
    for _ in range(default_bloom_batch.stopping_criterias[0].max_new_tokens - 1):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(default_bloom_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert (
        generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
    )
    assert generations[0].request_id == default_bloom_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_bloom_batch.stopping_criterias[0].max_new_tokens
    )


def test_causal_lm_generate_token_completion_multi(
    default_bloom, default_multi_requests_bloom_batch
):
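    # The second request stops after 5 new tokens and finishes first; the
    # surviving request is then filtered into its own batch and run to
    # completion.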
    next_batch = default_multi_requests_bloom_batch

    for i in range(
        default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(default_multi_requests_bloom_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == "TestTestTestTestTest"
    assert (
        generations[1].request_id == default_multi_requests_bloom_batch.requests[1].id
    )
    assert (
        generations[1].generated_text.generated_tokens
        == default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
    )

    # Copy stopping_criterias before filtering
    stopping_criterias = default_multi_requests_bloom_batch.stopping_criterias.copy()

    next_batch = next_batch.filter([next_batch.requests[0].id])

    for _ in range(
        stopping_criterias[0].max_new_tokens - stopping_criterias[1].max_new_tokens - 1
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert (
        generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
    )
    assert (
        generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
    )


def test_batch_concatenate(
    default_bloom, default_bloom_batch, default_multi_requests_bloom_batch
):
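    # Advance the single-request batch two steps and the multi-request batch
    # one step, so the batches carry different sequence lengths into
    # concatenate().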
    next_batch_0 = default_bloom_batch
    _, next_batch_0, _ = default_bloom.generate_token(next_batch_0)
    _, next_batch_0, _ = default_bloom.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_bloom_batch
    _, next_batch_1, _ = default_bloom.generate_token(next_batch_1)

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        (k.clone(), v.clone()) for (k, v) in next_batch_1.past_key_values
    ]

    next_batch = BloomCausalLMBatch.concatenate([next_batch_0, next_batch_1])

    assert torch.equal(next_batch.all_input_ids[0], next_batch_0.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[1], next_batch_1.all_input_ids[0])
    assert torch.equal(next_batch.all_input_ids[2], next_batch_1.all_input_ids[1])

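    # Batch 0 has seen two decode steps (length 3); the rows from batch 1
    # (length 2) keep one extra left-padding position in the attention mask.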
    assert torch.all(
        next_batch.attention_mask[0, : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(
        next_batch.attention_mask[1:, 1 : -next_batch.padding_right_offset] == 1
    )
    assert torch.all(next_batch.attention_mask[1:, 3:] == 0)

    assert next_batch.batch_id == 0
    assert torch.all(next_batch.input_ids == 10264)

    assert next_batch.input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == list(next_batch_1.requests)

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all([p[0].shape == (3, 16, 64, 2) for p in next_batch.past_key_values])
    assert all([p[1].shape == (3, 16, 2, 64) for p in next_batch.past_key_values])

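    # The concatenated cache is padded to the longest sequence; each original
    # batch's keys/values must occupy the most recent positions of its slice.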
    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][:, :, -2:], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:],
            past[0][1:, :, :, -1].reshape(-1, 64, 1),
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][:, -2:, :], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, -1:, :],
            past[1][1:, :, -1, :].reshape(-1, 1, 64),
        )

    for _ in range(
        default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens - 2
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == "TestTestTestTestTest"
    assert (
        generations[2].request_id == default_multi_requests_bloom_batch.requests[1].id
    )
    assert (
        generations[2].generated_text.generated_tokens
        == default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
    )

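    # The 5-token request has finished; keep the two requests that are still
    # generating.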
    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    for _ in range(
        default_bloom_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
        - 2
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert (
        generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
    )
    assert generations[0].request_id == default_bloom_batch.requests[0].id
    assert (
        generations[0].generated_text.generated_tokens
        == default_bloom_batch.stopping_criterias[0].max_new_tokens
    )

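    # The single-request batch is now exhausted; only the remaining request
    # from the multi-request batch is left.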
    next_batch = next_batch.filter([next_batch.requests[1].id])

    for _ in range(
        default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
        - default_bloom_batch.stopping_criterias[0].max_new_tokens
        - default_multi_requests_bloom_batch.stopping_criterias[1].max_new_tokens
        - 4
    ):
        generations, next_batch, _ = default_bloom.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_bloom.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert (
        generations[0].generated_text.text == "TestTestTestTestTestTestTestTestTestTest"
    )
    assert (
        generations[0].request_id == default_multi_requests_bloom_batch.requests[0].id
    )
    assert (
        generations[0].generated_text.generated_tokens
        == default_multi_requests_bloom_batch.stopping_criterias[0].max_new_tokens
    )