import pytest
import torch

from copy import copy

from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.seq2seq_lm import Seq2SeqLM, Seq2SeqLMBatch


@pytest.fixture(scope="session")
def mt0_small_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained(
        "bigscience/mt0-small", padding_side="left"
    )
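    # The mt0-small tokenizer does not define a BOS token; pin it to 0 so the
    # batch gets a deterministic decoder start token in these tests.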
    tokenizer.bos_token_id = 0
    return tokenizer


@pytest.fixture(scope="session")
def default_seq2seq_lm():
    return Seq2SeqLM("bigscience/mt0-small")


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        input_chunks=generate_pb2.Input(chunks=[generate_pb2.InputChunk(text="Test")]),
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_seq2seq_lm_batch(default_pb_batch, mt0_small_tokenizer):
    return Seq2SeqLMBatch.from_pb(
        default_pb_batch, mt0_small_tokenizer, torch.float32, torch.device("cpu")
    )


@pytest.fixture
def default_multi_requests_seq2seq_lm_batch(default_pb_request, mt0_small_tokenizer):
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
    return Seq2SeqLMBatch.from_pb(
        batch_pb, mt0_small_tokenizer, torch.float32, torch.device("cpu")
    )


def test_batch_from_pb(default_pb_batch, default_seq2seq_lm_batch):
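    """from_pb left-pads encoder inputs; decoder and cache state stay unset until prefill."""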
    batch = default_seq2seq_lm_batch
    sequence_length = len(default_seq2seq_lm_batch.input_ids[0])

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    assert batch.input_ids.shape == (default_pb_batch.size, sequence_length)
    assert batch.input_ids[0][-2] == 4268
    assert batch.input_ids[0][-1] == 1
    assert torch.all(batch.input_ids[0][:-2] == 0)

    assert torch.all(batch.attention_mask[0][-2:] == 1)
    assert torch.all(batch.attention_mask[0][:-2] == 0)

    assert len(batch.decoder_input_ids) == default_pb_batch.size
    assert batch.decoder_attention_mask is None
    assert batch.encoder_last_hidden_state is None

    assert batch.past_key_values is None

    assert batch.input_lengths == [2]
    assert batch.decoder_input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]
    assert batch.max_decoder_input_length == batch.decoder_input_lengths[0]


def test_batch_concatenate_no_prefill(default_seq2seq_lm_batch):
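    """Concatenating batches that have not been prefilled must raise."""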
    with pytest.raises(ValueError):
        Seq2SeqLMBatch.concatenate([default_seq2seq_lm_batch, default_seq2seq_lm_batch])


def test_seq2seq_lm_batch_type(default_seq2seq_lm):
    assert default_seq2seq_lm.batch_type == Seq2SeqLMBatch


def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch):
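    """One prefill step populates decoder state and past_key_values for the batch."""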
    sequence_length = len(default_seq2seq_lm_batch.input_ids[0])
    generations, next_batch, _ = default_seq2seq_lm.generate_token(
        default_seq2seq_lm_batch
    )

    assert len(generations) == len(next_batch)
    assert isinstance(next_batch, Seq2SeqLMBatch)

    assert next_batch.input_ids is None
    assert torch.equal(
        next_batch.attention_mask, default_seq2seq_lm_batch.attention_mask
    )
    assert next_batch.input_lengths == default_seq2seq_lm_batch.input_lengths
    assert next_batch.max_input_length == default_seq2seq_lm_batch.max_input_length
    assert (
        next_batch.next_token_choosers == default_seq2seq_lm_batch.next_token_choosers
    )
    assert next_batch.stopping_criterias == default_seq2seq_lm_batch.stopping_criterias

    assert len(next_batch.decoder_input_ids) == len(next_batch)
    assert next_batch.all_decoder_input_ids[0][0] == 0
    assert next_batch.all_decoder_input_ids[0][1] == 259
    assert next_batch.decoder_attention_mask is None
    assert next_batch.encoder_last_hidden_state.shape == (1, sequence_length, 512)

    assert next_batch.decoder_input_lengths == [2]
    assert next_batch.max_decoder_input_length == 2

    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [
            p[2].shape == (len(next_batch), 6, sequence_length, 64)
            for p in next_batch.past_key_values
        ]
    )
    assert all(
        [
            p[3].shape == (len(next_batch), 6, sequence_length, 64)
            for p in next_batch.past_key_values
        ]
    )
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 259
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == " "
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0


def test_seq2seq_lm_generate_token_completion(
    default_seq2seq_lm, default_seq2seq_lm_batch
):
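    """Decoding runs to completion after 7 tokens ('a few weeks')."""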
    next_batch = default_seq2seq_lm_batch
    for _ in range(6):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
    assert generations[0].generated_text.generated_tokens == 7


def test_seq2seq_lm_generate_token_completion_multi(
    default_seq2seq_lm, default_multi_requests_seq2seq_lm_batch
):
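    """Requests with different max_new_tokens finish at different steps."""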
    next_batch = default_multi_requests_seq2seq_lm_batch

    for _ in range(4):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == "a few "
    assert (
        generations[1].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[1].id
    )
    assert generations[1].generated_text.generated_tokens == 5

    next_batch = next_batch.filter([next_batch.requests[0].id])

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert (
        generations[0].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[0].id
    )
    assert generations[0].generated_text.generated_tokens == 7


def test_batch_concatenate(
    default_seq2seq_lm,
    default_seq2seq_lm_batch,
    default_multi_requests_seq2seq_lm_batch,
):
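    """Concatenation merges per-request state, masks, and KV caches correctly."""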
    next_batch_0 = default_seq2seq_lm_batch
    _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)
    _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_seq2seq_lm_batch
    _, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1)

    # Copy hidden state because it is removed from the concatenated batches
    next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state
    next_batch_1_encoder_last_hidden_state = next_batch_1.encoder_last_hidden_state

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        [t.clone() for t in layer] for layer in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        [t.clone() for t in layer] for layer in next_batch_1.past_key_values
    ]

    next_batch = Seq2SeqLMBatch.concatenate([next_batch_0, next_batch_1])

    assert next_batch.batch_id == 0

    assert torch.equal(
        next_batch.decoder_input_ids[0], next_batch_0.decoder_input_ids[0]
    )
    assert next_batch.all_decoder_input_ids[1][0] == 0
    assert next_batch.all_decoder_input_ids[2][0] == 0
    assert torch.equal(
        next_batch.decoder_input_ids[1:, -2:], next_batch_1.decoder_input_ids
    )

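    # Batch 0 has decoded one token more, so row 0 covers 3 positions; rows from
    # batch 1 are left-padded to the new max decoder length of 3.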
    assert torch.all(next_batch.decoder_attention_mask[0, :3] == 1)
    assert torch.all(next_batch.decoder_attention_mask[0, 3:] == 0)
    assert torch.all(next_batch.decoder_attention_mask[1:, 0] == 0)
    assert torch.all(next_batch.decoder_attention_mask[1:, 1:3] == 1)

    assert torch.equal(
        next_batch.encoder_last_hidden_state[0],
        next_batch_0_encoder_last_hidden_state[0, -2:],
    )
    assert torch.equal(
        next_batch.encoder_last_hidden_state[1:],
        next_batch_1_encoder_last_hidden_state[:, -2:],
    )

    assert next_batch.input_lengths == [2, 2, 2]
    assert next_batch.decoder_input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 2
    assert next_batch.max_decoder_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == next_batch_1.requests

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[2].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[3].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )

    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:, :], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:, :], past[0][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:, :], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, :, -1:, :], past[1][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][2][0, :, -2:, :], past[2][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][2][:, :, -2:, :], past[2][1:]
        )

        assert torch.equal(next_batch_0_past_key_values[i][3][0, :, -2:, :], past[3][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][3][:, :, -2:, :], past[3][1:]
        )

    for _ in range(3):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == "a few "
    assert (
        generations[2].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[1].id
    )
    assert generations[2].generated_text.generated_tokens == 5

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[0].generated_text.text == "a few weeks"
    assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
    assert generations[0].generated_text.generated_tokens == 7

    next_batch = next_batch.filter([next_batch.requests[1].id])

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert (
        generations[0].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[0].id
    )
    assert generations[0].generated_text.generated_tokens == 7