import pytest
import torch

from copy import copy

from transformers import AutoTokenizer

from text_generation_server.pb import generate_pb2
from text_generation_server.models.seq2seq_lm import Seq2SeqLM, Seq2SeqLMBatch


@pytest.fixture(scope="session")
def mt0_small_tokenizer():
    tokenizer = AutoTokenizer.from_pretrained(
        "bigscience/mt0-small", padding_side="left"
    )
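    # mt0 (mT5-based) defines no BOS token; id 0 (the pad token) is reused as the decoder start token.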
    tokenizer.bos_token_id = 0
    return tokenizer


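# Session-scoped so the model is downloaded and loaded only once per test run.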
@pytest.fixture(scope="session")
def default_seq2seq_lm():
    return Seq2SeqLM("bigscience/mt0-small")


@pytest.fixture
def default_pb_request(default_pb_parameters, default_pb_stop_parameters):
    return generate_pb2.Request(
        id=0,
        inputs="Test",
        prefill_logprobs=True,
        truncate=100,
        parameters=default_pb_parameters,
        stopping_parameters=default_pb_stop_parameters,
    )


@pytest.fixture
def default_pb_batch(default_pb_request):
    return generate_pb2.Batch(id=0, requests=[default_pb_request], size=1)


@pytest.fixture
def default_seq2seq_lm_batch(default_pb_batch, mt0_small_tokenizer):
    return Seq2SeqLMBatch.from_pb(
        default_pb_batch, mt0_small_tokenizer, torch.float32, torch.device("cpu")
    )


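# Two copies of the default request; req_1 is capped at 5 new tokens so it finishes first.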
@pytest.fixture
def default_multi_requests_seq2seq_lm_batch(default_pb_request, mt0_small_tokenizer):
    req_0 = copy(default_pb_request)
    req_0.id = 1
    req_1 = default_pb_request
    req_1.id = 2
    req_1.stopping_parameters.max_new_tokens = 5

    batch_pb = generate_pb2.Batch(id=0, requests=[req_0, req_1], size=2)
    return Seq2SeqLMBatch.from_pb(
        batch_pb, mt0_small_tokenizer, torch.float32, torch.device("cpu")
    )


def test_batch_from_pb(default_pb_batch, default_seq2seq_lm_batch):
    batch = default_seq2seq_lm_batch
    sequence_length = len(default_seq2seq_lm_batch.input_ids[0])

    assert batch.batch_id == default_pb_batch.id
    assert batch.requests == default_pb_batch.requests

    assert batch.input_ids.shape == (default_pb_batch.size, sequence_length)
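    # "Test" encodes to token 4268 followed by EOS (id 1); any left padding would be pad id 0.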
    assert batch.input_ids[0][-2] == 4268
    assert batch.input_ids[0][-1] == 1
    assert torch.all(batch.input_ids[0][:-2] == 0)

    assert torch.all(batch.attention_mask[0][-2:] == 1)
    assert torch.all(batch.attention_mask[0][:-2] == 0)

    assert len(batch.decoder_input_ids) == default_pb_batch.size
    assert batch.decoder_attention_mask is None
    assert batch.encoder_last_hidden_state is None

    assert batch.past_key_values is None

    assert batch.input_lengths == [2]
    assert batch.decoder_input_lengths == [1]

    assert len(batch) == default_pb_batch.size
    assert len(batch.next_token_choosers) == len(batch.stopping_criterias) == len(batch)

    assert batch.max_input_length == batch.input_lengths[0]
    assert batch.max_decoder_input_length == batch.decoder_input_lengths[0]


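# Batches fresh from from_pb have not run prefill yet (no encoder state), so concatenate rejects them.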
def test_batch_concatenate_no_prefill(default_seq2seq_lm_batch):
    with pytest.raises(ValueError):
        Seq2SeqLMBatch.concatenate([default_seq2seq_lm_batch, default_seq2seq_lm_batch])


def test_seq2seq_lm_batch_type(default_seq2seq_lm):
    assert default_seq2seq_lm.batch_type == Seq2SeqLMBatch


def test_seq2seq_lm_generate_token(default_seq2seq_lm, default_seq2seq_lm_batch):
    sequence_length = len(default_seq2seq_lm_batch.input_ids[0])
    generations, next_batch, _ = default_seq2seq_lm.generate_token(
        default_seq2seq_lm_batch
    )

    assert len(generations) == len(next_batch)
    assert isinstance(next_batch, Seq2SeqLMBatch)

    assert next_batch.input_ids is None
    assert torch.equal(
        next_batch.attention_mask, default_seq2seq_lm_batch.attention_mask
    )
    assert next_batch.input_lengths == default_seq2seq_lm_batch.input_lengths
    assert next_batch.max_input_length == default_seq2seq_lm_batch.max_input_length
    assert (
        next_batch.next_token_choosers == default_seq2seq_lm_batch.next_token_choosers
    )
    assert next_batch.stopping_criterias == default_seq2seq_lm_batch.stopping_criterias

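    # After one decode step the decoder holds the start token (0) plus the first generated token.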
    assert len(next_batch.decoder_input_ids) == len(next_batch)
    assert next_batch.all_decoder_input_ids[0][0] == 0
    assert next_batch.all_decoder_input_ids[0][1] == 259
    assert next_batch.decoder_attention_mask is None
    assert next_batch.encoder_last_hidden_state.shape == (1, sequence_length, 512)

    assert next_batch.decoder_input_lengths == [2]
    assert next_batch.max_decoder_input_length == 2

    assert next_batch.past_key_values is not None
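    # Per layer the cache is (self-attn key, self-attn value, cross-attn key, cross-attn value);
    # the shapes below reflect mt0-small's 6 heads of dimension 64.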
    assert all(
        [p[0].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (len(next_batch), 6, 1, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [
            p[2].shape == (len(next_batch), 6, sequence_length, 64)
            for p in next_batch.past_key_values
        ]
    )
    assert all(
        [
            p[3].shape == (len(next_batch), 6, sequence_length, 64)
            for p in next_batch.past_key_values
        ]
    )
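
    # One decode step in: nothing is finished yet, and prefill covered a single decoder token.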
    assert all([generation.generated_text is None for generation in generations])
    assert all([len(generation.prefill_tokens) == 1 for generation in generations])
    assert all(
        [
            token_id.item() == 259
            for generation in generations
            for token_id in generation.tokens.token_ids
        ]
    )
    assert all(
        [
            token_text == " "
            for generation in generations
            for token_text in generation.tokens.texts
        ]
    )
    assert generations[0].request_id == 0


def test_seq2seq_lm_generate_token_completion(
    default_seq2seq_lm, default_seq2seq_lm_batch
):
    next_batch = default_seq2seq_lm_batch
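    # For this prompt, mt0-small needs 7 decode steps to finish "a few weeks".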
    for _ in range(6):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
    assert generations[0].generated_text.generated_tokens == 7


def test_seq2seq_lm_generate_token_completion_multi(
    default_seq2seq_lm, default_multi_requests_seq2seq_lm_batch
):
    next_batch = default_multi_requests_seq2seq_lm_batch
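    # req_1 hits its max_new_tokens=5 limit first, while req_0 keeps generating.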

    for _ in range(4):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[1].generated_text.text == "a few "
    assert (
        generations[1].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[1].id
    )
    assert generations[1].generated_text.generated_tokens == 5

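    # Drop the finished request and continue decoding with the remaining one.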
    next_batch = next_batch.filter([next_batch.requests[0].id])

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert (
        generations[0].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[0].id
    )
    assert generations[0].generated_text.generated_tokens == 7


def test_batch_concatenate(
    default_seq2seq_lm,
    default_seq2seq_lm_batch,
    default_multi_requests_seq2seq_lm_batch,
):
    next_batch_0 = default_seq2seq_lm_batch
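    # Advance the batches unevenly (two steps vs. one) so concatenation has to re-pad.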
    _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)
    _, next_batch_0, _ = default_seq2seq_lm.generate_token(next_batch_0)

    next_batch_1 = default_multi_requests_seq2seq_lm_batch
    _, next_batch_1, _ = default_seq2seq_lm.generate_token(next_batch_1)

    # Copy hidden state because it is removed from the concatenated batches
    next_batch_0_encoder_last_hidden_state = next_batch_0.encoder_last_hidden_state
    next_batch_1_encoder_last_hidden_state = next_batch_1.encoder_last_hidden_state

    # Clone past_key_values before concatenating to compare after,
    # because they are removed from the concatenated batches
    next_batch_0_past_key_values = [
        [t.clone() for t in layer] for layer in next_batch_0.past_key_values
    ]
    next_batch_1_past_key_values = [
        [t.clone() for t in layer] for layer in next_batch_1.past_key_values
    ]

    next_batch = Seq2SeqLMBatch.concatenate([next_batch_0, next_batch_1])

    assert next_batch.batch_id == 0

    assert torch.equal(
        next_batch.decoder_input_ids[0], next_batch_0.decoder_input_ids[0]
    )
    assert next_batch.all_decoder_input_ids[1][0] == 0
    assert next_batch.all_decoder_input_ids[2][0] == 0
    assert torch.equal(
        next_batch.decoder_input_ids[1:, -2:], next_batch_1.decoder_input_ids
    )

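    # Row 0 (3 decoder tokens) is fully attended; batch 1 rows are left-padded with one zero,
    # and positions past the current max decoder length stay zero.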
    assert torch.all(next_batch.decoder_attention_mask[0, :3] == 1)
    assert torch.all(next_batch.decoder_attention_mask[0, 3:] == 0)
    assert torch.all(next_batch.decoder_attention_mask[1:, 0] == 0)
    assert torch.all(next_batch.decoder_attention_mask[1:, 1:3] == 1)

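    # Concatenation trims encoder states to the shared max_input_length (2 here).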
    assert torch.equal(
        next_batch.encoder_last_hidden_state[0],
        next_batch_0_encoder_last_hidden_state[0, -2:],
    )
    assert torch.equal(
        next_batch.encoder_last_hidden_state[1:],
        next_batch_1_encoder_last_hidden_state[:, -2:],
    )

    assert next_batch.input_lengths == [2, 2, 2]
    assert next_batch.decoder_input_lengths == [3, 2, 2]
    assert next_batch.max_input_length == 2
    assert next_batch.max_decoder_input_length == 3

    assert next_batch.requests[0] == next_batch_0.requests[0]
    assert next_batch.requests[1:] == next_batch_1.requests

    assert next_batch.next_token_choosers[0] == next_batch_0.next_token_choosers[0]
    assert next_batch.next_token_choosers[1:] == next_batch_1.next_token_choosers

    assert next_batch.stopping_criterias[0] == next_batch_0.stopping_criterias[0]
    assert next_batch.stopping_criterias[1:] == next_batch_1.stopping_criterias

    assert next_batch.past_key_values is not None
    assert all(
        [p[0].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[1].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[2].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )
    assert all(
        [p[3].shape == (len(next_batch), 6, 2, 64) for p in next_batch.past_key_values]
    )

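    # Self-attention caches (indices 0/1) were sliced to the new decoder length,
    # while cross-attention caches (indices 2/3) keep the encoder sequence length.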
    for i, past in enumerate(next_batch.past_key_values):
        assert torch.equal(next_batch_0_past_key_values[i][0][0, :, -2:, :], past[0][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][0][:, :, -1:, :], past[0][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][1][0, :, -2:, :], past[1][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][1][:, :, -1:, :], past[1][1:, :, -1:, :]
        )

        assert torch.equal(next_batch_0_past_key_values[i][2][0, :, -2:, :], past[2][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][2][:, :, -2:, :], past[2][1:]
        )

        assert torch.equal(next_batch_0_past_key_values[i][3][0, :, -2:, :], past[3][0])
        assert torch.equal(
            next_batch_1_past_key_values[i][3][:, :, -2:, :], past[3][1:]
        )

    for _ in range(3):
        generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
        assert len(generations) == len(next_batch)

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 3
    assert generations[2].generated_text.text == "a few "
    assert (
        generations[2].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[1].id
    )
    assert generations[2].generated_text.generated_tokens == 5

    next_batch = next_batch.filter(
        [next_batch.requests[0].id, next_batch.requests[1].id]
    )

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is not None

    assert len(generations) == 2
    assert generations[0].generated_text.text == "a few weeks"
    assert generations[0].request_id == default_seq2seq_lm_batch.requests[0].id
    assert generations[0].generated_text.generated_tokens == 7

    next_batch = next_batch.filter([next_batch.requests[1].id])

    generations, next_batch, _ = default_seq2seq_lm.generate_token(next_batch)
    assert next_batch is None

    assert len(generations) == 1
    assert generations[0].generated_text.text == "a few weeks"
    assert (
        generations[0].request_id
        == default_multi_requests_seq2seq_lm_batch.requests[0].id
    )
    assert generations[0].generated_text.generated_tokens == 7