import asyncio
import contextlib
import json
import math
import os
import random
import shutil
import subprocess
import sys
import tempfile
import time
from typing import Dict, List, Optional

import docker
import pytest
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
from docker.errors import NotFound
from syrupy.extensions.json import JSONSnapshotExtension
from text_generation import AsyncClient
from text_generation.types import (
    BestOfSequence,
    ChatComplete,
    ChatCompletionChunk,
    ChatCompletionComplete,
    Completion,
    Details,
    Grammar,
    InputToken,
    Response,
    Token,
)

DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
HF_TOKEN = os.getenv("HF_TOKEN", None)
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")
DOCKER_DEVICES = os.getenv("DOCKER_DEVICES")


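# Pytest plumbing: --release opts in to tests marked @pytest.mark.release;
# without the flag those tests are skipped at collection time.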
def pytest_addoption(parser):
    parser.addoption(
        "--release", action="store_true", default=False, help="run release tests"
    )


def pytest_configure(config):
    config.addinivalue_line("markers", "release: mark test as a release-only test")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--release"):
        # --release given in cli: do not skip release tests
        return
    skip_release = pytest.mark.skip(reason="need --release option to run")
    for item in items:
        if "release" in item.keywords:
            item.add_marker(skip_release)


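# Snapshot comparator that checks generation payloads semantically: ids, texts
# and flags must match exactly, while logprobs only have to agree within rtol.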
class ResponseComparator(JSONSnapshotExtension):
    rtol = 0.2
    ignore_logprob = False

    def serialize(
        self,
        data,
        *,
        exclude=None,
        matcher=None,
    ):
        if (
            isinstance(data, Response)
            or isinstance(data, ChatComplete)
            or isinstance(data, ChatCompletionChunk)
            or isinstance(data, ChatCompletionComplete)
        ):
            data = data.model_dump()

        if isinstance(data, List):
            data = [d.model_dump() for d in data]

        data = self._filter(
            data=data, depth=0, path=(), exclude=exclude, matcher=matcher
        )
        return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"

    def matches(
        self,
        *,
        serialized_data,
        snapshot_data,
    ) -> bool:
        def convert_data(data):
            data = json.loads(data)
            if isinstance(data, Dict) and "choices" in data:
                choices = data["choices"]
                if isinstance(choices, List) and len(choices) >= 1:
                    if "delta" in choices[0]:
                        return ChatCompletionChunk(**data)
                    if "text" in choices[0]:
                        return Completion(**data)
                return ChatComplete(**data)

            if isinstance(data, Dict):
                return Response(**data)
            if isinstance(data, List):
                if (
                    len(data) > 0
                    and "object" in data[0]
                    and data[0]["object"] == "text_completion"
                ):
                    return [Completion(**d) for d in data]
                return [Response(**d) for d in data]
            raise NotImplementedError

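        # Tokens are equal when id, text and the special flag match; logprobs
        # only have to be close (within rtol), or are skipped entirely when
        # ignore_logprob is set.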
        def eq_token(token: Token, other: Token) -> bool:
            return (
                token.id == other.id
                and token.text == other.text
                and (
                    self.ignore_logprob
                    or math.isclose(token.logprob, other.logprob, rel_tol=self.rtol)
                )
                and token.special == other.special
            )

        def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:
            try:
                return (
                    prefill_token.id == other.id
                    and prefill_token.text == other.text
                    and (
                        self.ignore_logprob
                        or math.isclose(
                            prefill_token.logprob,
                            other.logprob,
                            rel_tol=self.rtol,
                        )
                        if prefill_token.logprob is not None
                        else prefill_token.logprob == other.logprob
                    )
                )
            except TypeError:
                return False

        def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
            )

        def eq_details(details: Details, other: Details) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
                and (
                    len(details.best_of_sequences)
                    if details.best_of_sequences is not None
                    else 0
                )
                == (
                    len(other.best_of_sequences)
                    if other.best_of_sequences is not None
                    else 0
                )
                and (
                    all(
                        [
                            eq_best_of(d, o)
                            for d, o in zip(
                                details.best_of_sequences, other.best_of_sequences
                            )
                        ]
                    )
                    if details.best_of_sequences is not None
                    else details.best_of_sequences == other.best_of_sequences
                )
            )

        def eq_completion(response: Completion, other: Completion) -> bool:
            return response.choices[0].text == other.choices[0].text

        def eq_chat_complete(response: ChatComplete, other: ChatComplete) -> bool:
            return (
                response.choices[0].message.content == other.choices[0].message.content
            )

        def eq_chat_complete_chunk(
            response: ChatCompletionChunk, other: ChatCompletionChunk
        ) -> bool:
            return response.choices[0].delta.content == other.choices[0].delta.content

        def eq_response(response: Response, other: Response) -> bool:
            return response.generated_text == other.generated_text and eq_details(
                response.details, other.details
            )

        serialized_data = convert_data(serialized_data)
        snapshot_data = convert_data(snapshot_data)

        if not isinstance(serialized_data, List):
            serialized_data = [serialized_data]
        if not isinstance(snapshot_data, List):
            snapshot_data = [snapshot_data]

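        # Dispatch on the payload type: lengths must match and every pair of
        # entries must compare equal.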
        if isinstance(serialized_data[0], Completion):
            return len(snapshot_data) == len(serialized_data) and all(
                [eq_completion(r, o) for r, o in zip(serialized_data, snapshot_data)]
            )

        if isinstance(serialized_data[0], ChatComplete):
            return len(snapshot_data) == len(serialized_data) and all(
                [eq_chat_complete(r, o) for r, o in zip(serialized_data, snapshot_data)]
            )

        if isinstance(serialized_data[0], ChatCompletionChunk):
            return len(snapshot_data) == len(serialized_data) and all(
                [
                    eq_chat_complete_chunk(r, o)
                    for r, o in zip(serialized_data, snapshot_data)
                ]
            )

        return len(snapshot_data) == len(serialized_data) and all(
            [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
        )


class GenerousResponseComparator(ResponseComparator):
    # Needed for GPTQ with exllama which has serious numerical fluctuations.
    rtol = 0.75


class IgnoreLogProbResponseComparator(ResponseComparator):
    ignore_logprob = True


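# Handle to a running launcher (local subprocess or docker container).
# health() keeps poking the server with a tiny generate request until it
# answers, and raises early if the underlying process/container died.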
class LauncherHandle:
    def __init__(self, port: int):
        self.client = AsyncClient(f"http://localhost:{port}")

    def _inner_health(self):
        raise NotImplementedError

    async def health(self, timeout: int = 60):
        assert timeout > 0
        for _ in range(timeout):
            if not self._inner_health():
                raise RuntimeError("Launcher crashed")

            try:
                await self.client.generate("test")
                return
            except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
                time.sleep(1)
        raise RuntimeError("Health check failed")


class ContainerLauncherHandle(LauncherHandle):
    def __init__(self, docker_client, container_name, port: int):
        super(ContainerLauncherHandle, self).__init__(port)
        self.docker_client = docker_client
        self.container_name = container_name

    def _inner_health(self) -> bool:
        container = self.docker_client.containers.get(self.container_name)
        return container.status in ["running", "created"]


class ProcessLauncherHandle(LauncherHandle):
    def __init__(self, process, port: int):
        super(ProcessLauncherHandle, self).__init__(port)
        self.process = process

    def _inner_health(self) -> bool:
        return self.process.poll() is None


@pytest.fixture
def response_snapshot(snapshot):
    return snapshot.use_extension(ResponseComparator)


@pytest.fixture
def generous_response_snapshot(snapshot):
    return snapshot.use_extension(GenerousResponseComparator)


@pytest.fixture
def ignore_logprob_response_snapshot(snapshot):
    return snapshot.use_extension(IgnoreLogProbResponseComparator)


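# Module-scoped loop so async tests in a module can share the same server fixtures.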
@pytest.fixture(scope="module")
def event_loop():
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()


@pytest.fixture(scope="module")
def launcher(event_loop):
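    # Local mode: run text-generation-launcher as a subprocess on this host.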
    @contextlib.contextmanager
    def local_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
        revision: Optional[str] = None,
        max_input_length: Optional[int] = None,
        max_batch_prefill_tokens: Optional[int] = None,
        max_total_tokens: Optional[int] = None,
        lora_adapters: Optional[List[str]] = None,
        cuda_graphs: Optional[List[int]] = None,
    ):
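        # Random ports reduce collisions when several launchers run in parallel.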
        port = random.randint(8000, 10_000)
        master_port = random.randint(10_000, 20_000)

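        # One UDS path per model/shard/quantize combination to avoid clashes.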
        shard_uds_path = (
            f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server"
        )

        args = [
            "text-generation-launcher",
            "--model-id",
            model_id,
            "--port",
            str(port),
            "--master-port",
            str(master_port),
            "--shard-uds-path",
            shard_uds_path,
        ]

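        # Note: this aliases os.environ, so the tweaks below affect the test
        # process itself; USE_FLASH_ATTENTION is explicitly removed again after
        # the launcher exits.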
        env = os.environ

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if revision is not None:
            args.append("--revision")
            args.append(revision)
        if trust_remote_code:
            args.append("--trust-remote-code")
        if max_input_length:
            args.append("--max-input-length")
            args.append(str(max_input_length))
        if max_batch_prefill_tokens:
            args.append("--max-batch-prefill-tokens")
            args.append(str(max_batch_prefill_tokens))
        if max_total_tokens:
            args.append("--max-total-tokens")
            args.append(str(max_total_tokens))
        if lora_adapters:
            args.append("--lora-adapters")
            args.append(",".join(lora_adapters))
        if cuda_graphs:
            args.append("--cuda-graphs")
            args.append(",".join(map(str, cuda_graphs)))

        print(" ".join(args), file=sys.stderr)

        env["LOG_LEVEL"] = "info,text_generation_router=debug"

        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"

        with tempfile.TemporaryFile("w+") as tmp:
            # We'll output stdout/stderr to a temporary file. Using a pipe
            # causes the process to block until stdout is read.
            with subprocess.Popen(
                args,
                stdout=tmp,
                stderr=subprocess.STDOUT,
                env=env,
            ) as process:
                yield ProcessLauncherHandle(process, port)

                process.terminate()
                process.wait(60)

                tmp.seek(0)
                shutil.copyfileobj(tmp, sys.stderr)

        if not use_flash_attention:
            del env["USE_FLASH_ATTENTION"]

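    # Docker mode: run the same launcher inside the DOCKER_IMAGE container.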
    @contextlib.contextmanager
    def docker_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
        revision: Optional[str] = None,
        max_input_length: Optional[int] = None,
        max_batch_prefill_tokens: Optional[int] = None,
        max_total_tokens: Optional[int] = None,
        lora_adapters: Optional[List[str]] = None,
        cuda_graphs: Optional[List[int]] = None,
    ):
        port = random.randint(8000, 10_000)

        args = ["--model-id", model_id, "--env"]

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if revision is not None:
            args.append("--revision")
            args.append(revision)
        if trust_remote_code:
            args.append("--trust-remote-code")
        if max_input_length:
            args.append("--max-input-length")
            args.append(str(max_input_length))
        if max_batch_prefill_tokens:
            args.append("--max-batch-prefill-tokens")
            args.append(str(max_batch_prefill_tokens))
        if max_total_tokens:
            args.append("--max-total-tokens")
            args.append(str(max_total_tokens))
        if lora_adapters:
            args.append("--lora-adapters")
            args.append(",".join(lora_adapters))
        if cuda_graphs:
            args.append("--cuda-graphs")
            args.append(",".join(map(str, cuda_graphs)))

        client = docker.from_env()

        container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}"

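        # Stop any stale container left over from a previous run of this model.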
        try:
            container = client.containers.get(container_name)
            container.stop()
            container.wait()
        except NotFound:
            pass

        gpu_count = num_shard if num_shard is not None else 1

        env = {
            "LOG_LEVEL": "info,text_generation_router=debug",
        }
        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"

        if HF_TOKEN is not None:
            env["HF_TOKEN"] = HF_TOKEN

        volumes = []
        if DOCKER_VOLUME:
            volumes = [f"{DOCKER_VOLUME}:/data"]

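        # With explicit DOCKER_DEVICES (e.g. ROCm), pass the devices straight
        # through; otherwise request gpu_count GPUs via the device-request API.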
        if DOCKER_DEVICES:
            devices = DOCKER_DEVICES.split(",")
            visible = os.getenv("ROCR_VISIBLE_DEVICES")
            if visible:
                env["ROCR_VISIBLE_DEVICES"] = visible
            device_requests = []
        else:
            devices = []
            device_requests = [
                docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
            ]

        container = client.containers.run(
            DOCKER_IMAGE,
            command=args,
            name=container_name,
            environment=env,
            auto_remove=False,
            detach=True,
            device_requests=device_requests,
            devices=devices,
            volumes=volumes,
            ports={"80/tcp": port},
            shm_size="1G",
        )

        yield ContainerLauncherHandle(client, container.name, port)

        if not use_flash_attention:
            del env["USE_FLASH_ATTENTION"]

        try:
            container.stop()
            container.wait()
        except NotFound:
            pass

        container_output = container.logs().decode("utf-8")
        print(container_output, file=sys.stderr)

        container.remove()

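    # Prefer the docker launcher whenever a DOCKER_IMAGE is configured.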
    if DOCKER_IMAGE is not None:
        return docker_launcher
    return local_launcher


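# Fires n identical generate requests concurrently to exercise batching.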
@pytest.fixture(scope="module")
def generate_load():
    async def generate_load_inner(
        client: AsyncClient,
        prompt: str,
        max_new_tokens: int,
        n: int,
        seed: Optional[int] = None,
        grammar: Optional[Grammar] = None,
        stop_sequences: Optional[List[str]] = None,
    ) -> List[Response]:
        futures = [
            client.generate(
                prompt,
                max_new_tokens=max_new_tokens,
                decoder_input_details=True,
                seed=seed,
                grammar=grammar,
                stop_sequences=stop_sequences,
            )
            for _ in range(n)
        ]

        return await asyncio.gather(*futures)

    return generate_load_inner
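
# Example usage (a sketch: the model id, fixture names and assertions below are
# illustrative, not defined in this file):
#
#     @pytest.fixture(scope="module")
#     def flash_llama_handle(launcher):
#         with launcher("huggingface/llama-7b", num_shard=2) as handle:
#             yield handle
#
#     @pytest.mark.asyncio
#     async def test_flash_llama(flash_llama_handle, response_snapshot):
#         await flash_llama_handle.health(300)
#         response = await flash_llama_handle.client.generate(
#             "Test", max_new_tokens=10, decoder_input_details=True
#         )
#         assert response == response_snapshot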