import asyncio
import contextlib
import json
import math
import os
import random
import shutil
import subprocess
import sys
import tempfile
import time
from typing import Dict, List, Optional

import docker
import pytest
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
from docker.errors import NotFound
from syrupy.extensions.json import JSONSnapshotExtension
from text_generation import AsyncClient
from text_generation.types import (
    BestOfSequence,
    Message,
    ChatComplete,
    ChatCompletionChunk,
    ChatCompletionComplete,
    Completion,
    Details,
    Grammar,
    InputToken,
    Response,
    Token,
)
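
# Test-harness configuration, read from the environment: DOCKER_IMAGE selects
# the image to exercise (unset means a local text-generation-launcher process
# is started instead), HF_TOKEN authenticates Hub downloads, DOCKER_VOLUME is
# the host path mounted at /data, and DOCKER_DEVICES controls device mapping.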

DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
HF_TOKEN = os.getenv("HF_TOKEN", None)
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")
DOCKER_DEVICES = os.getenv("DOCKER_DEVICES")


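# Tests marked @pytest.mark.release are skipped by default; pass --release on
# the pytest command line to run them.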
def pytest_addoption(parser):
    parser.addoption(
        "--release", action="store_true", default=False, help="run release tests"
    )


def pytest_configure(config):
    config.addinivalue_line("markers", "release: mark test as a release-only test")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--release"):
        # --release given in cli: do not skip release tests
        return
    skip_release = pytest.mark.skip(reason="need --release option to run")
    for item in items:
        if "release" in item.keywords:
            item.add_marker(skip_release)


class ResponseComparator(JSONSnapshotExtension):
    """Snapshot comparator that checks responses semantically rather than
    byte-for-byte: ids, texts and structure must match exactly, while
    logprobs only need to be close within ``rtol`` to absorb numerical noise."""

    rtol = 0.2
    ignore_logprob = False

    def serialize(
        self,
        data,
        *,
        include=None,
        exclude=None,
        matcher=None,
    ):
        if isinstance(
            data, (Response, ChatComplete, ChatCompletionChunk, ChatCompletionComplete)
        ):
            data = data.model_dump()

        if isinstance(data, List):
            data = [d.model_dump() for d in data]

        data = self._filter(
            data=data,
            depth=0,
            path=(),
            exclude=exclude,
            include=include,
            matcher=matcher,
        )
        return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"

    def matches(
        self,
        *,
        serialized_data,
        snapshot_data,
    ) -> bool:
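        # Both sides arrive as JSON strings: parse them back into client types
        # so the eq_* helpers below can compare them field by field.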
        def convert_data(data):
            data = json.loads(data)
            return _convert_data(data)

        def _convert_data(data):
            if isinstance(data, Dict):
                if "choices" in data:
                    data["choices"] = sorted(data["choices"], key=lambda x: x["index"])
                    choices = data["choices"]
                    if isinstance(choices, List) and len(choices) >= 1:
                        if "delta" in choices[0]:
                            return ChatCompletionChunk(**data)
                        if "text" in choices[0]:
                            return Completion(**data)
                    return ChatComplete(**data)
                else:
                    return Response(**data)
            if isinstance(data, List):
                return [_convert_data(d) for d in data]
            raise NotImplementedError

        def eq_token(token: Token, other: Token) -> bool:
            return (
                token.id == other.id
                and token.text == other.text
                and (
                    self.ignore_logprob
                    # Two absent logprobs count as equal; otherwise they must
                    # be close within the relative tolerance.
                    or (token.logprob is None and other.logprob is None)
                    or math.isclose(token.logprob, other.logprob, rel_tol=self.rtol)
                )
                and token.special == other.special
            )

        def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:
            # Prefill logprobs may be None (the first input token has none);
            # mismatched None/float pairs compare unequal, either via the
            # else branch or via the TypeError guard below.
            try:
                return (
                    prefill_token.id == other.id
                    and prefill_token.text == other.text
                    and (
                        self.ignore_logprob
                        or math.isclose(
                            prefill_token.logprob,
                            other.logprob,
                            rel_tol=self.rtol,
                        )
                        if prefill_token.logprob is not None
                        else prefill_token.logprob == other.logprob
                    )
                )
            except TypeError:
                return False

        def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
            )

        def eq_details(details: Details, other: Details) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
                and (
                    len(details.best_of_sequences)
                    if details.best_of_sequences is not None
                    else 0
                )
                == (
                    len(other.best_of_sequences)
                    if other.best_of_sequences is not None
                    else 0
                )
                and (
                    all(
                        [
                            eq_best_of(d, o)
                            for d, o in zip(
                                details.best_of_sequences, other.best_of_sequences
                            )
                        ]
                    )
                    if details.best_of_sequences is not None
                    else details.best_of_sequences == other.best_of_sequences
                )
            )

        def eq_completion(response: Completion, other: Completion) -> bool:
            return response.choices[0].text == other.choices[0].text

        def eq_chat_complete(response: ChatComplete, other: ChatComplete) -> bool:
            return (
                response.choices[0].message.content == other.choices[0].message.content
            )

        def eq_chat_complete_chunk(
            response: ChatCompletionChunk, other: ChatCompletionChunk
        ) -> bool:
            return response.choices[0].delta.content == other.choices[0].delta.content

        def eq_response(response: Response, other: Response) -> bool:
            return response.generated_text == other.generated_text and eq_details(
                response.details, other.details
            )

        serialized_data = convert_data(serialized_data)
        snapshot_data = convert_data(snapshot_data)

        if not isinstance(serialized_data, List):
            serialized_data = [serialized_data]
        if not isinstance(snapshot_data, List):
            snapshot_data = [snapshot_data]

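        # Dispatch on payload type; a snapshot batch is homogeneous, so
        # checking the first element is enough.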
        if isinstance(serialized_data[0], Completion):
            return len(snapshot_data) == len(serialized_data) and all(
                [eq_completion(r, o) for r, o in zip(serialized_data, snapshot_data)]
            )

        if isinstance(serialized_data[0], ChatComplete):
            return len(snapshot_data) == len(serialized_data) and all(
                [eq_chat_complete(r, o) for r, o in zip(serialized_data, snapshot_data)]
            )

        if isinstance(serialized_data[0], ChatCompletionChunk):
            return len(snapshot_data) == len(serialized_data) and all(
                [
                    eq_chat_complete_chunk(r, o)
                    for r, o in zip(serialized_data, snapshot_data)
                ]
            )

        return len(snapshot_data) == len(serialized_data) and all(
            [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
        )


class GenerousResponseComparator(ResponseComparator):
    # Needed for GPTQ with exllama, which has serious numerical fluctuations.
    rtol = 0.75

class IgnoreLogProbResponseComparator(ResponseComparator):
    ignore_logprob = True


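# Base handle around a launched server; subclasses implement _inner_health()
# so health() can tell "still starting up" apart from "crashed".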
class LauncherHandle:
    def __init__(self, port: int):
        self.client = AsyncClient(f"http://localhost:{port}", timeout=30)

    def _inner_health(self):
        raise NotImplementedError

    async def health(self, timeout: int = 60):
        assert timeout > 0
        for _ in range(timeout):
            if not self._inner_health():
                raise RuntimeError("Launcher crashed")

            try:
                await self.client.generate("test")
                return
            except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
                # Server not accepting requests yet; yield to the event loop
                # rather than blocking it with time.sleep.
                await asyncio.sleep(1)
        raise RuntimeError("Health check failed")


class ContainerLauncherHandle(LauncherHandle):
    def __init__(self, docker_client, container_name, port: int):
        super().__init__(port)
        self.docker_client = docker_client
        self.container_name = container_name

    def _inner_health(self) -> bool:
        container = self.docker_client.containers.get(self.container_name)
        return container.status in ["running", "created"]


class ProcessLauncherHandle(LauncherHandle):
    def __init__(self, process, port: int):
        super().__init__(port)
        self.process = process

    def _inner_health(self) -> bool:
        return self.process.poll() is None


@pytest.fixture
def response_snapshot(snapshot):
    return snapshot.use_extension(ResponseComparator)


@pytest.fixture
def generous_response_snapshot(snapshot):
    return snapshot.use_extension(GenerousResponseComparator)

@pytest.fixture
def ignore_logprob_response_snapshot(snapshot):
    return snapshot.use_extension(IgnoreLogProbResponseComparator)


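# Module-scoped loop so async fixtures and tests within one test module share
# a single event loop.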
@pytest.fixture(scope="module")
def event_loop():
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()


@pytest.fixture(scope="module")
def launcher(event_loop):
    @contextlib.contextmanager
    def local_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
        kv_cache_dtype: Optional[str] = None,
        revision: Optional[str] = None,
        max_input_length: Optional[int] = None,
        max_batch_prefill_tokens: Optional[int] = None,
        max_total_tokens: Optional[int] = None,
        lora_adapters: Optional[List[str]] = None,
        cuda_graphs: Optional[List[int]] = None,
        attention: Optional[str] = None,
    ):
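        # Draw ports at random so test modules can run back to back without
        # address-reuse clashes (collisions are possible but unlikely).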
        port = random.randint(8000, 10_000)
        master_port = random.randint(10_000, 20_000)
        shard_uds_path = (
            f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server"
        )
        args = [
            "text-generation-launcher",
            "--model-id",
            model_id,
            "--port",
            str(port),
            "--master-port",
            str(master_port),
            "--shard-uds-path",
            shard_uds_path,
        ]

        # Copy the environment so launcher-specific settings don't leak into
        # (or mutate) the test process itself.
        env = os.environ.copy()

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if kv_cache_dtype is not None:
            args.append("--kv-cache-dtype")
            args.append(kv_cache_dtype)
        if revision is not None:
            args.append("--revision")
            args.append(revision)
        if trust_remote_code:
            args.append("--trust-remote-code")
        if max_input_length:
            args.append("--max-input-length")
            args.append(str(max_input_length))
        if max_batch_prefill_tokens:
            args.append("--max-batch-prefill-tokens")
            args.append(str(max_batch_prefill_tokens))
        if max_total_tokens:
            args.append("--max-total-tokens")
            args.append(str(max_total_tokens))
        if lora_adapters:
            args.append("--lora-adapters")
            args.append(",".join(lora_adapters))
        if cuda_graphs:
            args.append("--cuda-graphs")
            args.append(",".join(map(str, cuda_graphs)))

        print(" ".join(args), file=sys.stderr)
        env["LOG_LEVEL"] = "info,text_generation_router=debug"

        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"
        if attention is not None:
            env["ATTENTION"] = attention
        with tempfile.TemporaryFile("w+") as tmp:
            # Send stdout/stderr to a temporary file. With a pipe, the child
            # process blocks once the pipe buffer fills until it is drained.
            with subprocess.Popen(
                args,
                stdout=tmp,
                stderr=subprocess.STDOUT,
                env=env,
            ) as process:
                yield ProcessLauncherHandle(process, port)

                process.terminate()
                process.wait(60)

                tmp.seek(0)
                shutil.copyfileobj(tmp, sys.stderr)

    @contextlib.contextmanager
    def docker_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
        kv_cache_dtype: Optional[str] = None,
        revision: Optional[str] = None,
        max_input_length: Optional[int] = None,
        max_batch_prefill_tokens: Optional[int] = None,
        max_total_tokens: Optional[int] = None,
        lora_adapters: Optional[List[str]] = None,
        cuda_graphs: Optional[List[int]] = None,
        attention: Optional[str] = None,
    ):
        port = random.randint(8000, 10_000)

        args = ["--model-id", model_id, "--env"]

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if kv_cache_dtype is not None:
            args.append("--kv-cache-dtype")
            args.append(kv_cache_dtype)
        if revision is not None:
            args.append("--revision")
            args.append(revision)
        if trust_remote_code:
            args.append("--trust-remote-code")
        if max_input_length:
            args.append("--max-input-length")
            args.append(str(max_input_length))
        if max_batch_prefill_tokens:
            args.append("--max-batch-prefill-tokens")
            args.append(str(max_batch_prefill_tokens))
        if max_total_tokens:
            args.append("--max-total-tokens")
            args.append(str(max_total_tokens))
        if lora_adapters:
            args.append("--lora-adapters")
            args.append(",".join(lora_adapters))
        if cuda_graphs:
            args.append("--cuda-graphs")
            args.append(",".join(map(str, cuda_graphs)))

        client = docker.from_env()

        container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}"

        try:
            container = client.containers.get(container_name)
            container.stop()
            container.remove()
            container.wait()
        except NotFound:
            pass

        gpu_count = num_shard if num_shard is not None else 1

        env = {
            "LOG_LEVEL": "info,text_generation_router=debug",
        }
        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"
        if attention is not None:
            env["ATTENTION"] = attention
        if HF_TOKEN is not None:
            env["HF_TOKEN"] = HF_TOKEN

        volumes = []
        if DOCKER_VOLUME:
            volumes = [f"{DOCKER_VOLUME}:/data"]

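        # Device mapping, driven by DOCKER_DEVICES:
        #   unset                -> request `gpu_count` GPUs via the NVIDIA runtime
        #   "none"               -> no devices (CPU-only)
        #   "nvidia.com/gpu=all" -> one CDI device request per shard
        #   "dev1,dev2,..."      -> pass device paths straight through (e.g. ROCm)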
        if DOCKER_DEVICES:
            if DOCKER_DEVICES.lower() == "none":
                devices = []
            else:
                devices = DOCKER_DEVICES.strip().split(",")
            visible = os.getenv("ROCR_VISIBLE_DEVICES")
            if visible:
                env["ROCR_VISIBLE_DEVICES"] = visible
            device_requests = []
            if not devices:
                devices = None
            elif devices == ["nvidia.com/gpu=all"]:
                devices = None
                device_requests = [
                    docker.types.DeviceRequest(
                        driver="cdi",
                        # count=gpu_count,
                        device_ids=[f"nvidia.com/gpu={i}"],
                    )
                    for i in range(gpu_count)
                ]
        else:
            devices = None
            device_requests = [
                docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
            ]

        container = client.containers.run(
            DOCKER_IMAGE,
            command=args,
            name=container_name,
            environment=env,
            auto_remove=False,
            detach=True,
            device_requests=device_requests,
            devices=devices,
            volumes=volumes,
            ports={"80/tcp": port},
            healthcheck={"timeout": int(10 * 1e9)},  # timeout is in nanoseconds
            shm_size="1G",
        )

        try:
            yield ContainerLauncherHandle(client, container.name, port)

            try:
                container.stop()
                container.wait()
            except NotFound:
                pass
            container_output = container.logs().decode("utf-8")
            print(container_output, file=sys.stderr)
        finally:
            container.remove()
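    # Tests get the Docker launcher when DOCKER_IMAGE is set; otherwise
    # text-generation-launcher is started as a local subprocess.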
    if DOCKER_IMAGE is not None:
        return docker_launcher
    return local_launcher


@pytest.fixture(scope="module")
def generate_load():
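    # Fire `n` identical requests concurrently so tests can check that results
    # stay consistent when requests are batched together.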
    async def generate_load_inner(
        client: AsyncClient,
        prompt: str,
        max_new_tokens: int,
        n: int,
        seed: Optional[int] = None,
        grammar: Optional[Grammar] = None,
        stop_sequences: Optional[List[str]] = None,
    ) -> List[Response]:
        futures = [
            client.generate(
                prompt,
                max_new_tokens=max_new_tokens,
                decoder_input_details=True,
                seed=seed,
                grammar=grammar,
                stop_sequences=stop_sequences,
            )
            for _ in range(n)
        ]

        return await asyncio.gather(*futures)
    return generate_load_inner


@pytest.fixture(scope="module")
def generate_multi():
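    # Send the prompts in a random order, then invert the permutation so the
    # returned responses line up with the original prompt order.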
    async def generate_load_inner(
        client: AsyncClient,
        prompts: List[str],
        max_new_tokens: int,
        seed: Optional[int] = None,
    ) -> List[Response]:
        import numpy as np

        arange = np.arange(len(prompts))
        perm = np.random.permutation(arange)
        rperm = [-1] * len(perm)
        for i, p in enumerate(perm):
            rperm[p] = i

        shuffled_prompts = [prompts[p] for p in perm]
        futures = [
            client.chat(
                messages=[Message(role="user", content=prompt)],
                max_tokens=max_new_tokens,
                temperature=0,
                seed=seed,
            )
            for prompt in shuffled_prompts
        ]

        shuffled_responses = await asyncio.gather(*futures)
        responses = [shuffled_responses[p] for p in rperm]
        return responses

    return generate_load_inner
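

# Illustrative sketch of how a test module typically consumes these fixtures.
# The model id and expected output below are hypothetical, not part of this file:
#
#   @pytest.fixture(scope="module")
#   def flash_llama(launcher):
#       with launcher("huggingface/llama-7b", num_shard=2) as handle:
#           yield handle
#
#   @pytest.mark.asyncio
#   async def test_generate(flash_llama, response_snapshot):
#       await flash_llama.health(300)
#       response = await flash_llama.client.generate("Test", max_new_tokens=10)
#       assert response == response_snapshot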