"mmdet3d/vscode:/vscode.git/clone" did not exist on "d71edf6c771c3bb0ac51a52b4987843a06fb6bee"
conftest.py 19.6 KB
Newer Older
1
import asyncio
2
import contextlib
3
4
import json
import math
5
6
import os
import random
7
import shutil
8
9
import subprocess
import sys
10
import tempfile
11
import time
12
from typing import Dict, List, Optional
13

14
15
16
import docker
import pytest
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
17
from docker.errors import NotFound
18
from syrupy.extensions.json import JSONSnapshotExtension
19
from text_generation import AsyncClient
drbh's avatar
drbh committed
20
21
from text_generation.types import (
    BestOfSequence,
22
    Message,
drbh's avatar
drbh committed
23
24
    ChatComplete,
    ChatCompletionChunk,
25
    ChatCompletionComplete,
26
    Completion,
27
28
29
30
31
    Details,
    Grammar,
    InputToken,
    Response,
    Token,
drbh's avatar
drbh committed
32
)
33
34

DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
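# When DOCKER_IMAGE is unset, the `launcher` fixture below falls back to
# running `text-generation-launcher` as a local subprocess.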
HF_TOKEN = os.getenv("HF_TOKEN", None)
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")
DOCKER_DEVICES = os.getenv("DOCKER_DEVICES")


def pytest_addoption(parser):
    parser.addoption(
        "--release", action="store_true", default=False, help="run release tests"
    )


def pytest_configure(config):
    config.addinivalue_line("markers", "release: mark test as a release-only test")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--release"):
        # --release given in cli: do not skip release tests
        return
    skip_release = pytest.mark.skip(reason="need --release option to run")
    for item in items:
        if "release" in item.keywords:
            item.add_marker(skip_release)
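
# Usage sketch: run `pytest --release` to include tests marked with
# `@pytest.mark.release`; they are skipped otherwise.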


class ResponseComparator(JSONSnapshotExtension):
    """Snapshot comparator that tolerates numerical drift: token logprobs are
    compared with `math.isclose(rel_tol=rtol)` instead of exact equality, and
    `ignore_logprob` skips the logprob comparison entirely."""

    rtol = 0.2
    ignore_logprob = False

    def serialize(
        self,
        data,
        *,
        include=None,
        exclude=None,
        matcher=None,
    ):
        if (
            isinstance(data, Response)
            or isinstance(data, ChatComplete)
            or isinstance(data, ChatCompletionChunk)
            or isinstance(data, ChatCompletionComplete)
        ):
            data = data.model_dump()
        if isinstance(data, List):
            data = [d.model_dump() for d in data]

        data = self._filter(
            data=data,
            depth=0,
            path=(),
            exclude=exclude,
            include=include,
            matcher=matcher,
        )
        return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"

    def matches(
        self,
        *,
        serialized_data,
        snapshot_data,
    ) -> bool:
        def convert_data(data):
            data = json.loads(data)
            return _convert_data(data)

        def _convert_data(data):
            if isinstance(data, Dict):
                if "choices" in data:
                    data["choices"] = list(
                        sorted(data["choices"], key=lambda x: x["index"])
                    )
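                    # Discriminate by the first choice: streaming chunks carry
                    # `delta`, text completions carry `text`; anything else is
                    # a full chat completion.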
                    choices = data["choices"]
                    if isinstance(choices, List) and len(choices) >= 1:
                        if "delta" in choices[0]:
                            return ChatCompletionChunk(**data)
                        if "text" in choices[0]:
                            return Completion(**data)
                    return ChatComplete(**data)
                else:
                    return Response(**data)
            if isinstance(data, List):
                return [_convert_data(d) for d in data]
            raise NotImplementedError

        def eq_token(token: Token, other: Token) -> bool:
            return (
                token.id == other.id
                and token.text == other.text
                and (
                    self.ignore_logprob
                    # Both logprobs being None counts as equal; otherwise they
                    # must agree within the relative tolerance.
                    or (token.logprob == other.logprob and token.logprob is None)
                    or math.isclose(token.logprob, other.logprob, rel_tol=self.rtol)
                )
                and token.special == other.special
            )

        def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:
            try:
                return (
                    prefill_token.id == other.id
                    and prefill_token.text == other.text
                    and (
                        self.ignore_logprob
                        or math.isclose(
                            prefill_token.logprob,
                            other.logprob,
                            rel_tol=self.rtol,
                        )
                        if prefill_token.logprob is not None
                        else prefill_token.logprob == other.logprob
                    )
                )
            except TypeError:
                return False

        def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
            )

        def eq_details(details: Details, other: Details) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
                and (
                    len(details.best_of_sequences)
                    if details.best_of_sequences is not None
                    else 0
                )
                == (
                    len(other.best_of_sequences)
                    if other.best_of_sequences is not None
                    else 0
                )
                and (
                    all(
                        [
                            eq_best_of(d, o)
                            for d, o in zip(
                                details.best_of_sequences, other.best_of_sequences
                            )
                        ]
                    )
                    if details.best_of_sequences is not None
                    else details.best_of_sequences == other.best_of_sequences
                )
            )

        def eq_completion(response: Completion, other: Completion) -> bool:
            return response.choices[0].text == other.choices[0].text

        def eq_chat_complete(response: ChatComplete, other: ChatComplete) -> bool:
            return (
                response.choices[0].message.content == other.choices[0].message.content
            )

        def eq_chat_complete_chunk(
            response: ChatCompletionChunk, other: ChatCompletionChunk
        ) -> bool:
            return response.choices[0].delta.content == other.choices[0].delta.content

        def eq_response(response: Response, other: Response) -> bool:
            return response.generated_text == other.generated_text and eq_details(
                response.details, other.details
            )

        serialized_data = convert_data(serialized_data)
        snapshot_data = convert_data(snapshot_data)

        if not isinstance(serialized_data, List):
            serialized_data = [serialized_data]
        if not isinstance(snapshot_data, List):
            snapshot_data = [snapshot_data]

        if isinstance(serialized_data[0], Completion):
            return len(snapshot_data) == len(serialized_data) and all(
                [eq_completion(r, o) for r, o in zip(serialized_data, snapshot_data)]
            )

        if isinstance(serialized_data[0], ChatComplete):
            return len(snapshot_data) == len(serialized_data) and all(
                [eq_chat_complete(r, o) for r, o in zip(serialized_data, snapshot_data)]
            )

        if isinstance(serialized_data[0], ChatCompletionChunk):
            return len(snapshot_data) == len(serialized_data) and all(
                [
                    eq_chat_complete_chunk(r, o)
                    for r, o in zip(serialized_data, snapshot_data)
                ]
            )

        return len(snapshot_data) == len(serialized_data) and all(
            [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
        )


class GenerousResponseComparator(ResponseComparator):
    # Needed for GPTQ with exllama which has serious numerical fluctuations.
    rtol = 0.75

class IgnoreLogProbResponseComparator(ResponseComparator):
    ignore_logprob = True


class LauncherHandle:
    def __init__(self, port: int):
        self.client = AsyncClient(f"http://localhost:{port}", timeout=30)

    def _inner_health(self):
        raise NotImplementedError

    async def health(self, timeout: int = 60):
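        """Wait until the server answers a generate request, polling once per
        second and failing fast if the launcher process/container died."""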
        assert timeout > 0
        for _ in range(timeout):
            if not self._inner_health():
                raise RuntimeError("Launcher crashed")

            try:
                await self.client.generate("test")
                return
            except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
                time.sleep(1)
        raise RuntimeError("Health check failed")


class ContainerLauncherHandle(LauncherHandle):
    def __init__(self, docker_client, container_name, port: int):
        super(ContainerLauncherHandle, self).__init__(port)
        self.docker_client = docker_client
        self.container_name = container_name

    def _inner_health(self) -> bool:
        container = self.docker_client.containers.get(self.container_name)
        return container.status in ["running", "created"]


class ProcessLauncherHandle(LauncherHandle):
    def __init__(self, process, port: int):
        super(ProcessLauncherHandle, self).__init__(port)
        self.process = process

    def _inner_health(self) -> bool:
        return self.process.poll() is None


@pytest.fixture
def response_snapshot(snapshot):
    return snapshot.use_extension(ResponseComparator)


@pytest.fixture
def generous_response_snapshot(snapshot):
    return snapshot.use_extension(GenerousResponseComparator)

@pytest.fixture
def ignore_logprob_response_snapshot(snapshot):
    return snapshot.use_extension(IgnoreLogProbResponseComparator)
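
# In tests, `assert response == <snapshot fixture>` round-trips through the
# comparator's `serialize`/`matches` methods above.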


@pytest.fixture(scope="module")
def event_loop():
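    # Module-scoped override of pytest-asyncio's `event_loop` fixture so all
    # async tests in a module share one loop.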
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()


@pytest.fixture(scope="module")
def launcher(event_loop):
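    # Returns a context-manager factory: `docker_launcher` when DOCKER_IMAGE
    # is set, otherwise `local_launcher` (selected at the bottom of this
    # fixture).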
    @contextlib.contextmanager
    def local_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
        revision: Optional[str] = None,
        max_input_length: Optional[int] = None,
        max_batch_prefill_tokens: Optional[int] = None,
        max_total_tokens: Optional[int] = None,
        lora_adapters: Optional[List[str]] = None,
        cuda_graphs: Optional[List[int]] = None,
        attention: Optional[str] = None,
    ):
        port = random.randint(8000, 10_000)
        master_port = random.randint(10_000, 20_000)

        shard_uds_path = (
            f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server"
        )
        args = [
            "text-generation-launcher",
            "--model-id",
            model_id,
            "--port",
            str(port),
            "--master-port",
            str(master_port),
            "--shard-uds-path",
            shard_uds_path,
        ]

        env = os.environ
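        # Note: `env` aliases the test process's environment; the
        # USE_FLASH_ATTENTION key is removed again after the launcher exits.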

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if revision is not None:
            args.append("--revision")
            args.append(revision)
        if trust_remote_code:
            args.append("--trust-remote-code")
        if max_input_length:
            args.append("--max-input-length")
            args.append(str(max_input_length))
        if max_batch_prefill_tokens:
            args.append("--max-batch-prefill-tokens")
            args.append(str(max_batch_prefill_tokens))
        if max_total_tokens:
            args.append("--max-total-tokens")
            args.append(str(max_total_tokens))
        if lora_adapters:
            args.append("--lora-adapters")
            args.append(",".join(lora_adapters))
        if cuda_graphs:
            args.append("--cuda-graphs")
            args.append(",".join(map(str, cuda_graphs)))

        print(" ".join(args), file=sys.stderr)
        env["LOG_LEVEL"] = "info,text_generation_router=debug"

        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"
        if attention is not None:
            env["ATTENTION"] = attention
        with tempfile.TemporaryFile("w+") as tmp:
            # We'll output stdout/stderr to a temporary file. Using a pipe
            # causes the process to block until stdout is read.
            with subprocess.Popen(
                args,
                stdout=tmp,
                stderr=subprocess.STDOUT,
                env=env,
            ) as process:
                yield ProcessLauncherHandle(process, port)

                process.terminate()
                process.wait(60)

                tmp.seek(0)
                shutil.copyfileobj(tmp, sys.stderr)
        if not use_flash_attention:
            del env["USE_FLASH_ATTENTION"]

    @contextlib.contextmanager
    def docker_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
        revision: Optional[str] = None,
        max_input_length: Optional[int] = None,
        max_batch_prefill_tokens: Optional[int] = None,
        max_total_tokens: Optional[int] = None,
        lora_adapters: Optional[List[str]] = None,
        cuda_graphs: Optional[List[int]] = None,
        attention: Optional[str] = None,
    ):
        port = random.randint(8000, 10_000)

        args = ["--model-id", model_id, "--env"]

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if revision is not None:
            args.append("--revision")
            args.append(revision)
        if trust_remote_code:
            args.append("--trust-remote-code")
        if max_input_length:
            args.append("--max-input-length")
            args.append(str(max_input_length))
        if max_batch_prefill_tokens:
            args.append("--max-batch-prefill-tokens")
            args.append(str(max_batch_prefill_tokens))
        if max_total_tokens:
            args.append("--max-total-tokens")
            args.append(str(max_total_tokens))
        if lora_adapters:
            args.append("--lora-adapters")
            args.append(",".join(lora_adapters))
        if cuda_graphs:
            args.append("--cuda-graphs")
            args.append(",".join(map(str, cuda_graphs)))

        client = docker.from_env()

        container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}"

        try:
            container = client.containers.get(container_name)
            container.stop()
            container.wait()
        except NotFound:
            pass

        gpu_count = num_shard if num_shard is not None else 1

        env = {
            "LOG_LEVEL": "info,text_generation_router=debug",
        }
        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"
        if attention is not None:
            env["ATTENTION"] = attention
        if HF_TOKEN is not None:
            env["HF_TOKEN"] = HF_TOKEN

        volumes = []
        if DOCKER_VOLUME:
            volumes = [f"{DOCKER_VOLUME}:/data"]

        # DOCKER_DEVICES passes host devices straight through (used for the
        # ROCm path); otherwise request `gpu_count` GPUs via a DeviceRequest.
        if DOCKER_DEVICES:
            devices = DOCKER_DEVICES.split(",")
            visible = os.getenv("ROCR_VISIBLE_DEVICES")
            if visible:
                env["ROCR_VISIBLE_DEVICES"] = visible
            device_requests = []
        else:
            devices = []
            device_requests = [
                docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
            ]

        container = client.containers.run(
            DOCKER_IMAGE,
            command=args,
            name=container_name,
            environment=env,
            auto_remove=False,
            detach=True,
            device_requests=device_requests,
            devices=devices,
            volumes=volumes,
            ports={"80/tcp": port},
            healthcheck={"timeout": int(10 * 1e9)},
            shm_size="1G",
        )

        yield ContainerLauncherHandle(client, container.name, port)

        if not use_flash_attention:
            del env["USE_FLASH_ATTENTION"]

        try:
            container.stop()
            container.wait()
        except NotFound:
            pass
        container_output = container.logs().decode("utf-8")
        print(container_output, file=sys.stderr)

        container.remove()

    if DOCKER_IMAGE is not None:
        return docker_launcher
    return local_launcher
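
# Minimal usage sketch (hypothetical model id and fixture names):
#
#     @pytest.fixture(scope="module")
#     def flash_llama_handle(launcher):
#         with launcher("huggingface/llama-7b", num_shard=2) as handle:
#             yield handle
#
#     @pytest.mark.asyncio
#     async def test_flash_llama(flash_llama_handle, response_snapshot):
#         await flash_llama_handle.health(300)
#         response = await flash_llama_handle.client.generate(
#             "Test", max_new_tokens=10, decoder_input_details=True
#         )
#         assert response == response_snapshot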


@pytest.fixture(scope="module")
def generate_load():
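    # Fires `n` identical generate requests concurrently via asyncio.gather.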
    async def generate_load_inner(
        client: AsyncClient,
        prompt: str,
        max_new_tokens: int,
        n: int,
        seed: Optional[int] = None,
        grammar: Optional[Grammar] = None,
        stop_sequences: Optional[List[str]] = None,
    ) -> List[Response]:
        futures = [
            client.generate(
                prompt,
                max_new_tokens=max_new_tokens,
                decoder_input_details=True,
                seed=seed,
                grammar=grammar,
                stop_sequences=stop_sequences,
            )
            for _ in range(n)
        ]

        return await asyncio.gather(*futures)

    return generate_load_inner


@pytest.fixture(scope="module")
def generate_multi():
    async def generate_load_inner(
        client: AsyncClient,
        prompts: List[str],
        max_new_tokens: int,
        seed: Optional[int] = None,
    ) -> List[Response]:

        import numpy as np
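        # Send the prompts in a random order, then restore the original order
        # on the way back (`perm` shuffles, `rperm` is its inverse).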

        arange = np.arange(len(prompts))
        perm = np.random.permutation(arange)
        rperm = [-1] * len(perm)
        for i, p in enumerate(perm):
            rperm[p] = i

        shuffled_prompts = [prompts[p] for p in perm]
        futures = [
            client.chat(
                messages=[Message(role="user", content=prompt)],
                max_tokens=max_new_tokens,
                temperature=0,
                seed=seed,
            )
            for prompt in shuffled_prompts
        ]

        shuffled_responses = await asyncio.gather(*futures)
        responses = [shuffled_responses[p] for p in rperm]
        return responses

    return generate_load_inner