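"""Shared pytest fixtures for the text-generation-inference integration tests.

Provides snapshot comparators that tolerate floating-point jitter in token
log-probabilities, a `launcher` fixture that starts a TGI server either locally
or in Docker, and a helper that fires concurrent generation requests."""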

import asyncio
import contextlib
import json
import math
import os
import random
import subprocess
import sys
from typing import Dict, List, Optional

import docker
import pytest
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError
from docker.errors import NotFound
from syrupy.extensions.json import JSONSnapshotExtension

from text_generation import AsyncClient
from text_generation.types import (
    BestOfSequence,
    Details,
    Grammar,
    InputToken,
    Response,
    Token,
)
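
# Integration-test configuration, read from the environment:
# - DOCKER_IMAGE: when set, tests run against this TGI Docker image;
#   otherwise a local `text-generation-launcher` binary is launched.
# - HUGGING_FACE_HUB_TOKEN: forwarded so gated models can be pulled.
# - DOCKER_VOLUME: host path mounted at /data inside the container.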

DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
HUGGING_FACE_HUB_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN", None)
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")


class ResponseComparator(JSONSnapshotExtension):
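    """Syrupy snapshot comparator for `Response` objects that compares token
    log-probabilities approximately (within `rtol`) instead of requiring
    exact float equality."""
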
    rtol = 0.2

    def serialize(
        self,
        data,
        *,
        exclude=None,
        matcher=None,
    ):
        if isinstance(data, Response):
            data = data.dict()

        if isinstance(data, List):
            data = [d.dict() for d in data]

        data = self._filter(
            data=data, depth=0, path=(), exclude=exclude, matcher=matcher
        )
        return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"

    def matches(
        self,
        *,
        serialized_data,
        snapshot_data,
    ) -> bool:
        def convert_data(data):
            data = json.loads(data)

            if isinstance(data, Dict):
                return Response(**data)
            if isinstance(data, List):
                return [Response(**d) for d in data]
            raise NotImplementedError

        def eq_token(token: Token, other: Token) -> bool:
            return (
                token.id == other.id
                and token.text == other.text
                and math.isclose(token.logprob, other.logprob, rel_tol=self.rtol)
                and token.special == other.special
            )

        def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:
            try:
                return (
                    prefill_token.id == other.id
                    and prefill_token.text == other.text
                    and (
                        math.isclose(
                            prefill_token.logprob, other.logprob, rel_tol=self.rtol
                        )
                        if prefill_token.logprob is not None
                        else prefill_token.logprob == other.logprob
                    )
                )
            except TypeError:
                return False

        def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
            )

        def eq_details(details: Details, other: Details) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
                and (
                    len(details.best_of_sequences)
                    if details.best_of_sequences is not None
                    else 0
                )
                == (
                    len(other.best_of_sequences)
                    if other.best_of_sequences is not None
                    else 0
                )
                and (
                    all(
                        [
                            eq_best_of(d, o)
                            for d, o in zip(
                                details.best_of_sequences, other.best_of_sequences
                            )
                        ]
                    )
                    if details.best_of_sequences is not None
                    else details.best_of_sequences == other.best_of_sequences
                )
            )

        def eq_response(response: Response, other: Response) -> bool:
            return response.generated_text == other.generated_text and eq_details(
                response.details, other.details
            )

        serialized_data = convert_data(serialized_data)
        snapshot_data = convert_data(snapshot_data)

        if not isinstance(serialized_data, List):
            serialized_data = [serialized_data]
        if not isinstance(snapshot_data, List):
            snapshot_data = [snapshot_data]

        return len(snapshot_data) == len(serialized_data) and all(
            [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
        )


class GenerousResponseComparator(ResponseComparator):
    # Needed for GPTQ with exllama which has serious numerical fluctuations.
    rtol = 0.75


class LauncherHandle:
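    """Wraps a launched TGI server and polls it until it is healthy."""
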
    def __init__(self, port: int):
        self.client = AsyncClient(f"http://localhost:{port}")

    def _inner_health(self):
        raise NotImplementedError

    async def health(self, timeout: int = 60):
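        """Wait until the server answers a generation request, retrying once
        per second for up to `timeout` seconds."""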
        assert timeout > 0
        for _ in range(timeout):
            if not self._inner_health():
                raise RuntimeError("Launcher crashed")

            try:
                await self.client.generate("test")
                return
            except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
                # Server not ready yet; yield to the event loop instead of
                # blocking it with time.sleep().
                await asyncio.sleep(1)
        raise RuntimeError("Health check failed")


class ContainerLauncherHandle(LauncherHandle):
    def __init__(self, docker_client, container_name, port: int):
        super().__init__(port)
        self.docker_client = docker_client
        self.container_name = container_name

    def _inner_health(self) -> bool:
        container = self.docker_client.containers.get(self.container_name)
        return container.status in ["running", "created"]


class ProcessLauncherHandle(LauncherHandle):
    def __init__(self, process, port: int):
        super().__init__(port)
        self.process = process

    def _inner_health(self) -> bool:
        return self.process.poll() is None


@pytest.fixture
def response_snapshot(snapshot):
    return snapshot.use_extension(ResponseComparator)


@pytest.fixture
def generous_response_snapshot(snapshot):
    return snapshot.use_extension(GenerousResponseComparator)


@pytest.fixture(scope="module")
def event_loop():
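    """Module-scoped event loop so all async tests in a module share one loop
    (overrides pytest-asyncio's default function-scoped fixture)."""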
    loop = asyncio.get_event_loop()
    yield loop
    loop.close()


@pytest.fixture(scope="module")
def launcher(event_loop):
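    """Yield a context manager that starts a TGI server for the given model:
    inside DOCKER_IMAGE when that variable is set, otherwise as a local
    `text-generation-launcher` process."""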
    @contextlib.contextmanager
    def local_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
    ):
        port = random.randint(8000, 10_000)
        master_port = random.randint(10_000, 20_000)

        shard_uds_path = (
            f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server"
        )

        args = [
            "text-generation-launcher",
            "--model-id",
            model_id,
            "--port",
            str(port),
            "--master-port",
            str(master_port),
            "--shard-uds-path",
            shard_uds_path,
        ]

        # Copy the environment so launcher-specific settings do not leak into
        # the test runner's own process.
        env = os.environ.copy()

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if trust_remote_code:
            args.append("--trust-remote-code")

        env["LOG_LEVEL"] = "info,text_generation_router=debug"

        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"

        with subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
        ) as process:
            yield ProcessLauncherHandle(process, port)

            process.terminate()
            process.wait(60)

            launcher_output = process.stdout.read().decode("utf-8")
            print(launcher_output, file=sys.stderr)

            process.stdout.close()
            process.stderr.close()

    @contextlib.contextmanager
    def docker_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
    ):
        port = random.randint(8000, 10_000)

        args = ["--model-id", model_id, "--env"]

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if trust_remote_code:
            args.append("--trust-remote-code")

        client = docker.from_env()

        container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}"

        try:
            container = client.containers.get(container_name)
            container.stop()
            container.wait()
        except NotFound:
            pass

        gpu_count = num_shard if num_shard is not None else 1

        env = {
            "LOG_LEVEL": "info,text_generation_router=debug",
            "ENABLE_CUDA_GRAPHS": "true",
        }
        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"

        if HUGGING_FACE_HUB_TOKEN is not None:
            env["HUGGING_FACE_HUB_TOKEN"] = HUGGING_FACE_HUB_TOKEN

        volumes = []
        if DOCKER_VOLUME:
            volumes = [f"{DOCKER_VOLUME}:/data"]

        container = client.containers.run(
            DOCKER_IMAGE,
            command=args,
            name=container_name,
            environment=env,
            auto_remove=False,
            detach=True,
            device_requests=[
                docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
            ],
            volumes=volumes,
            ports={"80/tcp": port},
            shm_size="1G",
        )

        yield ContainerLauncherHandle(client, container.name, port)

        try:
            container.stop()
            container.wait()
        except NotFound:
            pass

        container_output = container.logs().decode("utf-8")
        print(container_output, file=sys.stderr)

        container.remove()

    if DOCKER_IMAGE is not None:
        return docker_launcher
    return local_launcher


@pytest.fixture(scope="module")
def generate_load():
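    """Return a coroutine that fires `n` identical generation requests
    concurrently and gathers their responses."""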
    async def generate_load_inner(
        client: AsyncClient,
        prompt: str,
        max_new_tokens: int,
        n: int,
        seed: Optional[int] = None,
        grammar: Optional[Grammar] = None,
        stop_sequences: Optional[List[str]] = None,
    ) -> List[Response]:
        futures = [
            client.generate(
                prompt,
                max_new_tokens=max_new_tokens,
                decoder_input_details=True,
                seed=seed,
                grammar=grammar,
                stop_sequences=stop_sequences,
            )
            for _ in range(n)
        ]

        return await asyncio.gather(*futures)

    return generate_load_inner
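

# A minimal sketch of how a test module might use these fixtures. The model
# id, fixture name, and assertion below are illustrative only and are not
# taken from this file:
#
#     @pytest.fixture(scope="module")
#     def model_handle(launcher):
#         with launcher("bigscience/bloom-560m", num_shard=1) as handle:
#             yield handle
#
#     @pytest.mark.asyncio
#     async def test_generate(model_handle, response_snapshot):
#         await model_handle.health(300)
#         response = await model_handle.client.generate(
#             "Test request", max_new_tokens=10, decoder_input_details=True
#         )
#         assert response == response_snapshot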