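"""Shared fixtures for the text-generation-inference integration tests:
snapshot comparators for generation responses, launcher handles for local and
Docker deployments, and a helper for issuing concurrent generate requests."""
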
import sys
import subprocess
import contextlib
import pytest
import asyncio
import os
import docker
import json
import math
import random

from docker.errors import NotFound
from typing import Optional, List, Dict
from syrupy.extensions.json import JSONSnapshotExtension
from aiohttp import ClientConnectorError, ClientOSError, ServerDisconnectedError

from text_generation import AsyncClient
from text_generation.types import (
    Response,
    Details,
    InputToken,
    Token,
    BestOfSequence,
    Grammar,
)

# Test-harness configuration, read from the environment: the Docker image to
# test (local subprocess mode is used when unset), an optional Hub token, and
# the host path mounted at /data inside launched containers.
DOCKER_IMAGE = os.getenv("DOCKER_IMAGE", None)
HUGGING_FACE_HUB_TOKEN = os.getenv("HUGGING_FACE_HUB_TOKEN", None)
DOCKER_VOLUME = os.getenv("DOCKER_VOLUME", "/data")


class ResponseComparator(JSONSnapshotExtension):
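    """Syrupy snapshot extension for generation responses: serialized output is
    compared field by field, with token logprobs matched under `rtol` rather
    than exact equality."""
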
    rtol = 0.2

    def serialize(
        self,
        data,
        *,
        exclude=None,
        matcher=None,
    ):
        if isinstance(data, List):
            data = [d.dict() for d in data]

        data = self._filter(
            data=data, depth=0, path=(), exclude=exclude, matcher=matcher
        )
        return json.dumps(data, indent=2, ensure_ascii=False, sort_keys=False) + "\n"

    def matches(
        self,
        *,
        serialized_data,
        snapshot_data,
    ) -> bool:
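        """Reconstruct Response objects from both payloads and compare them
        with the tolerant token/logprob equality helpers below."""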
        def convert_data(data):
            data = json.loads(data)

            if isinstance(data, Dict):
                return Response(**data)
            if isinstance(data, List):
                return [Response(**d) for d in data]
            raise NotImplementedError

        def eq_token(token: Token, other: Token) -> bool:
            return (
                token.id == other.id
                and token.text == other.text
                and math.isclose(token.logprob, other.logprob, rel_tol=self.rtol)
                and token.special == other.special
            )

        def eq_prefill_token(prefill_token: InputToken, other: InputToken) -> bool:
            try:
                return (
                    prefill_token.id == other.id
                    and prefill_token.text == other.text
                    and (
                        math.isclose(
                            prefill_token.logprob, other.logprob, rel_tol=self.rtol
                        )
                        if prefill_token.logprob is not None
                        else prefill_token.logprob == other.logprob
                    )
                )
            except TypeError:
                return False

        def eq_best_of(details: BestOfSequence, other: BestOfSequence) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
            )

        def eq_details(details: Details, other: Details) -> bool:
            return (
                details.finish_reason == other.finish_reason
                and details.generated_tokens == other.generated_tokens
                and details.seed == other.seed
                and len(details.prefill) == len(other.prefill)
                and all(
                    [
                        eq_prefill_token(d, o)
                        for d, o in zip(details.prefill, other.prefill)
                    ]
                )
                and len(details.tokens) == len(other.tokens)
                and all([eq_token(d, o) for d, o in zip(details.tokens, other.tokens)])
                and (
                    len(details.best_of_sequences)
                    if details.best_of_sequences is not None
                    else 0
                )
                == (
                    len(other.best_of_sequences)
                    if other.best_of_sequences is not None
                    else 0
                )
                and (
                    all(
                        [
                            eq_best_of(d, o)
                            for d, o in zip(
                                details.best_of_sequences, other.best_of_sequences
                            )
                        ]
                    )
                    if details.best_of_sequences is not None
                    else details.best_of_sequences == other.best_of_sequences
                )
            )

        def eq_response(response: Response, other: Response) -> bool:
            return response.generated_text == other.generated_text and eq_details(
                response.details, other.details
            )

        serialized_data = convert_data(serialized_data)
        snapshot_data = convert_data(snapshot_data)

        if not isinstance(serialized_data, List):
            serialized_data = [serialized_data]
        if not isinstance(snapshot_data, List):
            snapshot_data = [snapshot_data]

        return len(snapshot_data) == len(serialized_data) and all(
            [eq_response(r, o) for r, o in zip(serialized_data, snapshot_data)]
        )


class GenerousResponseComparator(ResponseComparator):
    # Needed for GPTQ with exllama which has serious numerical fluctuations.
    rtol = 0.75


class LauncherHandle:
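    """Handle to a running launcher: wraps an AsyncClient for the server plus a
    polling health check; subclasses report liveness via `_inner_health`."""
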
    def __init__(self, port: int):
        self.client = AsyncClient(f"http://localhost:{port}")

    def _inner_health(self):
        raise NotImplementedError

    async def health(self, timeout: int = 60):
        """Poll the launcher until it serves a generate request or `timeout`
        seconds elapse."""
        assert timeout > 0
        for _ in range(timeout):
            if not self._inner_health():
                raise RuntimeError("Launcher crashed")

            try:
                await self.client.generate("test")
                return
            except (ClientConnectorError, ClientOSError, ServerDisconnectedError):
                # Sleep on the event loop; a blocking time.sleep here would
                # stall the loop the client itself runs on.
                await asyncio.sleep(1)
        raise RuntimeError("Health check failed")


class ContainerLauncherHandle(LauncherHandle):
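    """Handle to a launcher running inside a Docker container."""
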
    def __init__(self, docker_client, container_name, port: int):
        super().__init__(port)
        self.docker_client = docker_client
        self.container_name = container_name

    def _inner_health(self) -> bool:
        container = self.docker_client.containers.get(self.container_name)
        return container.status in ["running", "created"]


class ProcessLauncherHandle(LauncherHandle):
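    """Handle to a launcher running as a local subprocess."""
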
    def __init__(self, process, port: int):
        super().__init__(port)
        self.process = process

    def _inner_health(self) -> bool:
        return self.process.poll() is None


@pytest.fixture
def response_snapshot(snapshot):
    return snapshot.use_extension(ResponseComparator)


@pytest.fixture
def generous_response_snapshot(snapshot):
    return snapshot.use_extension(GenerousResponseComparator)


@pytest.fixture(scope="module")
def event_loop():
    # Build a fresh loop per test module; asyncio.get_event_loop() is
    # deprecated outside a running loop and can hand back a loop that a
    # previous module already closed.
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()


@pytest.fixture(scope="module")
def launcher(event_loop):
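    # Returns a context manager that starts text-generation-launcher either in
    # Docker (when DOCKER_IMAGE is set) or as a local subprocess, and tears it
    # down on exit, dumping its logs to stderr.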
    @contextlib.contextmanager
    def local_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
    ):
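        # Pick random ports to reduce collisions between consecutive launches.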
        port = random.randint(8000, 10_000)
        master_port = random.randint(10_000, 20_000)

        shard_uds_path = (
            f"/tmp/tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}-server"
        )

        args = [
            "text-generation-launcher",
            "--model-id",
            model_id,
            "--port",
            str(port),
            "--master-port",
            str(master_port),
            "--shard-uds-path",
            shard_uds_path,
        ]

        # Copy the environment so launcher-specific settings do not leak into
        # the pytest process itself.
        env = os.environ.copy()

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if trust_remote_code:
            args.append("--trust-remote-code")

        env["LOG_LEVEL"] = "info,text_generation_router=debug"

        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"

        with subprocess.Popen(
            args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env
        ) as process:
            yield ProcessLauncherHandle(process, port)

            process.terminate()
            process.wait(60)

            launcher_output = process.stdout.read().decode("utf-8")
            print(launcher_output, file=sys.stderr)

            process.stdout.close()
            process.stderr.close()


    @contextlib.contextmanager
    def docker_launcher(
        model_id: str,
        num_shard: Optional[int] = None,
        quantize: Optional[str] = None,
        trust_remote_code: bool = False,
        use_flash_attention: bool = True,
        disable_grammar_support: bool = False,
        dtype: Optional[str] = None,
    ):
        port = random.randint(8000, 10_000)

        args = ["--model-id", model_id, "--env"]

        if disable_grammar_support:
            args.append("--disable-grammar-support")
        if num_shard is not None:
            args.extend(["--num-shard", str(num_shard)])
        if quantize is not None:
            args.append("--quantize")
            args.append(quantize)
        if dtype is not None:
            args.append("--dtype")
            args.append(dtype)
        if trust_remote_code:
            args.append("--trust-remote-code")

        client = docker.from_env()

        container_name = f"tgi-tests-{model_id.split('/')[-1]}-{num_shard}-{quantize}"

        try:
            # Stop any leftover container from a previous run with the same name.
            container = client.containers.get(container_name)
            container.stop()
            container.wait()
        except NotFound:
            pass

        # Request one GPU per shard (a single GPU for unsharded models).
        gpu_count = num_shard if num_shard is not None else 1

        env = {
            "LOG_LEVEL": "info,text_generation_router=debug",
            "ENABLE_CUDA_GRAPHS": "true",
        }
        if not use_flash_attention:
            env["USE_FLASH_ATTENTION"] = "false"

        if HUGGING_FACE_HUB_TOKEN is not None:
            env["HUGGING_FACE_HUB_TOKEN"] = HUGGING_FACE_HUB_TOKEN

        volumes = []
        if DOCKER_VOLUME:
            volumes = [f"{DOCKER_VOLUME}:/data"]

        container = client.containers.run(
            DOCKER_IMAGE,
            command=args,
            name=container_name,
            environment=env,
            auto_remove=False,
            detach=True,
            device_requests=[
                docker.types.DeviceRequest(count=gpu_count, capabilities=[["gpu"]])
            ],
            volumes=volumes,
            ports={"80/tcp": port},
            shm_size="1G",
        )

        yield ContainerLauncherHandle(client, container.name, port)


        try:
            container.stop()
            container.wait()
        except NotFound:
            pass

        container_output = container.logs().decode("utf-8")
        print(container_output, file=sys.stderr)

        container.remove()

    if DOCKER_IMAGE is not None:
        return docker_launcher
    return local_launcher


@pytest.fixture(scope="module")
def generate_load():
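    # Issues `n` identical generate requests concurrently against a handle's
    # client and gathers the responses.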
    async def generate_load_inner(
        client: AsyncClient,
        prompt: str,
        max_new_tokens: int,
        n: int,
        seed: Optional[int] = None,
        grammar: Optional[Grammar] = None,
        stop_sequences: Optional[List[str]] = None,
    ) -> List[Response]:
        futures = [
            client.generate(
                prompt,
                max_new_tokens=max_new_tokens,
                decoder_input_details=True,
                seed=seed,
                grammar=grammar,
                stop_sequences=stop_sequences,
            )
            for _ in range(n)
        ]

        return await asyncio.gather(*futures)

    return generate_load_inner
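

# A minimal sketch of how a test module might consume these fixtures; the
# model id, fixture names, and arguments below are hypothetical:
#
#     @pytest.fixture(scope="module")
#     def model_handle(launcher):
#         with launcher("some-org/some-model", num_shard=1) as handle:
#             yield handle
#
#     @pytest.mark.asyncio
#     async def test_generate(model_handle, response_snapshot):
#         await model_handle.health(300)
#         response = await model_handle.client.generate(
#             "Test request", max_new_tokens=10, decoder_input_details=True
#         )
#         assert response == response_snapshot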