import asyncio
import os
import torch
import time
import signal

from grpc import aio
from loguru import logger

from grpc_reflection.v1alpha import reflection
from pathlib import Path
from typing import List, Optional

from text_generation_server.cache import Cache
from text_generation_server.interceptor import ExceptionInterceptor
from text_generation_server.models import Model, get_model
from text_generation_server.models.pali_gemma import PaliGemmaBatch
from text_generation_server.models.vlm_causal_lm import (
    VlmCausalLMBatch,
)
from text_generation_server.pb import generate_pb2_grpc, generate_pb2
from text_generation_server.tracing import UDSOpenTelemetryAioServerInterceptor
from text_generation_server.models.idefics_causal_lm import IdeficsCausalLMBatch


class SignalHandler:
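    """Translate SIGINT/SIGTERM into a flag that the serve loop polls for graceful shutdown."""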
    KEEP_PROCESSING = True

    def __init__(self):
        signal.signal(signal.SIGINT, self.exit_gracefully)
        signal.signal(signal.SIGTERM, self.exit_gracefully)

    def exit_gracefully(self, signum, frame):
        print(f"Exiting gracefully: Signal {signum}")
        self.KEEP_PROCESSING = False


class TextGenerationService(generate_pb2_grpc.TextGenerationServiceServicer):
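    """gRPC servicer implementing the text-generation protocol: Info, Health,
    ServiceDiscovery, ClearCache, FilterBatch, Warmup, Prefill and Decode."""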
    def __init__(
        self,
        model: Model,
        cache: Cache,
        quantize: Optional[str],
        server_urls: List[str],
    ):
        self.cache = cache
        self.model = model
        self.quantize = quantize
        self.server_urls = server_urls
        # For some reason, inference_mode does not work well with GLOO which we use on CPU
        if model.device.type == "cuda":
            # Force inference mode for the lifetime of TextGenerationService
            self._inference_mode_raii_guard = torch._C._InferenceMode(True)

    async def Info(self, request, context):
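        """Return static information about the loaded model."""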
        return self.model.info

    async def Health(self, request, context):
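        """Liveness check; the small CUDA allocation below raises if the device is unusable."""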
        if self.model.device.type == "cuda":
            torch.zeros((2, 2)).cuda()
        return generate_pb2.HealthResponse()

    async def ServiceDiscovery(self, request, context):
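        """Expose the unix-socket URLs of all shards."""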
        return generate_pb2.ServiceDiscoveryResponse(urls=self.server_urls)

    async def ClearCache(self, request, context):
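        """Evict one cached batch (when `id` is set) or the entire cache."""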
        if request.HasField("id"):
            self.cache.delete(request.id)
        else:
            self.cache.clear()
        return generate_pb2.ClearCacheResponse()

    async def FilterBatch(self, request, context):
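        """Drop finished requests from a cached batch, keeping only `request_ids`."""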
        batch = self.cache.pop(request.batch_id)
        if batch is None:
            raise ValueError(f"Batch ID {request.batch_id} not found in cache.")
        filtered_batch = batch.filter(request.request_ids)
        self.cache.set(filtered_batch)

        return generate_pb2.FilterBatchResponse(batch=filtered_batch.to_pb())

    async def Warmup(self, request, context):
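        """Run a warmup batch through the model and report the maximum number of total tokens it can support."""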
        if self.quantize == "gptq":
            try:
                # When using GPTQ, the Exllama kernels need some global buffers
                # whose final shapes are only known once the model has loaded.
                # This will allocate those buffers.
                from text_generation_server.layers.gptq import (
                    create_exllama_buffers,
                    set_device,
                )

                set_device(self.model.device)
                create_exllama_buffers(request.max_prefill_tokens)
            except ImportError:
                pass

        if self.model.batch_type in {
            IdeficsCausalLMBatch,
            VlmCausalLMBatch,
            PaliGemmaBatch,
        }:  # Hack: I would rather use kwargs in the `from_pb` call
            batch = self.model.batch_type.from_pb_processor(
                request.batch,
                self.model.tokenizer,
                self.model.processor,
                self.model.model.config,
                self.model.dtype,
                self.model.device,
            )
        else:
            batch = self.model.batch_type.from_pb(
                request.batch, self.model.tokenizer, self.model.dtype, self.model.device
            )
        max_supported_total_tokens = self.model.warmup(batch)

        return generate_pb2.WarmupResponse(
            max_supported_total_tokens=max_supported_total_tokens
        )

    async def Prefill(self, request, context):
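        """Run the first forward pass for a new batch and cache its state for decoding."""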
        start = time.time_ns()
        if self.model.batch_type in {
            IdeficsCausalLMBatch,
            VlmCausalLMBatch,
            PaliGemmaBatch,
        }:  # Hack: I would rather use kwargs in the `from_pb` call
            batch = self.model.batch_type.from_pb_processor(
                request.batch,
                self.model.tokenizer,
                self.model.processor,
                self.model.model.config,
                self.model.dtype,
                self.model.device,
            )
        else:
            batch = self.model.batch_type.from_pb(
                request.batch, self.model.tokenizer, self.model.dtype, self.model.device
            )

        generations, next_batch, timings = self.model.generate_token(batch)
        self.cache.set(next_batch)

        return generate_pb2.PrefillResponse(
            generations=[generation.to_pb() for generation in generations],
            batch=next_batch.to_pb() if next_batch else None,
            forward_ns=timings[0],
            decode_ns=timings[1],
            total_ns=time.time_ns() - start,
        )

    async def Decode(self, request, context):
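        """Run one decode step over the cached batches, concatenating them first when several are given."""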
        start = time.time_ns()
        if len(request.batches) == 0:
            raise ValueError("Must provide at least one batch")

        batches = []
        for batch_pb in request.batches:
            batch = self.cache.pop(batch_pb.id)
            if batch is None:
                raise ValueError(f"Batch ID {batch_pb.id} not found in cache.")
            batches.append(batch)

        if len(batches) == 0:
            raise ValueError("All batches are empty")

        if len(batches) > 1:
            start_concat = time.time_ns()
            batch = self.model.batch_type.concatenate(batches)
            concat_ns = time.time_ns() - start_concat
        else:
            batch = batches[0]
            concat_ns = None

        generations, next_batch, timings = self.model.generate_token(batch)
        self.cache.set(next_batch)

        return generate_pb2.DecodeResponse(
            generations=[generation.to_pb() for generation in generations],
            batch=next_batch.to_pb() if next_batch else None,
            concat_ns=concat_ns,
            forward_ns=timings[0],
            decode_ns=timings[1],
            total_ns=time.time_ns() - start,
        )


def serve(
    model_id: str,
    revision: Optional[str],
    sharded: bool,
    quantize: Optional[str],
    speculate: Optional[int],
    dtype: Optional[str],
    trust_remote_code: bool,
    uds_path: Path,
):
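    """Load the model and serve it over gRPC on unix sockets until SIGINT/SIGTERM."""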
    async def serve_inner(
        model_id: str,
        revision: Optional[str],
        sharded: bool = False,
        quantize: Optional[str] = None,
        speculate: Optional[int] = None,
        dtype: Optional[str] = None,
        trust_remote_code: bool = False,
    ):
        unix_socket_template = "unix://{}-{}"
        if sharded:
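            # One unix socket per shard; RANK and WORLD_SIZE are read from the environment.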
            server_urls = [
                unix_socket_template.format(uds_path, rank)
                for rank in range(int(os.environ["WORLD_SIZE"]))
            ]
            local_url = server_urls[int(os.environ["RANK"])]
        else:
            local_url = unix_socket_template.format(uds_path, 0)
            server_urls = [local_url]

        try:
            model = get_model(
                model_id,
                revision,
                sharded,
                quantize,
                speculate,
                dtype,
                trust_remote_code,
            )
        except Exception:
            logger.exception("Error when initializing model")
            raise

        server = aio.server(
            interceptors=[
                ExceptionInterceptor(),
                UDSOpenTelemetryAioServerInterceptor(),
            ]
        )
        generate_pb2_grpc.add_TextGenerationServiceServicer_to_server(
            TextGenerationService(model, Cache(), quantize, server_urls), server
        )
        SERVICE_NAMES = (
            generate_pb2.DESCRIPTOR.services_by_name["TextGenerationService"].full_name,
            reflection.SERVICE_NAME,
        )
        reflection.enable_server_reflection(SERVICE_NAMES, server)
        server.add_insecure_port(local_url)

        await server.start()

        logger.info("Server started at {}".format(local_url))
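        # Block until a termination signal flips KEEP_PROCESSING; the short sleep
        # keeps the event loop free to serve gRPC requests in the meantime.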
        signal_handler = SignalHandler()
        while signal_handler.KEEP_PROCESSING:
            await asyncio.sleep(0.5)

    asyncio.run(
        serve_inner(
            model_id, revision, sharded, quantize, speculate, dtype, trust_remote_code
        )
    )