# cli.py
import os
import sys
import typer

from pathlib import Path
from loguru import logger
from typing import Optional
from enum import Enum
from huggingface_hub import hf_hub_download
from text_generation_server.utils.adapter import parse_lora_adapters


app = typer.Typer()


class Quantization(str, Enum):
    bitsandbytes = "bitsandbytes"
    bitsandbytes_nf4 = "bitsandbytes-nf4"
    bitsandbytes_fp4 = "bitsandbytes-fp4"
    gptq = "gptq"
    awq = "awq"
    eetq = "eetq"
    exl2 = "exl2"
    fp8 = "fp8"
    marlin = "marlin"
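

# The string values above are the accepted choices for the `--quantize` option
# that Typer derives from the `serve` signature below; e.g. a hypothetical
# `--quantize bitsandbytes-nf4` selects Quantization.bitsandbytes_nf4.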


class Dtype(str, Enum):
    float16 = "float16"
    bfloat16 = "bfloat16"


class KVCacheDtype(str, Enum):
    fp8_e4m3fn = "fp8_e4m3fn"
    fp8_e5m2 = "fp8_e5m2"


@app.command()
def serve(
    model_id: str,
    revision: Optional[str] = None,
    sharded: bool = False,
    quantize: Optional[Quantization] = None,
    speculate: Optional[int] = None,
    dtype: Optional[Dtype] = None,
    kv_cache_dtype: Optional[KVCacheDtype] = None,
    trust_remote_code: bool = False,
    uds_path: Path = "/tmp/text-generation-server",
    logger_level: str = "INFO",
    json_output: bool = False,
    otlp_endpoint: Optional[str] = None,
    otlp_service_name: str = "text-generation-inference.server",
    max_input_tokens: Optional[int] = None,
):
    if sharded:
        assert (
            os.getenv("RANK", None) is not None
        ), "RANK must be set when sharded is True"
        assert (
            os.getenv("WORLD_SIZE", None) is not None
        ), "WORLD_SIZE must be set when sharded is True"
        assert (
            os.getenv("MASTER_ADDR", None) is not None
        ), "MASTER_ADDR must be set when sharded is True"
        assert (
            os.getenv("MASTER_PORT", None) is not None
        ), "MASTER_PORT must be set when sharded is True"

    # Remove default handler
    logger.remove()
    logger.add(
        sys.stdout,
        format="{message}",
        filter="text_generation_server",
        level=logger_level,
        serialize=json_output,
        backtrace=True,
        diagnose=False,
    )

    # Import here after the logger is added to log potential import exceptions
    from text_generation_server import server
    from text_generation_server.tracing import setup_tracing

    # Setup OpenTelemetry distributed tracing
    if otlp_endpoint is not None:
        setup_tracing(otlp_service_name=otlp_service_name, otlp_endpoint=otlp_endpoint)

    lora_adapters = parse_lora_adapters(os.getenv("LORA_ADAPTERS"))
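    # LORA_ADAPTERS is assumed to be a comma-separated list of adapter ids,
    # e.g. (hypothetical) LORA_ADAPTERS="org/adapter-a,org/adapter-b";
    # parse_lora_adapters turns it into a list of adapter specs.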

    # TODO: enable LoRA with CUDA graphs. For now, disable CUDA graphs if LoRA
    # is enabled and warn the user.
    if lora_adapters:
        logger.warning("LoRA adapters enabled (experimental feature).")

        if "CUDA_GRAPHS" in os.environ:
            logger.warning(
                "LoRA adapters incompatible with CUDA Graphs. Disabling CUDA Graphs."
            )
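            # CUDA_GRAPHS is not defined at module scope in this file; the
            # `global` statement creates it as a module attribute, presumably
            # consulted by code that decides whether to capture CUDA graphs.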
            global CUDA_GRAPHS
            CUDA_GRAPHS = None

    # Downgrade enum into str for easier management later on
    quantize = None if quantize is None else quantize.value
    dtype = None if dtype is None else dtype.value
    kv_cache_dtype = None if kv_cache_dtype is None else kv_cache_dtype.value
    if dtype is not None and quantize not in {
        None,
        "bitsandbytes",
        "bitsandbytes-nf4",
        "bitsandbytes-fp4",
    }:
        raise RuntimeError(
            "Only one of `dtype` and `quantize` can be set, as they both determine the final model's data type."
        )
    server.serve(
        model_id,
        lora_adapters,
        revision,
        sharded,
        quantize,
        speculate,
        dtype,
        kv_cache_dtype,
        trust_remote_code,
        uds_path,
        max_input_tokens,
    )


@app.command()
def download_weights(
    model_id: str,
    revision: Optional[str] = None,
    extension: str = ".safetensors",
    auto_convert: bool = True,
    logger_level: str = "INFO",
    json_output: bool = False,
    trust_remote_code: bool = False,
    merge_lora: bool = False,
):
    # Remove default handler
    logger.remove()
    logger.add(
        sys.stdout,
        format="{message}",
        filter="text_generation_server",
        level=logger_level,
        serialize=json_output,
        backtrace=True,
        diagnose=False,
    )

    # Import here after the logger is added to log potential import exceptions
    from text_generation_server import utils

    # Check whether the weight files were already downloaded
    try:
        utils.weight_files(model_id, revision, extension)
        logger.info("Files are already present on the host. Skipping download.")
        return
    # Local files not found
    except (utils.LocalEntryNotFoundError, FileNotFoundError, utils.EntryNotFoundError):
        pass

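    # Treat the model as local if model_id is an existing directory, or if
    # WEIGHTS_CACHE_OVERRIDE is set (weights are then expected in that cache).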
    is_local_model = (Path(model_id).exists() and Path(model_id).is_dir()) or os.getenv(
        "WEIGHTS_CACHE_OVERRIDE", None
    ) is not None

    if not is_local_model:
        # TODO: maybe reverse the default value of merge_lora?
        # Currently, by default, we don't merge the adapter weights into the base model.
        if merge_lora:
            try:
                hf_hub_download(
                    model_id, revision=revision, filename="adapter_config.json"
                )
                utils.download_and_unload_peft(
                    model_id, revision, trust_remote_code=trust_remote_code
                )
                is_local_model = True
                utils.weight_files(model_id, revision, extension)
                return
            except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
                pass
        else:
            try:
                utils.peft.download_peft(
                    model_id, revision, trust_remote_code=trust_remote_code
                )
            except Exception:
                pass

        try:
            import json

            config = hf_hub_download(
                model_id, revision=revision, filename="config.json"
            )
            with open(config, "r") as f:
                config = json.load(f)

            base_model_id = config.get("base_model_name_or_path", None)
            if base_model_id and base_model_id != model_id:
                try:
                    logger.info(f"Downloading parent model {base_model_id}")
                    download_weights(
                        model_id=base_model_id,
                        revision="main",
                        extension=extension,
                        auto_convert=auto_convert,
                        logger_level=logger_level,
                        json_output=json_output,
                        trust_remote_code=trust_remote_code,
                    )
                except Exception:
                    pass
        except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
            pass

        # Try to download weights from the hub
        try:
            filenames = utils.weight_hub_files(model_id, revision, extension)
            utils.download_weights(filenames, model_id, revision)
            # Successfully downloaded weights
            return

        # No weights found on the hub with this extension
        except utils.EntryNotFoundError as e:
            # Check if we want to automatically convert to safetensors or if we can use .bin weights instead
            if extension != ".safetensors" or not auto_convert:
                raise e

    elif (Path(model_id) / "adapter_config.json").exists():
        # Try to load as a local PEFT model
        try:
            utils.download_and_unload_peft(
                model_id, revision, trust_remote_code=trust_remote_code
            )
            utils.weight_files(model_id, revision, extension)
            return
        except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
            pass
    elif (Path(model_id) / "config.json").exists():
        # Try to load as a local Medusa model
        try:
            import json

            config = Path(model_id) / "config.json"
            with open(config, "r") as f:
                config = json.load(f)

            base_model_id = config.get("base_model_name_or_path", None)
            if base_model_id:
                try:
                    logger.info(f"Downloading parent model {base_model_id}")
                    download_weights(
                        model_id=base_model_id,
                        revision="main",
                        extension=extension,
                        auto_convert=auto_convert,
                        logger_level=logger_level,
                        json_output=json_output,
                        trust_remote_code=trust_remote_code,
                    )
                except Exception:
                    pass
        except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
            pass

    # Try to see if there are local pytorch weights
    try:
        # Get weights for a local model, a hub-cached model, or one inside the WEIGHTS_CACHE_OVERRIDE directory
        try:
            local_pt_files = utils.weight_files(model_id, revision, ".bin")
        except Exception:
            local_pt_files = utils.weight_files(model_id, revision, ".pt")

    # No local pytorch weights
    except (utils.LocalEntryNotFoundError, utils.EntryNotFoundError):
        if extension == ".safetensors":
            logger.warning(
                f"No safetensors weights found for model {model_id} at revision {revision}. "
                f"Downloading PyTorch weights."
            )

        # Try to see if there are pytorch weights on the hub
        pt_filenames = utils.weight_hub_files(model_id, revision, ".bin")
        # Download pytorch weights
        local_pt_files = utils.download_weights(pt_filenames, model_id, revision)

    if auto_convert:
        if not trust_remote_code:
            logger.warning(
                "🚨🚨BREAKING CHANGE in 2.0🚨🚨: Safetensors conversion is disabled without `--trust-remote-code` because "
                "Pickle files are unsafe and can essentially contain remote code execution!"
                "Please check for more information here: https://huggingface.co/docs/text-generation-inference/basic_tutorials/safety",
            )

        logger.warning(
            f"No safetensors weights found for model {model_id} at revision {revision}. "
            f"Converting PyTorch weights to safetensors."
        )

        # Safetensors final filenames
        # Use removeprefix (not lstrip) so only the literal "pytorch_" prefix
        # is dropped rather than any run of those characters
        local_st_files = [
            p.parent / f"{p.stem.removeprefix('pytorch_')}.safetensors"
            for p in local_pt_files
        ]
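        # e.g. "pytorch_model-00001-of-00002.bin" becomes
        # "model-00001-of-00002.safetensors" next to the original file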
        try:
            import transformers
            import json

            if is_local_model:
                config_filename = os.path.join(model_id, "config.json")
            else:
                config_filename = hf_hub_download(
                    model_id, revision=revision, filename="config.json"
                )
            with open(config_filename, "r") as f:
                config = json.load(f)
            architecture = config["architectures"][0]

            class_ = getattr(transformers, architecture)

            # The name of this variable depends on the transformers version.
            discard_names = getattr(class_, "_tied_weights_keys", [])

        except Exception:
            discard_names = []
        # Convert pytorch weights to safetensors
        utils.convert_files(local_pt_files, local_st_files, discard_names)


@app.command()
def quantize(
    model_id: str,
    output_dir: str,
    revision: Optional[str] = None,
    logger_level: str = "INFO",
    json_output: bool = False,
    trust_remote_code: bool = False,
    upload_to_model_id: Optional[str] = None,
    percdamp: float = 0.01,
    act_order: bool = False,
    groupsize: int = 128,
):
    if revision is None:
        revision = "main"
    download_weights(
        model_id=model_id,
        revision=revision,
        logger_level=logger_level,
        json_output=json_output,
    )
    from text_generation_server.layers.gptq.quantize import quantize

    quantize(
        model_id=model_id,
        bits=4,
        groupsize=groupsize,
        output_dir=output_dir,
        revision=revision,
        trust_remote_code=trust_remote_code,
        upload_to_model_id=upload_to_model_id,
        percdamp=percdamp,
        act_order=act_order,
        sym=True,
    )


if __name__ == "__main__":
    app()
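
# Hypothetical usage examples (Typer derives kebab-case command and option
# names from the identifiers above; model ids and paths are placeholders):
#   text-generation-server download-weights bigscience/bloom-560m
#   text-generation-server serve bigscience/bloom-560m --quantize bitsandbytes
#   text-generation-server quantize bigscience/bloom-560m /data/quantized-model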