# flash_phi.py
import torch
import torch.distributed

from opentelemetry import trace
from transformers import AutoConfig, AutoTokenizer
from typing import Optional

from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_phi_modeling import (
    FlashPhiForCausalLM,
)
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)

tracer = trace.get_tracer(__name__)


class FlashPhi(FlashCausalLM):
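    """Flash-attention implementation of Phi models for text-generation-server.

    Shards safetensors weights across a tensor-parallel process group and
    optionally attaches a Medusa speculative-decoding head to ``lm_head``.
    """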
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
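        # One CUDA device per rank; weights are sharded over the process group.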
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        else:
            raise NotImplementedError("FlashPhi is only available on GPU")

        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )

        config = AutoConfig.from_pretrained(
            model_id, revision=revision, trust_remote_code=trust_remote_code
        )
        config.quantize = quantize
        config.speculator = speculator

        torch.distributed.barrier(group=self.process_group)

        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(filenames, device, dtype, process_group=self.process_group)
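        # Quantized (GPTQ/AWQ/Marlin) checkpoints carry their quantization
        # parameters in the model repo; read them before building the model.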
        if config.quantize in ["gptq", "awq", "marlin"]:
            weights._set_gptq_params(model_id, revision)

        model = FlashPhiForCausalLM(config, weights)
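        # Optional Medusa speculator: load its extra decoding heads and wrap
        # them around the base lm_head for speculative decoding.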
        if speculator:
            from text_generation_server.utils.medusa import MedusaModel
            from huggingface_hub import hf_hub_download
            import json
            import os
            from pathlib import Path

            is_local_model = (
                Path(speculator).exists() and Path(speculator).is_dir()
            ) or os.getenv("WEIGHTS_CACHE_OVERRIDE", None) is not None

            if not is_local_model:
                medusa_config = hf_hub_download(
                    speculator, revision=revision, filename="config.json"
                )
                medusa_head = hf_hub_download(
                    speculator, revision=revision, filename="medusa_lm_head.pt"
                )
            else:
                medusa_config = str(Path(speculator) / "config.json")
                medusa_head = str(Path(speculator) / "medusa_lm_head.pt")

            with open(medusa_config, "r") as f:
                config = json.load(f)
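            # The Medusa head is read from the .safetensors sibling of the
            # resolved medusa_lm_head.pt path.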
            medusa_sf = medusa_head[: -len(".pt")] + ".safetensors"
            weights = Weights(
                [medusa_sf], device, dtype, process_group=self.process_group
            )
            lm_head = model.lm_head
            model.lm_head = MedusaModel(config, weights, lm_head)

        torch.distributed.barrier(group=self.process_group)
        super(FlashPhi, self).__init__(
            model=model,
            tokenizer=tokenizer,
            num_layers=len(model.model.layers),
            num_kv_heads=model.model.num_key_value_heads,
            head_size=model.model.head_size,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )
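

# Usage sketch (illustrative, not part of the upstream module; the model id is
# an assumption and a CUDA device is required):
#
#   from text_generation_server.models.flash_phi import FlashPhi
#
#   model = FlashPhi("microsoft/phi-2", dtype=torch.float16)
#   # The instance is then driven by FlashCausalLM's flash-attention generation loop.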