# flash_phi.py
import torch
import torch.distributed

from opentelemetry import trace
from transformers import AutoConfig, AutoTokenizer
from typing import Optional

from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_phi_modeling import (
    FlashPhiForCausalLM,
    PhiConfig,
)
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)

tracer = trace.get_tracer(__name__)


class FlashPhi(FlashCausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
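        # Initialize the distributed process group; each rank is pinned to its own CUDA device.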
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        else:
            raise NotImplementedError("FlashPhi is only available on GPU")

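        # Load the tokenizer with left-side padding and truncation.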
        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )

        config = PhiConfig.from_pretrained(
            model_id, revision=revision, trust_remote_code=trust_remote_code
        )
        config.quantize = quantize
        config.speculator = speculator

        torch.distributed.barrier(group=self.process_group)

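        # Discover the model's .safetensors shards and map them onto this rank's device.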
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(filenames, device, dtype, process_group=self.process_group)
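        # GPTQ/AWQ checkpoints need extra quantization parameters read from the model repo.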
        if config.quantize in ["gptq", "awq"]:
            weights._set_gptq_params(model_id, revision)

        model = FlashPhiForCausalLM(config, weights)
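        # Optionally wrap the LM head with a Medusa speculative-decoding head.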
        if speculator:
            from text_generation_server.utils.medusa import MedusaModel
            from huggingface_hub import hf_hub_download
            import json
            import os
            from pathlib import Path

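            # The speculator may be a local directory or a Hugging Face Hub repo id.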
            is_local_model = (
                Path(speculator).exists() and Path(speculator).is_dir()
            ) or os.getenv("WEIGHTS_CACHE_OVERRIDE", None) is not None

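            # Fetch the Medusa config and head from the Hub unless they are already local.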
            if not is_local_model:
                medusa_config = hf_hub_download(
                    speculator, revision=revision, filename="config.json"
                )
                medusa_head = hf_hub_download(
                    speculator, revision=revision, filename="medusa_lm_head.pt"
                )
            else:
                medusa_config = str(Path(speculator) / "config.json")
                medusa_head = str(Path(speculator) / "medusa_lm_head.pt")

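            # Read the Medusa config and load the head weights from the .safetensors file
            # that shares the .pt filename stem.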
            with open(medusa_config, "r") as f:
                config = json.load(f)
            medusa_sf = medusa_head[: -len(".pt")] + ".safetensors"
            weights = Weights(
                [medusa_sf], device, dtype, process_group=self.process_group
            )
            lm_head = model.lm_head
            model.lm_head = MedusaModel(config, weights, lm_head)

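        # Wait for all ranks before handing off to the FlashCausalLM base class.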
        torch.distributed.barrier(group=self.process_group)
        super(FlashPhi, self).__init__(
            model=model,
            tokenizer=tokenizer,
            num_layers=len(model.model.layers),
            num_kv_heads=model.model.num_key_value_heads,
            head_size=model.model.head_size,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )