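"""Flash-attention Mistral model wrappers for text_generation_server."""
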
import torch
import torch.distributed

from opentelemetry import trace
from transformers import AutoTokenizer, AutoConfig
from typing import Optional, Tuple

from text_generation_server.models import FlashCausalLM
from text_generation_server.models.flash_causal_lm import set_sliding_window
from text_generation_server.models.custom_modeling.flash_mistral_modeling import (
    FlashMistralForCausalLM,
    MistralConfig,
)
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)
from text_generation_server.utils.import_utils import SYSTEM, IPEX_AVAIL

tracer = trace.get_tracer(__name__)


class BaseFlashMistral(FlashCausalLM):
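    """Shared loading logic for Mistral-style flash-attention models.

    Subclasses supply the concrete config and model classes (see FlashMistral below).
    """
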
    def __init__(
        self,
        model_cls,
        model_id: str,
        config_cls=AutoConfig,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
        tokenizer_class=AutoTokenizer,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
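        # Pick the device and default dtype per backend: CUDA and XPU default to
        # float16, the IPEX CPU path to bfloat16; anything else is unsupported.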
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        elif SYSTEM == "xpu":
            device = torch.device(f"xpu:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        elif IPEX_AVAIL:
            device = torch.device("cpu")
            dtype = torch.bfloat16 if dtype is None else dtype
        else:
            raise NotImplementedError(
                "FlashMistral requires a CUDA or XPU device, or IPEX on CPU"
            )

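        # Load the tokenizer with left padding/truncation so the most recent
        # prompt tokens are kept when inputs are batched or truncated.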
        tokenizer = tokenizer_class.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )

        config = config_cls.from_pretrained(
            model_id, revision=revision, trust_remote_code=trust_remote_code
        )
        config.quantize = quantize
        config.speculator = speculator

        # Set the context window
        if getattr(config, "sliding_window", None) is not None:
            set_sliding_window(config.sliding_window)
        else:
            config.sliding_window = None

        torch.distributed.barrier(group=self.process_group)

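        # Load the sharded safetensors weights onto the target device, splitting
        # tensors across the tensor-parallel process group.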
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(filenames, device, dtype, process_group=self.process_group)
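        # For GPTQ/AWQ/Marlin checkpoints, read the quantization parameters
        # (e.g. bits and group size) from the repository config.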
        if config.quantize in ["gptq", "awq", "marlin"]:
            weights._set_gptq_params(model_id, revision)

        prefix = ""
        model = model_cls(prefix, config, weights)

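        # CUDA graphs captured at warmup are cached here, keyed by batch size.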
        self.cuda_graphs = {}

        torch.distributed.barrier(group=self.process_group)
        num_layers, num_kv_heads, head_size = self.get_layer_config(model)
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            num_layers=num_layers,
            num_kv_heads=num_kv_heads,
            head_size=head_size,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
            sliding_window=config.sliding_window,
        )

    def get_layer_config(self, model) -> Tuple[int, int, int]:
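        """Return (num_layers, num_kv_heads, head_size) read from the loaded model."""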
        return (
            len(model.model.layers),
            model.model.num_key_value_heads,
            model.model.head_size,
        )


class FlashMistral(BaseFlashMistral):
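    """Mistral with flash attention: wires MistralConfig and
    FlashMistralForCausalLM into the shared BaseFlashMistral loader."""
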
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        speculator: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        super().__init__(
            config_cls=MistralConfig,
            model_cls=FlashMistralForCausalLM,
            model_id=model_id,
            revision=revision,
            quantize=quantize,
            speculator=speculator,
            dtype=dtype,
            trust_remote_code=trust_remote_code,
        )
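

# Illustrative usage only (the server builds these classes through its model
# dispatch; the model id below is just an example):
#
#     model = FlashMistral(
#         "mistralai/Mistral-7B-Instruct-v0.2",
#         dtype=torch.float16,
#     )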