import torch
import torch.distributed

from opentelemetry import trace
from transformers import AutoTokenizer, AutoConfig
from typing import Optional, List
import json
import os

from huggingface_hub import hf_hub_download
from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_santacoder_modeling import (
    FlashSantacoderForCausalLM,
)
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)

from text_generation_server.utils.import_utils import IS_XPU_SYSTEM

tracer = trace.get_tracer(__name__)


class FlashSantacoderSharded(FlashCausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        use_medusa: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        self.process_group, rank, world_size = initialize_torch_distributed()
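        # One device per rank: prefer CUDA, fall back to Intel XPU, and default
        # to float16, which the flash attention kernels expect.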
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        elif IS_XPU_SYSTEM:
            device = torch.device(f"xpu:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        else:
            raise NotImplementedError("FlashSantacoderSharded is only available on GPU")

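        # Pad and truncate on the left so the end of long prompts is preserved.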
        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )

        config = AutoConfig.from_pretrained(
            model_id,
            revision=revision,
            trust_remote_code=True,
        )
        config.quantize = quantize
        config.use_medusa = use_medusa
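        # GPT2-style checkpoints store their weights as Conv1D (transposed
        # relative to nn.Linear), so flag them for transposition at load time.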
        config.transpose = config.architectures[0].startswith("GPT2")

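        # Wait for every rank before reading the sharded safetensors files.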
        torch.distributed.barrier(group=self.process_group)
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(
            filenames,
            device=device,
            dtype=dtype,
            process_group=self.process_group,
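            # The input embeddings are tied to the LM head, so `lm_head.weight`
            # is served from `transformer.wte.weight`.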
            aliases={"transformer.wte.weight": ["lm_head.weight"]},
        )
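        # GPTQ checkpoints ship their quantization parameters (e.g. bits and
        # groupsize) alongside the weights; pull them into the weight handler.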
        if config.quantize == "gptq":
            weights._set_gptq_params(model_id, revision)

        model = FlashSantacoderForCausalLM(config, weights)

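        # Synchronize again so every shard finishes building the model before
        # the server starts handing out requests.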
        torch.distributed.barrier(group=self.process_group)
        super(FlashSantacoderSharded, self).__init__(
            model=model.to(device),
            tokenizer=tokenizer,
            num_layers=len(model.transformer.h),
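            # SantaCoder uses multi-query attention: a single key/value head is
            # shared across all query heads.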
            num_kv_heads=1,
            head_size=model.transformer.head_size,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )

    def decode(self, generated_ids: List[int]) -> str:
        # Do not skip special tokens as they are used for custom parsing rules of the generated text
        return self.tokenizer.decode(
            generated_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False
        )