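# flash_llama.py
#
# FlashLlama: a FlashCausalLM subclass that loads a Llama checkpoint
# (tokenizer, config, and safetensors weights) for tensor-parallel,
# flash-attention inference on GPU.
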
import torch
import torch.distributed

from opentelemetry import trace
from transformers import AutoConfig, AutoTokenizer, GenerationConfig
from transformers.models.llama import LlamaTokenizer
from typing import Optional

from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_llama_modeling import (
    FlashLlamaForCausalLM,
)
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)

tracer = trace.get_tracer(__name__)


class FlashLlama(FlashCausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        use_medusa: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
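        # Initialize the torch.distributed process group; rank and world_size
        # drive tensor-parallel sharding across the available GPUs.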
        self.process_group, rank, world_size = initialize_torch_distributed()
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        else:
            raise NotImplementedError("FlashLlama is only available on GPU")

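        # Prefer the sentencepiece LlamaTokenizer; fall back to AutoTokenizer
        # for checkpoints it cannot load.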
        try:
            tokenizer = LlamaTokenizer.from_pretrained(
                model_id,
                revision=revision,
                padding_side="left",
                truncation_side="left",
                trust_remote_code=trust_remote_code,
            )
        except Exception:
            tokenizer = AutoTokenizer.from_pretrained(
                model_id,
                revision=revision,
                padding_side="left",
                truncation_side="left",
                trust_remote_code=trust_remote_code,
            )
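
        # Some checkpoints list several EOS token ids in generation_config.json;
        # stash them on the tokenizer (presumably read by the server's stopping
        # criteria) so generation can stop on any of them.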
        try:
            generation_config = GenerationConfig.from_pretrained(
                model_id, revision=revision, trust_remote_code=trust_remote_code
            )
            if isinstance(generation_config.eos_token_id, (list, set)):
                # TODO Huge hack
                tokenizer._eos_token_ids = set(generation_config.eos_token_id)
        except Exception:
            pass

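        # Load the HF config and attach server-side options; quantize and
        # use_medusa are consulted downstream when the model is built.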
        config = AutoConfig.from_pretrained(
            model_id, revision=revision, trust_remote_code=trust_remote_code
        )
        config.quantize = quantize
        config.use_medusa = use_medusa

        torch.distributed.barrier(group=self.process_group)

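        # Locate the checkpoint's .safetensors shards; Weights lazily loads
        # them so each rank pulls only the tensors (or slices) it needs.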
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(filenames, device, dtype, process_group=self.process_group)
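        # GPTQ/AWQ checkpoints carry their quantization parameters (e.g. bits,
        # groupsize) in the repo, so hand those to the Weights helper up front.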
        if config.quantize in ["gptq", "awq"]:
            weights._set_gptq_params(model_id, revision)

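        # Build the sharded model (the empty prefix means weight names are
        # looked up from the checkpoint root), then sync all ranks again.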
        prefix = ""
        model = FlashLlamaForCausalLM(prefix, config, weights)
        torch.distributed.barrier(group=self.process_group)
        super().__init__(
            model=model,
            tokenizer=tokenizer,
            num_layers=len(model.model.layers),
            num_kv_heads=model.model.num_key_value_heads,
            head_size=model.model.head_size,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )
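

# Illustrative usage (the model id here is a placeholder): the serving layer
# constructs FlashLlama for Llama-family checkpoints, one process per GPU, e.g.
#
#   model = FlashLlama("meta-llama/Llama-2-7b-hf", dtype=torch.float16)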