flash_neox.py
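# Sharded GPT-NeoX causal LM served through the flash-attention runtime
# (FlashCausalLM) of text-generation-inference.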
import torch
import torch.distributed

from opentelemetry import trace
from transformers import AutoTokenizer, AutoConfig
from typing import Optional

from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_neox_modeling import (
    FlashGPTNeoXForCausalLM,
)
from text_generation_server.utils import (
    initialize_torch_distributed,
    weight_files,
    Weights,
)
from text_generation_server.utils.import_utils import IS_XPU_SYSTEM

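# OpenTelemetry tracer for this module.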
tracer = trace.get_tracer(__name__)


class FlashNeoXSharded(FlashCausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        use_medusa: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
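        # Initialize the torch.distributed process group; each rank loads and
        # serves one tensor-parallel shard of the model.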
        self.process_group, rank, world_size = initialize_torch_distributed()
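        # Pick the target device and default dtype: CUDA first, then Intel
        # XPU. The flash-attention path has no CPU fallback.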
        if torch.cuda.is_available():
            device = torch.device(f"cuda:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        elif IS_XPU_SYSTEM:
            device = torch.device(f"xpu:{rank}")
            dtype = torch.float16 if dtype is None else dtype
        else:
            raise NotImplementedError("FlashNeoX is only available on GPU")

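        # Left-side padding/truncation is the usual convention for
        # decoder-only generation models.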
        tokenizer = AutoTokenizer.from_pretrained(
            model_id,
            revision=revision,
            padding_side="left",
            truncation_side="left",
            trust_remote_code=trust_remote_code,
        )

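        # Attach the server options to the config so the modeling code can
        # select the matching (e.g. quantized) layer implementations.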
        config = AutoConfig.from_pretrained(
            model_id, revision=revision, trust_remote_code=trust_remote_code
        )
        config.quantize = quantize
        config.use_medusa = use_medusa

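        # Synchronize all ranks, then locate the .safetensors files and wrap
        # them in a Weights loader that hands each rank its shard of every
        # tensor.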
        torch.distributed.barrier(group=self.process_group)
        filenames = weight_files(model_id, revision=revision, extension=".safetensors")
        weights = Weights(
            filenames, device=device, dtype=dtype, process_group=self.process_group
        )
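        # GPTQ checkpoints carry extra quantization metadata (e.g. bits and
        # group size) that the loader needs before reading the tensors.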
        if config.quantize == "gptq":
            weights._set_gptq_params(model_id, revision)

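        # Build the model; layers pull their (already sharded) weights from
        # the loader as they are constructed.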
        model = FlashGPTNeoXForCausalLM(config, weights)

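        # Wait for every rank to finish loading before registering with
        # FlashCausalLM, which sizes the KV cache from the layer/head
        # geometry passed below.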
        torch.distributed.barrier(group=self.process_group)
        super(FlashNeoXSharded, self).__init__(
            model=model.to(device),
            tokenizer=tokenizer,
            num_layers=len(model.gpt_neox.layers),
            num_kv_heads=model.gpt_neox.num_heads,
            head_size=model.gpt_neox.head_size,
            dtype=dtype,
            device=device,
            rank=rank,
            world_size=world_size,
        )
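

# Minimal usage sketch (illustrative, not part of the original module): the
# checkpoint id and launch command below are assumptions. Launched with e.g.
# `torchrun --nproc-per-node=2 script.py`, each process becomes one shard:
#
#     from text_generation_server.models.flash_neox import FlashNeoXSharded
#
#     model = FlashNeoXSharded(
#         model_id="EleutherAI/gpt-neox-20b",  # assumed checkpoint
#         quantize=None,                       # or "gptq" for GPTQ weights
#         dtype=None,                          # defaults to torch.float16
#     )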