import os

import torch

from loguru import logger
from transformers import AutoConfig
from transformers.models.auto import modeling_auto

from typing import Optional

from text_generation_server.models.model import Model
from text_generation_server.models.causal_lm import CausalLM
from text_generation_server.models.bloom import BLOOM, BLOOMSharded
from text_generation_server.models.seq2seq_lm import Seq2SeqLM
from text_generation_server.models.galactica import Galactica, GalacticaSharded
from text_generation_server.models.santacoder import SantaCoder
from text_generation_server.models.gpt_neox import GPTNeoxSharded
from text_generation_server.models.t5 import T5Sharded

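# FlashNeoX is an optional fast path: it is only used when its import succeeds,
# CUDA is available, and the FLASH_NEOX environment variable is set to 1.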
try:
    from text_generation_server.models.flash_neox import FlashNeoX, FlashNeoXSharded

    FLASH_NEOX = torch.cuda.is_available() and int(os.environ.get("FLASH_NEOX", 0)) == 1
except ImportError:
    if int(os.environ.get("FLASH_NEOX", 0)) == 1:
        logger.exception("Could not import FlashNeoX")
    FLASH_NEOX = False

__all__ = [
    "Model",
    "BLOOM",
    "BLOOMSharded",
    "CausalLM",
    "Galactica",
    "GalacticaSharded",
    "GPTNeoxSharded",
    "Seq2SeqLM",
    "SantaCoder",
    "T5Sharded",
    "get_model",
]

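# Only export the Flash implementations when they are actually enabled.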
if FLASH_NEOX:
    __all__.append("FlashNeoX")
    __all__.append("FlashNeoXSharded")

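# TF32 trades a small amount of matmul precision for a large throughput gain on
# Ampere and newer GPUs, which is an acceptable trade-off for inference.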
# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
# in PyTorch 1.12 and later.
torch.backends.cuda.matmul.allow_tf32 = True

# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True

# Disable gradients
torch.set_grad_enabled(False)


def get_model(
    model_id: str, revision: Optional[str], sharded: bool, quantize: bool
) -> Model:
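    """Instantiate the serving model for `model_id`.

    Dispatch order: Galactica and SantaCoder are matched on the model id;
    BLOOM, GPT-NeoX (FlashNeoX when enabled) and T5 on `config.model_type`;
    everything else falls back to the generic AutoModel-based
    CausalLM / Seq2SeqLM wrappers, which do not support sharding.
    """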
    if "facebook/galactica" in model_id:
        if sharded:
            return GalacticaSharded(model_id, revision, quantize=quantize)
        else:
            return Galactica(model_id, revision, quantize=quantize)

    if "santacoder" in model_id:
        return SantaCoder(model_id, revision, quantize)

    config = AutoConfig.from_pretrained(model_id, revision=revision)
    model_type = config.model_type

    if model_type == "bloom":
        if sharded:
            return BLOOMSharded(model_id, revision, quantize=quantize)
        else:
            return BLOOM(model_id, revision, quantize=quantize)

    if model_type == "gpt_neox":
        if sharded:
            neox_cls = FlashNeoXSharded if FLASH_NEOX else GPTNeoxSharded
            return neox_cls(model_id, revision, quantize=quantize)
        else:
            neox_cls = FlashNeoX if FLASH_NEOX else CausalLM
            return neox_cls(model_id, revision, quantize=quantize)

    if model_type == "t5":
        if sharded:
            return T5Sharded(model_id, revision, quantize=quantize)
        else:
            return Seq2SeqLM(model_id, revision, quantize=quantize)

    if sharded:
        raise ValueError("sharded is not supported for AutoModel")

    if model_type in modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
        return CausalLM(model_id, revision, quantize=quantize)
    if model_type in modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES:
        return Seq2SeqLM(model_id, revision, quantize=quantize)

    raise ValueError(f"Unsupported model type {model_type}")
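

# Minimal usage sketch (the model id below is illustrative, not required by this module):
#
#   model = get_model("bigscience/bloom-560m", revision=None, sharded=False, quantize=False)
#   # -> returns a BLOOM instance, since config.model_type == "bloom" and sharded is False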