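"""Model instantiation for text_generation_server.

`get_model` maps a model id and its Hugging Face config to the concrete
`Model` implementation exported below, optionally sharded and/or quantized.
"""
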
import torch

from transformers import AutoConfig
from typing import Optional

from text_generation_server.models.model import Model
from text_generation_server.models.causal_lm import CausalLM
from text_generation_server.models.bloom import BLOOM, BLOOMSharded
from text_generation_server.models.seq2seq_lm import Seq2SeqLM
from text_generation_server.models.galactica import Galactica, GalacticaSharded
from text_generation_server.models.santacoder import SantaCoder
from text_generation_server.models.gpt_neox import GPTNeox, GPTNeoxSharded
from text_generation_server.models.t5 import T5Sharded

__all__ = [
    "Model",
    "BLOOM",
    "BLOOMSharded",
    "CausalLM",
    "Galactica",
    "GalacticaSharded",
    "GPTNeox",
    "GPTNeoxSharded",
    "Seq2SeqLM",
    "SantaCoder",
    "T5Sharded",
    "get_model",
]

# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
# in PyTorch 1.12 and later.
torch.backends.cuda.matmul.allow_tf32 = True

# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True

# Disable gradients
torch.set_grad_enabled(False)


def get_model(
    model_id: str, revision: Optional[str], sharded: bool, quantize: bool
) -> Model:
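    """Return the `Model` implementation matching `model_id`.

    Dispatch is by model id substring first (Galactica, SantaCoder), then by
    the checkpoint's `config.model_type`, falling back to the generic
    auto-model classes.
    """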
    if "facebook/galactica" in model_id:
        if sharded:
            return GalacticaSharded(model_id, revision, quantize=quantize)
        else:
            return Galactica(model_id, revision, quantize=quantize)

    if "santacoder" in model_id:
        return SantaCoder(model_id, revision, quantize=quantize)

    config = AutoConfig.from_pretrained(model_id, revision=revision)

    if config.model_type == "bloom":
        if sharded:
            return BLOOMSharded(model_id, revision, quantize=quantize)
        else:
            return BLOOM(model_id, revision, quantize=quantize)

    if config.model_type == "gpt_neox":
        if sharded:
            return GPTNeoxSharded(model_id, revision, quantize=quantize)
        else:
            return GPTNeox(model_id, revision, quantize=quantize)

    if config.model_type == "t5":
        if sharded:
            return T5Sharded(model_id, revision, quantize=quantize)
        else:
            return Seq2SeqLM(model_id, revision, quantize=quantize)

    if sharded:
        raise ValueError("sharded is not supported for AutoModel")

    # Fall back to the generic auto-model paths: try a decoder-only model
    # first, then an encoder-decoder model if that fails.
    try:
        return CausalLM(model_id, revision, quantize=quantize)
    except Exception:
        return Seq2SeqLM(model_id, revision, quantize=quantize)
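
# A minimal usage sketch (the model id below is only an example; any id
# resolvable by AutoConfig should dispatch through the branches above):
#
#     model = get_model(
#         "bigscience/bloom-560m", revision=None, sharded=False, quantize=False
#     )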