import torch

from transformers import AutoConfig
from typing import Optional

from text_generation.models.model import Model
from text_generation.models.causal_lm import CausalLM
from text_generation.models.bloom import BLOOM, BLOOMSharded
from text_generation.models.seq2seq_lm import Seq2SeqLM
from text_generation.models.galactica import Galactica, GalacticaSharded
from text_generation.models.santacoder import SantaCoder
from text_generation.models.gpt_neox import GPTNeox, GPTNeoxSharded

__all__ = [
    "Model",
    "BLOOM",
    "BLOOMSharded",
    "CausalLM",
    "Seq2SeqLM",
    "SantaCoder",
    "get_model",
]

# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
# in PyTorch 1.12 and later.
torch.backends.cuda.matmul.allow_tf32 = True

# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True
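# Both flags trade a small amount of numerical precision for higher matmul /
# convolution throughput on Ampere and newer GPUs, which is generally an
# acceptable trade-off for inference.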

# Disable gradient tracking globally: these models are only used for inference
torch.set_grad_enabled(False)


def get_model(
    model_id: str, revision: Optional[str], sharded: bool, quantize: bool
) -> Model:
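    """Instantiate the model implementation used to serve `model_id`.

    Dispatch is based on the checkpoint's `config.model_type` (or on the model
    id itself for Galactica and SantaCoder checkpoints), returning a sharded
    implementation when `sharded=True`. Unknown architectures fall back to the
    generic `CausalLM`, then `Seq2SeqLM`.
    """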
    config = AutoConfig.from_pretrained(model_id, revision=revision)

    if config.model_type == "bloom":
        if sharded:
            return BLOOMSharded(model_id, revision, quantize=quantize)
        else:
            return BLOOM(model_id, revision, quantize=quantize)
    elif config.model_type == "gpt_neox":
        if sharded:
            return GPTNeoxSharded(model_id, revision, quantize=quantize)
        else:
            return GPTNeox(model_id, revision, quantize=quantize)
    # Galactica and SantaCoder checkpoints are matched on the model id rather
    # than on `config.model_type`
    elif model_id.startswith("facebook/galactica"):
        if sharded:
            return GalacticaSharded(model_id, revision, quantize=quantize)
        else:
            return Galactica(model_id, revision, quantize=quantize)
    elif "santacoder" in model_id:
        return SantaCoder(model_id, revision, quantize)
    else:
        if sharded:
            raise ValueError("sharded is not supported for AutoModel")
        # Fall back to the generic transformers auto-classes: try a causal LM
        # first, then a seq2seq LM if that fails
        try:
            return CausalLM(model_id, revision, quantize=quantize)
        except Exception:
            return Seq2SeqLM(model_id, revision, quantize=quantize)
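
# Usage sketch (illustrative only; the model id and flag values below are
# assumptions, not part of this module):
#
#   from text_generation.models import get_model
#
#   model = get_model(
#       "bigscience/bloom-560m", revision=None, sharded=False, quantize=False
#   )
#   # BLOOM checkpoints report `model_type == "bloom"`, so this returns a
#   # BLOOM instance; passing `sharded=True` would return BLOOMSharded instead.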