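"""Model implementations for the text_generation server.

`get_model` is a factory that maps a Hugging Face model name to the
matching implementation: BLOOM and Galactica have dedicated (optionally
sharded) classes, SantaCoder has its own wrapper, and any other name
falls back to the generic `CausalLM` or `Seq2SeqLM` classes.
"""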
import torch

from text_generation.models.model import Model
from text_generation.models.causal_lm import CausalLM
from text_generation.models.bloom import BLOOM, BLOOMSharded
from text_generation.models.seq2seq_lm import Seq2SeqLM
from text_generation.models.galactica import Galactica, GalacticaSharded
from text_generation.models.santacoder import SantaCoder

__all__ = [
    "Model",
    "BLOOM",
    "BLOOMSharded",
    "CausalLM",
    "Seq2SeqLM",
    "SantaCoder",
    "get_model",
]

# The flag below controls whether to allow TF32 on matmul. This flag defaults to False
# in PyTorch 1.12 and later.
torch.backends.cuda.matmul.allow_tf32 = True

# The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.
torch.backends.cudnn.allow_tf32 = True


def get_model(model_name: str, sharded: bool, quantize: bool) -> Model:
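    """Instantiate the model implementation that matches `model_name`.

    `sharded` selects the tensor-parallel variant (supported for BLOOM and
    Galactica only); `quantize` is forwarded to the model constructor.
    """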
    if model_name.startswith("bigscience/bloom"):
        if sharded:
            return BLOOMSharded(model_name, quantize=quantize)
        else:
            return BLOOM(model_name, quantize=quantize)
    elif model_name.startswith("facebook/galactica"):
        if sharded:
            return GalacticaSharded(model_name, quantize=quantize)
        else:
            return Galactica(model_name, quantize=quantize)
    elif "santacoder" in model_name:
        return SantaCoder(model_name, quantize=quantize)
    else:
        if sharded:
            raise ValueError("sharded is not supported for AutoModel")
        try:
            return CausalLM(model_name, quantize=quantize)
        except Exception:
            return Seq2SeqLM(model_name, quantize=quantize)
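

# Example usage (a sketch; assumes the checkpoint can be downloaded from the
# Hugging Face Hub or is already cached locally):
#
#     from text_generation.models import get_model
#
#     model = get_model("bigscience/bloom-560m", sharded=False, quantize=False)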