from text_generation.models.model import Model
from text_generation.models.causal_lm import CausalLM
from text_generation.models.bloom import BLOOMSharded
from text_generation.models.seq2seq_lm import Seq2SeqLM

__all__ = ["Model", "BLOOMSharded", "CausalLM", "Seq2SeqLM"]


def get_model(model_name: str, sharded: bool, quantize: bool) -> Model:
    if model_name.startswith("bigscience/bloom"):
        if sharded:
            return BLOOMSharded(model_name, quantize)
        else:
            if quantize:
                raise ValueError("quantization is not supported for non-sharded BLOOM")
16
            return CausalLM(model_name)
17
    else:
18
19
20
21
        if sharded:
            raise ValueError("sharded is not supported for AutoModel")
        if quantize:
            raise ValueError("quantize is not supported for AutoModel")
22
23
24
25
        try:
            return CausalLM(model_name)
        except Exception as e:
            return Seq2SeqLM(model_name)