flash_mixtral.py
import torch

from typing import Optional

from text_generation_server.models.flash_mistral import BaseFlashMistral
from text_generation_server.models.custom_modeling.flash_mixtral_modeling import (
    MixtralConfig,
    FlashMixtralForCausalLM,
)


class FlashMixtral(BaseFlashMistral):
    """Mixtral served through the FlashMistral code path."""

    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
        dtype: Optional[torch.dtype] = None,
        trust_remote_code: bool = False,
    ):
        # Reuse BaseFlashMistral's loading and inference logic, substituting
        # the Mixtral config and model classes.
        super().__init__(
            config_cls=MixtralConfig,
            model_cls=FlashMixtralForCausalLM,
            model_id=model_id,
            revision=revision,
            quantize=quantize,
            dtype=dtype,
            trust_remote_code=trust_remote_code,
        )
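

# Usage sketch (hedged): in text-generation-inference, model classes like
# this one are normally constructed by the server's model dispatch rather
# than by user code. The direct instantiation below is illustrative only;
# it assumes a CUDA device with flash-attention kernels available, and the
# model id shown is an assumption, not something pinned by this file.
#
#     model = FlashMixtral(
#         model_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
#         revision=None,
#         quantize=None,
#         dtype=torch.bfloat16,
#         trust_remote_code=False,
#     )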