"vscode:/vscode.git/clone" did not exist on "cfdeebd4a8f0decc3d0e1f0f05a7112ddd1e0a29"
santacoder.py 2.05 KB
Newer Older
import torch
import torch.distributed

from typing import Optional, List
from transformers import AutoTokenizer, AutoModelForCausalLM

from text_generation_server.models import CausalLM
8
9
10
11
12
13
14
15
16

FIM_PREFIX = "<fim-prefix>"
FIM_MIDDLE = "<fim-middle>"
FIM_SUFFIX = "<fim-suffix>"
FIM_PAD = "<fim-pad>"
EOD = "<|endoftext|>"


class SantaCoder(CausalLM):
    def __init__(
        self,
        model_id: str,
        revision: Optional[str] = None,
        quantize: Optional[str] = None,
    ):
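        # Prefer GPU with float16; on CPU fall back to float32 (bitsandbytes
        # quantization needs CUDA, so quantize is rejected there).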
        if torch.cuda.is_available():
            device = torch.device("cuda")
            dtype = torch.float16
        else:
            if quantize:
                raise ValueError("quantization is not available on CPU")

            device = torch.device("cpu")
            dtype = torch.float32

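        # Left-side padding/truncation keeps the end of long prompts, which is
        # what decoder-only generation needs.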
        tokenizer = AutoTokenizer.from_pretrained(
            model_id, revision=revision, padding_side="left", truncation_side="left"
        )
        tokenizer.add_special_tokens(
            {
                "additional_special_tokens": [
                    EOD,
                    FIM_PREFIX,
                    FIM_MIDDLE,
                    FIM_SUFFIX,
                    FIM_PAD,
                ],
                "pad_token": EOD,
            }
        )

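        # SantaCoder ships custom modeling code (multi-query attention), hence
        # trust_remote_code=True below; 8-bit loading is only used when the
        # "bitsandbytes" quantization mode is requested.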
        self.model = (
            AutoModelForCausalLM.from_pretrained(
                model_id,
                revision=revision,
                torch_dtype=dtype,
                load_in_8bit=quantize == "bitsandbytes",
                trust_remote_code=True,  # required
            )
            .to(device)
            .eval()
        )

        # Skip CausalLM.__init__ (which would load a model itself) and
        # initialize the shared base class directly with the model built above.
        super(CausalLM, self).__init__(
            tokenizer=tokenizer,
            requires_padding=True,
            dtype=dtype,
            device=device,
            decode_buffer=1,
        )

    def decode(self, generated_ids: List[int]) -> str:
        # Do not skip special tokens as they are used for custom parsing rules of the generated text
        return self.tokenizer.decode(
            generated_ids, skip_special_tokens=False, clean_up_tokenization_spaces=False
        )
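

# --- Usage sketch (not part of the upstream file) ---------------------------
# A minimal, hedged example of driving this class directly for a single
# fill-in-the-middle completion. It assumes the "bigcode/santacoder" checkpoint
# is reachable and text_generation_server is installed; the prompt layout
# (prefix, then suffix, then the middle sentinel) follows the SantaCoder model
# card and may differ for other checkpoints.
if __name__ == "__main__":
    santa = SantaCoder("bigcode/santacoder")

    prompt = (
        f"{FIM_PREFIX}def hello():\n    print("
        f"{FIM_SUFFIX})\n"
        f"{FIM_MIDDLE}"
    )
    inputs = santa.tokenizer(prompt, return_tensors="pt").to(santa.model.device)

    with torch.no_grad():
        output_ids = santa.model.generate(**inputs, max_new_tokens=16)

    # decode() keeps special tokens, so the FIM sentinels stay visible.
    print(santa.decode(output_ids[0].tolist()))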