cost_estimate.py
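"""Dry-run cost estimator for lm_eval tasks.

Runs the evaluation harness against a stub LM that never calls a real model,
counting how many tokens each task would send, then prints a Markdown table
of per-task token counts and estimated prices for the Ada, Babbage, Curie,
and Davinci tiers.
"""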
import random
import transformers

from lm_eval import tasks, evaluator
from lm_eval.base import LM


class DryrunLM(LM):
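    """Stub LM that returns dummy results while tallying tokenizer token counts for each request."""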
    def __init__(self):
        self.tokencost = 0
        self.tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2")
        self.tokenizer.pad_token = "<|endoftext|>"

    @classmethod
    def create_from_arg_string(cls, arg_string):
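        # arg_string is ignored; the dry-run model needs no configuration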
        return cls()

    def loglikelihood(self, requests):
        res = []

        for ctx, cont in requests:
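            # dummy (loglikelihood, is_greedy) result; only the token count matters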
            res.append((-random.random(), False))
            self.tokencost += len(self.tokenizer.tokenize(ctx + cont))

        return res

    def greedy_until(self, requests):
        res = []

        for ctx, _ in requests:
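            # dummy generated continuation; the text itself is never inspected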
            res.append("lol")

            # assume the worst case: generation runs until the 256-token limit
            self.tokencost += len(self.tokenizer.tokenize(ctx)) + 256

        return res

    def loglikelihood_rolling(self, requests):
        res = []

        for (s,) in requests:
            # dummy loglikelihood so the evaluator receives one result per request
            res.append(-random.random())
            # assume the worst case: one extra full 2048-token context per rolling window
            self.tokencost += len(self.tokenizer.tokenize(s)) + 2048

        return res


def main():
    lm = DryrunLM()

    task_list = "arc_challenge,arc_easy,boolq,cola,copa,headqa,hellaswag,lambada,logiqa,mathqa,mc_taco,mrpc,multirc,openbookqa,piqa,prost,pubmedqa,qnli,qqp,race,record,rte,sciq,sst,triviaqa,webqs,wic,wikitext,winogrande,wnli,wsc"
    values = []
    for taskname in task_list.split(","):
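        # reset the token counter, then dry-run the task to tally its token usage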
        lm.tokencost = 0
        evaluator.evaluate(
            lm=lm,
            task_dict={taskname: tasks.get_task(taskname)()},
            num_fewshot=0,
            limit=None,
            bootstrap_iters=10,
            description_dict=None,
        )

        print(taskname, lm.tokencost)
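        # token count plus estimated cost at each model's price per 1,000 tokens
        # (columns: Ada, Babbage, Curie, Davinci)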
        values.append(
            [
                taskname,
                lm.tokencost,
                lm.tokencost / 1000 * 0.0008,
                lm.tokencost / 1000 * 0.0012,
                lm.tokencost / 1000 * 0.006,
                lm.tokencost / 1000 * 0.06,
            ]
        )
    from pytablewriter import MarkdownTableWriter

    writer = MarkdownTableWriter()
    writer.headers = ["Task", "Tokens", "Ada", "Babbage", "Curie", "Davinci"]

    values.sort(key=lambda x: -x[1])
    totcost = sum([x[1] for x in values])
    values.append(
        [
            "**Total**",
            totcost,
            totcost / 1000 * 0.0008,
            totcost / 1000 * 0.0012,
            totcost / 1000 * 0.006,
            totcost / 1000 * 0.06,
        ]
    )

    writer.value_matrix = values

    print(writer.dumps())


if __name__ == "__main__":
    main()