cost_estimate.py
import random

import transformers

from lm_eval import tasks, evaluator
from lm_eval.base import LM
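
# Dry-run cost estimator: run every registered task against a stub LM that only
# tokenizes its requests, then print a Markdown table of per-task token counts
# and the estimated Davinci API cost. Run directly: `python cost_estimate.py`.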


class DryrunLM(LM):
    """LM stub that records how many tokens each request would consume instead of scoring it."""

    def __init__(self):
        self.tokencost = 0
        self.tokenizer = transformers.GPT2TokenizerFast.from_pretrained('gpt2')
        self.tokenizer.pad_token = "<|endoftext|>"

    @classmethod
    def create_from_arg_string(cls, arg_string):
        return cls()

    def loglikelihood(self, requests):
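        # Never call a real model: return dummy (logprob, is_greedy) results and
        # count the context + continuation tokens this request would be billed for.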
        res = []
        
        for ctx, cont in requests:
            res.append((-random.random(), False))
            self.tokencost += len(self.tokenizer.tokenize(ctx + cont))

        return res
    
    def greedy_until(self, requests):
        res = []
        
        for ctx, until in requests:
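            # The generated text is irrelevant for costing; return a placeholder string.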
            res.append("lol")

            # Assume the worst case: the model generates the full 256 tokens before stopping.
            self.tokencost += len(self.tokenizer.tokenize(ctx)) + 256

        return res


def main():
    lm = DryrunLM()

    values = []
    for taskname in list(tasks.TASK_REGISTRY.keys()):
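        # Reset the counter and run the task's full evaluation loop against the
        # dry-run LM, so tokencost ends up counting every request the task issues.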
        lm.tokencost = 0
        evaluator.evaluate(lm, {taskname: tasks.get_task(taskname)()}, False, 0, None)

        print(taskname, lm.tokencost)
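        # Record tokens and estimated cost, assuming Davinci pricing of $0.06 per 1,000 tokens.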
        values.append([taskname, lm.tokencost, lm.tokencost / 1000 * 0.06])
    from pytablewriter import MarkdownTableWriter

    writer = MarkdownTableWriter()
    writer.headers = ["Task", "Tokens", "Davinci Cost ($)"]

    # Sort tasks by token count, most expensive first, then append a grand-total row.
    values.sort(key=lambda x: -x[1])
    total_tokens = sum([x[1] for x in values])
    values.append(["**Total**", total_tokens, total_tokens / 1000 * 0.06])

    writer.value_matrix = values

    print(writer.dumps())


if __name__ == "__main__":
    main()