# test_litellm.py
from dataflow.operators.core_text import PromptedGenerator
from dataflow.serving import APILLMServing_request, LiteLLMServing
from dataflow.utils.storage import FileStorage

class GPT_generator:
    def __init__(self):
        self.storage = FileStorage(
            first_entry_file_name="../../dataflow/example/GeneralTextPipeline/translation.jsonl",
            cache_path="./cache",
            file_name_prefix="translation",
            cache_type="jsonl",
        )
        self.model_cache_dir = "./dataflow_cache"
        self.llm_serving = APILLMServing_request(
            api_url="https://api.openai.com/v1/chat/completions",
            model_name="gpt-5",
            max_workers=10,
            # custom_llm_provider="openai",  # if you are using a custom LLM provider's API
        )
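
        # LiteLLMServing (imported above) is the backend this test file is
        # named for. A minimal, commented-out sketch of swapping it in; the
        # keyword names below are assumptions -- check
        # dataflow.serving.LiteLLMServing for the exact signature.
        # self.llm_serving = LiteLLMServing(
        #     model="openai/gpt-5",
        #     max_workers=10,
        # )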

        self.prompt_generator = PromptedGenerator(
            llm_serving=self.llm_serving,
            system_prompt="Please translate to Chinese. Please answer in JSON format.",
            # Constrain the model output to this JSON schema so every record
            # parses into the same two fields.
            json_schema={
                "type": "object",
                "properties": {
                    "original": {"type": "string"},
                    "translation": {"type": "string"}
                },
                "required": ["original", "translation"],
                "additionalProperties": False,
            },
        )
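        # With the schema above, each model response should parse into an
        # object like the following (illustrative values, not taken from the
        # dataset):
        #   {"original": "Hello, world!", "translation": "你好，世界！"}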

    def forward(self):
        # Run the prompted translation step over the "raw_content" column of
        # the current storage snapshot.
        self.prompt_generator.run(
            storage=self.storage.step(),
            input_key="raw_content",
        )


if __name__ == "__main__":
    # This is the entry point for the pipeline

    model = GPT_generator()
    model.forward()
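
# Usage sketch (the environment-variable name is an assumption; check how
# APILLMServing_request resolves API credentials in your DataFlow version):
#   export OPENAI_API_KEY=sk-...
#   python test_litellm.py
# Generated records are cached as JSONL under ./cache with the "translation"
# file-name prefix configured above.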