distilabel_example.py 1.63 KB
Newer Older
chenzk's avatar
v1.0  
chenzk committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
# NOTE(review): disabled example kept for reference. The triple-quoted string
# below is a no-op expression statement used as a block comment — it shows how
# to stream batches from a Hugging Face Hub dataset via LoadDataFromHub.
# The string content is left byte-identical on purpose.
'''
from distilabel.steps import LoadDataFromHub

loader = LoadDataFromHub(
    repo_id="distilabel-internal-testing/instructions",
    split="test",
    batch_size=2
)
loader.load()

# Just like we saw with LoadDataFromDicts, the `process` method will yield batches.
result = next(loader.process())
print(result)
# >>> result
# ([{'prompt': 'Arianna has 12...', False)
'''


from distilabel.models.llms import OpenAILLM

# Talk to a locally served, OpenAI-compatible endpoint (e.g. vLLM).
client = OpenAILLM(
    model="Qwen/Qwen3-4B",
    base_url=r"http://x.x.x.x:8000/v1",  # replace with the server's IP
    api_key="EMPTY",  # a locally deployed server does not need a real key
)
client.load()

# Send a single-turn conversation and print the generated completion(s).
conversation = [{"role": "user", "content": "Hello world!"}]
# conversation = [{"role": "user", "content": "Create a user profile for the following marathon"}]
responses = client.generate_outputs(inputs=[conversation])
print(responses)

# NOTE(review): disabled example kept for reference. The triple-quoted string
# below is a no-op expression statement used as a block comment — it sketches a
# full distilabel Pipeline (load data -> text generation -> UltraFeedback
# rating) against the same locally served LLM endpoint.
# The string content is left byte-identical on purpose.
'''
from distilabel.models import OpenAILLM
from distilabel.pipeline import Pipeline
from distilabel.steps import LoadDataFromDicts
from distilabel.steps.tasks import TextGeneration, UltraFeedback

with Pipeline(name="serving-llm") as pipeline:
    load_data = LoadDataFromDicts(
        data=[{"instruction": "Write a poem about the sun and moon."}]
    )

    # `base_url` points to the address of the `vLLM` serving the LLM
    llm = OpenAILLM(base_url="http://x.x.x.x:8000/v1", model="Qwen/Qwen3-4B", api_key="EMPTY")

    text_generation = TextGeneration(
        llm=llm,
        num_generations=3,
        group_generations=True,
        output_mappings={"generation": "generations"},
    )
    llm.load()

    ultrafeedback = UltraFeedback(aspect="overall-rating", llm=llm)

    load_data >> text_generation >> ultrafeedback
    print(ultrafeedback)
'''