# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import random

import pytest
from datasets import load_dataset
from transformers import AutoTokenizer

from llamafactory.extras.constants import IGNORE_INDEX
from llamafactory.train.test_utils import load_dataset_module


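# Test fixtures hosted on the Hugging Face Hub; each can be overridden via an environment variable.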
DEMO_DATA = os.getenv("DEMO_DATA", "llamafactory/demo_data")

TINY_LLAMA3 = os.getenv("TINY_LLAMA3", "llamafactory/tiny-random-Llama-3")

TINY_DATA = os.getenv("TINY_DATA", "llamafactory/tiny-supervised-dataset")

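# Minimal SFT training arguments shared by every test in this file.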
TRAIN_ARGS = {
    "model_name_or_path": TINY_LLAMA3,
    "stage": "sft",
    "do_train": True,
    "finetuning_type": "full",
    "template": "llama3",
    "cutoff_len": 8192,
    "output_dir": "dummy_dir",
    "overwrite_output_dir": True,
    "fp16": True,
}


@pytest.mark.parametrize("num_samples", [16])
def test_supervised_single_turn(num_samples: int):
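    # Tokenizing the single-turn dataset through LlamaFactory should match the
    # tokenizer's own chat template applied to the same conversation.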
    train_dataset = load_dataset_module(dataset_dir="ONLINE", dataset=TINY_DATA, **TRAIN_ARGS)["train_dataset"]
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
    original_data = load_dataset(TINY_DATA, split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
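        # rebuild the user prompt from the alpaca-style instruction/input columns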
        prompt = original_data["instruction"][index]
        if original_data["input"][index]:
            prompt += "\n" + original_data["input"][index]

        messages = [
            {"role": "user", "content": prompt},
            {"role": "assistant", "content": original_data["output"][index]},
        ]
        ref_input_ids = ref_tokenizer.apply_chat_template(messages)
        assert train_dataset["input_ids"][index] == ref_input_ids


@pytest.mark.parametrize("num_samples", [8])
def test_supervised_multi_turn(num_samples: int):
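    # Multi-turn conversations (including system messages) should also round-trip
    # through the chat template unchanged.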
    train_dataset = load_dataset_module(dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", **TRAIN_ARGS)[
        "train_dataset"
    ]
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
    original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        ref_input_ids = ref_tokenizer.apply_chat_template(original_data["messages"][index])
        assert train_dataset["input_ids"][index] == ref_input_ids


@pytest.mark.parametrize("num_samples", [4])
def test_supervised_train_on_prompt(num_samples: int):
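    # With train_on_prompt=True, prompt tokens are not masked, so the labels
    # must equal the input ids exactly.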
    train_dataset = load_dataset_module(
        dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", train_on_prompt=True, **TRAIN_ARGS
    )["train_dataset"]
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
    original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        ref_ids = ref_tokenizer.apply_chat_template(original_data["messages"][index])
        assert train_dataset["input_ids"][index] == ref_ids
        assert train_dataset["labels"][index] == ref_ids


@pytest.mark.parametrize("num_samples", [4])
def test_supervised_mask_history(num_samples: int):
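    # With mask_history=True, only the final assistant reply is supervised;
    # all earlier tokens are replaced with IGNORE_INDEX in the labels.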
    train_dataset = load_dataset_module(
        dataset_dir="REMOTE:" + DEMO_DATA, dataset="system_chat", mask_history=True, **TRAIN_ARGS
    )["train_dataset"]
    ref_tokenizer = AutoTokenizer.from_pretrained(TINY_LLAMA3)
    original_data = load_dataset(DEMO_DATA, name="system_chat", split="train")
    indexes = random.choices(range(len(original_data)), k=num_samples)
    for index in indexes:
        messages = original_data["messages"][index]
        ref_input_ids = ref_tokenizer.apply_chat_template(messages)
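        # everything up to the final assistant reply (including the generation
        # prompt) counts as prompt and is masked in the labels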
        prompt_len = len(ref_tokenizer.apply_chat_template(messages[:-1], add_generation_prompt=True))
        ref_label_ids = [IGNORE_INDEX] * prompt_len + ref_input_ids[prompt_len:]
        assert train_dataset["input_ids"][index] == ref_input_ids
        assert train_dataset["labels"][index] == ref_label_ids