# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
from dataclasses import dataclass
from typing import Any, Dict, List, Literal, Optional, Sequence

from transformers.utils import cached_file
from ..extras.constants import DATA_CONFIG
from ..extras.misc import use_modelscope


@dataclass
class DatasetAttr:
    r"""
    Dataset attributes.

    Describes where a dataset is loaded from and how the raw dataset's
    columns/tags map onto the fields the pipeline expects, for either the
    "alpaca" or the "sharegpt" formatting scheme.
    """

    # basic configs
    load_from: Literal["hf_hub", "ms_hub", "script", "file"]
    # Repo id, script url, or file name, depending on ``load_from``.
    dataset_name: str
    formatting: Literal["alpaca", "sharegpt"] = "alpaca"
    ranking: bool = False
    # extra configs
    subset: Optional[str] = None
    split: str = "train"
    folder: Optional[str] = None
    # presumably caps the number of samples taken — confirm against the loader
    num_samples: Optional[int] = None
    # common columns (names of columns in the raw dataset)
    system: Optional[str] = None
    tools: Optional[str] = None
    images: Optional[str] = None
    videos: Optional[str] = None
    # rlhf columns
    chosen: Optional[str] = None
    rejected: Optional[str] = None
    kto_tag: Optional[str] = None
    # alpaca columns
    prompt: Optional[str] = "instruction"
    query: Optional[str] = "input"
    response: Optional[str] = "output"
    history: Optional[str] = None
    # sharegpt columns
    messages: Optional[str] = "conversations"
    # sharegpt tags (keys inside each message dict)
    role_tag: Optional[str] = "from"
    content_tag: Optional[str] = "value"
    user_tag: Optional[str] = "human"
    assistant_tag: Optional[str] = "gpt"
    observation_tag: Optional[str] = "observation"
    function_tag: Optional[str] = "function_call"
    system_tag: Optional[str] = "system"

    def __repr__(self) -> str:
        # Represent the attr by its dataset name (used in logs/messages).
        return self.dataset_name

    def set_attr(self, key: str, obj: Dict[str, Any], default: Optional[Any] = None) -> None:
        # Copy ``obj[key]`` onto this instance, falling back to ``default``
        # when the key is absent.
        setattr(self, key, obj.get(key, default))


chenych's avatar
chenych committed
74
75
76
77
78
def get_dataset_list(dataset_names: Optional[Sequence[str]], dataset_dir: str) -> List["DatasetAttr"]:
    r"""
    Gets the attributes of the datasets.

    Args:
        dataset_names: names of the requested datasets; ``None`` is treated as empty.
        dataset_dir: local directory containing the dataset config file,
            ``"ONLINE"`` to skip the config entirely, or ``"REMOTE:<repo_id>"``
            to fetch the config file from a dataset repo on the hub.

    Returns:
        One ``DatasetAttr`` per requested dataset name.

    Raises:
        ValueError: if the config file cannot be read while datasets were
            requested, or if a requested dataset is not defined in the config.
    """
    if dataset_names is None:
        dataset_names = []

    if dataset_dir == "ONLINE":
        dataset_info = None
    else:
        if dataset_dir.startswith("REMOTE:"):
            config_path = cached_file(path_or_repo_id=dataset_dir[7:], filename=DATA_CONFIG, repo_type="dataset")
        else:
            config_path = os.path.join(dataset_dir, DATA_CONFIG)

        try:
            # Explicit encoding: the config is JSON and must be decoded as
            # UTF-8 regardless of the platform's locale default.
            with open(config_path, "r", encoding="utf-8") as f:
                dataset_info = json.load(f)
        except Exception as err:
            if len(dataset_names) != 0:
                # Chain the original exception so the root cause stays visible.
                raise ValueError("Cannot open {} due to {}.".format(config_path, str(err))) from err

            dataset_info = None

    dataset_list: List["DatasetAttr"] = []
    for name in dataset_names:
        if dataset_info is None:  # dataset_dir is ONLINE
            load_from = "ms_hub" if use_modelscope() else "hf_hub"
            dataset_attr = DatasetAttr(load_from, dataset_name=name)
            dataset_list.append(dataset_attr)
            continue

        if name not in dataset_info:
            raise ValueError("Undefined dataset {} in {}.".format(name, DATA_CONFIG))

        has_hf_url = "hf_hub_url" in dataset_info[name]
        has_ms_url = "ms_hub_url" in dataset_info[name]

        if has_hf_url or has_ms_url:
            # Prefer ModelScope when it is enabled and an ms url exists;
            # also fall back to ms_hub when no hf url is provided at all.
            if (use_modelscope() and has_ms_url) or (not has_hf_url):
                dataset_attr = DatasetAttr("ms_hub", dataset_name=dataset_info[name]["ms_hub_url"])
            else:
                dataset_attr = DatasetAttr("hf_hub", dataset_name=dataset_info[name]["hf_hub_url"])
        elif "script_url" in dataset_info[name]:
            dataset_attr = DatasetAttr("script", dataset_name=dataset_info[name]["script_url"])
        else:
            dataset_attr = DatasetAttr("file", dataset_name=dataset_info[name]["file_name"])

        dataset_attr.set_attr("formatting", dataset_info[name], default="alpaca")
        dataset_attr.set_attr("ranking", dataset_info[name], default=False)
        dataset_attr.set_attr("subset", dataset_info[name])
        dataset_attr.set_attr("split", dataset_info[name], default="train")
        dataset_attr.set_attr("folder", dataset_info[name])
        dataset_attr.set_attr("num_samples", dataset_info[name])

        if "columns" in dataset_info[name]:
            # Columns shared by both formats, then the format-specific ones.
            column_names = ["system", "tools", "images", "videos", "chosen", "rejected", "kto_tag"]
            if dataset_attr.formatting == "alpaca":
                column_names.extend(["prompt", "query", "response", "history"])
            else:
                column_names.extend(["messages"])

            for column_name in column_names:
                dataset_attr.set_attr(column_name, dataset_info[name]["columns"])

        if dataset_attr.formatting == "sharegpt" and "tags" in dataset_info[name]:
            tag_names = (
                "role_tag",
                "content_tag",
                "user_tag",
                "assistant_tag",
                "observation_tag",
                "function_tag",
                "system_tag",
            )
            for tag in tag_names:
                dataset_attr.set_attr(tag, dataset_info[name]["tags"])

        dataset_list.append(dataset_attr)

    return dataset_list