cli.py
# Copyright 2025 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
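
"""Command-line entry point for LLaMA-Factory (`llamafactory-cli`)."""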

import os
import subprocess
import sys
from copy import deepcopy
from functools import partial


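# Usage banner printed by the `help` command and whenever an unknown command is given.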
USAGE = (
    "-" * 70
    + "\n"
    + "| Usage:                                                             |\n"
    + "|   llamafactory-cli api -h: launch an OpenAI-style API server       |\n"
    + "|   llamafactory-cli chat -h: launch a chat interface in CLI         |\n"
    + "|   llamafactory-cli eval -h: evaluate models                        |\n"
    + "|   llamafactory-cli export -h: merge LoRA adapters and export model |\n"
    + "|   llamafactory-cli train -h: train models                          |\n"
    + "|   llamafactory-cli webchat -h: launch a chat interface in Web UI   |\n"
    + "|   llamafactory-cli webui: launch LlamaBoard                        |\n"
    + "|   llamafactory-cli version: show version info                      |\n"
    + "-" * 70
)


def main():
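    # Imports are deferred into main() so that lightweight commands such as
    # `version` and `help` can start without paying the full import cost.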
    from . import launcher
    from .api.app import run_api
    from .chat.chat_model import run_chat
    from .eval.evaluator import run_eval
    from .extras import logging
    from .extras.env import VERSION, print_env
    from .extras.misc import find_available_port, get_device_count, is_env_enabled, use_ray
    from .train.tuner import export_model, run_exp
    from .webui.interface import run_web_demo, run_web_ui

    logger = logging.get_logger(__name__)

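    # Version banner printed by `llamafactory-cli version`; the padding keeps the box borders aligned.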
    WELCOME = (
        "-" * 58
        + "\n"
        + f"| Welcome to LLaMA Factory, version {VERSION}"
        + " " * (21 - len(VERSION))
        + "|\n|"
        + " " * 56
        + "|\n"
        + "| Project page: https://github.com/hiyouga/LLaMA-Factory |\n"
        + "-" * 58
    )

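    # Dispatch table mapping each subcommand name to its handler.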
    COMMAND_MAP = {
        "api": run_api,
        "chat": run_chat,
        "env": print_env,
        "eval": run_eval,
        "export": export_model,
        "train": run_exp,
        "webchat": run_web_demo,
        "webui": run_web_ui,
        "version": partial(print, WELCOME),
        "help": partial(print, USAGE),
    }

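    # The first CLI argument selects the subcommand; everything after it stays in sys.argv for the handler.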
    command = sys.argv.pop(1) if len(sys.argv) > 1 else "help"
    if command == "train" and (is_env_enabled("FORCE_TORCHRUN") or (get_device_count() > 1 and not use_ray())):
        # launch distributed training
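        # torchrun rendezvous settings; each may be overridden through the corresponding environment variable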
        nnodes = os.getenv("NNODES", "1")
        node_rank = os.getenv("NODE_RANK", "0")
        nproc_per_node = os.getenv("NPROC_PER_NODE", str(get_device_count()))
        master_addr = os.getenv("MASTER_ADDR", "127.0.0.1")
        master_port = os.getenv("MASTER_PORT", str(find_available_port()))
        logger.info_rank0(f"Initializing {nproc_per_node} distributed tasks at: {master_addr}:{master_port}")
        if int(nnodes) > 1:
            print(f"Multi-node training enabled: num nodes: {nnodes}, node rank: {node_rank}")

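        # copy the current environment so the DDP-related tweaks below only affect the torchrun subprocess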
        env = deepcopy(os.environ)
        if is_env_enabled("OPTIM_TORCH", "1"):
            # optimize DDP, see https://zhuanlan.zhihu.com/p/671834539
            env["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"
            env["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"

        # NOTE: DO NOT USE shell=True to avoid security risk
        # Build the argument list directly instead of splitting a formatted string,
        # so that arguments containing spaces survive intact.
        process = subprocess.run(
            [
                "torchrun",
                "--nnodes", nnodes,
                "--node_rank", node_rank,
                "--nproc_per_node", nproc_per_node,
                "--master_addr", master_addr,
                "--master_port", master_port,
                launcher.__file__,
                *sys.argv[1:],
            ],
            env=env,
        )
        # Propagate the child's exit code; `check=True` would raise on failure instead,
        # which would make the exit status below unreachable.
        sys.exit(process.returncode)
    elif command in COMMAND_MAP:
        COMMAND_MAP[command]()
    else:
        print(f"Unknown command: {command}.\n{USAGE}")


if __name__ == "__main__":
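    # freeze_support() is required for multiprocessing when the CLI runs as a frozen executable on Windows.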
    from multiprocessing import freeze_support

    freeze_support()
    main()