ModelZoo / Qwen2_pytorch · Commits

Commit 032b90a1, authored Sep 12, 2024 by luopl
init commit

Pipeline #1684 canceled with stages · Changes: 233 · Pipelines: 1

Showing 20 changed files with 2580 additions and 0 deletions (+2580 -0)
Changed files (first 20 shown):

  LLaMA-Factory/src/llamafactory/train/sft/__init__.py          +18   -0
  LLaMA-Factory/src/llamafactory/train/sft/metric.py            +130  -0
  LLaMA-Factory/src/llamafactory/train/sft/trainer.py           +150  -0
  LLaMA-Factory/src/llamafactory/train/sft/workflow.py          +123  -0
  LLaMA-Factory/src/llamafactory/train/test_utils.py            +118  -0
  LLaMA-Factory/src/llamafactory/train/trainer_utils.py         +427  -0
  LLaMA-Factory/src/llamafactory/train/tuner.py                 +143  -0
  LLaMA-Factory/src/llamafactory/webui/__init__.py              +0    -0
  LLaMA-Factory/src/llamafactory/webui/chatter.py               +164  -0
  LLaMA-Factory/src/llamafactory/webui/common.py                +196  -0
  LLaMA-Factory/src/llamafactory/webui/components/__init__.py   +30   -0
  LLaMA-Factory/src/llamafactory/webui/components/chatbot.py    +88   -0
  LLaMA-Factory/src/llamafactory/webui/components/data.py       +120  -0
  LLaMA-Factory/src/llamafactory/webui/components/eval.py       +93   -0
  LLaMA-Factory/src/llamafactory/webui/components/export.py     +154  -0
  LLaMA-Factory/src/llamafactory/webui/components/infer.py      +73   -0
  LLaMA-Factory/src/llamafactory/webui/components/top.py        +77   -0
  LLaMA-Factory/src/llamafactory/webui/components/train.py      +354  -0
  LLaMA-Factory/src/llamafactory/webui/css.py                   +41   -0
  LLaMA-Factory/src/llamafactory/webui/engine.py                +81   -0
LLaMA-Factory/src/llamafactory/train/sft/__init__.py (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .workflow import run_sft


__all__ = ["run_sft"]
LLaMA-Factory/src/llamafactory/train/sft/metric.py (new file, mode 100644)
# Copyright 2024 HuggingFace Inc., THUDM, and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library and the THUDM's ChatGLM implementation.
# https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/summarization/run_summarization.py
# https://github.com/THUDM/ChatGLM-6B/blob/main/ptuning/main.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import torch
from transformers.utils import is_jieba_available, is_nltk_available

from ...extras.constants import IGNORE_INDEX
from ...extras.misc import numpify
from ...extras.packages import is_rouge_available


if TYPE_CHECKING:
    from transformers import EvalPrediction, PreTrainedTokenizer


if is_jieba_available():
    import jieba  # type: ignore


if is_nltk_available():
    from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu


if is_rouge_available():
    from rouge_chinese import Rouge


def eval_logit_processor(logits: "torch.Tensor", labels: "torch.Tensor") -> "torch.Tensor":
    if isinstance(logits, (list, tuple)):
        if logits[0].dim() == 3:  # (batch_size, seq_len, vocab_size)
            logits = logits[0]
        else:  # moe models have aux loss
            logits = logits[1]

    if logits.dim() != 3:
        raise ValueError("Cannot process the logits.")

    return torch.argmax(logits, dim=-1)


@dataclass
class ComputeAccuracy:
    def _dump(self) -> Optional[Dict[str, float]]:
        result = None
        if hasattr(self, "score_dict"):
            result = {k: float(np.mean(v)) for k, v in self.score_dict.items()}

        self.score_dict = {"accuracy": []}
        return result

    def __post_init__(self):
        self._dump()

    def __call__(self, eval_preds: "EvalPrediction", compute_result: bool = True) -> Optional[Dict[str, float]]:
        preds, labels = numpify(eval_preds.predictions), numpify(eval_preds.label_ids)
        for i in range(len(preds)):
            pred, label = preds[i, :-1], labels[i, 1:]
            label_mask = label != IGNORE_INDEX
            self.score_dict["accuracy"].append(np.mean(pred[label_mask] == label[label_mask]))

        if compute_result:
            return self._dump()


@dataclass
class ComputeSimilarity:
    r"""
    Wraps the tokenizer into metric functions, used in CustomSeq2SeqTrainer.
    """

    tokenizer: "PreTrainedTokenizer"

    def _dump(self) -> Optional[Dict[str, float]]:
        result = None
        if hasattr(self, "score_dict"):
            result = {k: float(np.mean(v)) for k, v in self.score_dict.items()}

        self.score_dict = {"rouge-1": [], "rouge-2": [], "rouge-l": [], "bleu-4": []}
        return result

    def __post_init__(self):
        self._dump()

    def __call__(self, eval_preds: "EvalPrediction", compute_result: bool = True) -> Optional[Dict[str, float]]:
        preds, labels = numpify(eval_preds.predictions), numpify(eval_preds.label_ids)

        preds = np.where(preds != IGNORE_INDEX, preds, self.tokenizer.pad_token_id)
        labels = np.where(labels != IGNORE_INDEX, labels, self.tokenizer.pad_token_id)

        decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True)
        decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)

        for pred, label in zip(decoded_preds, decoded_labels):
            hypothesis = list(jieba.cut(pred))
            reference = list(jieba.cut(label))

            if len(" ".join(hypothesis).split()) == 0 or len(" ".join(reference).split()) == 0:
                result = {"rouge-1": {"f": 0.0}, "rouge-2": {"f": 0.0}, "rouge-l": {"f": 0.0}}
            else:
                rouge = Rouge()
                scores = rouge.get_scores(" ".join(hypothesis), " ".join(reference))
                result = scores[0]

            for k, v in result.items():
                self.score_dict[k].append(round(v["f"] * 100, 4))

            bleu_score = sentence_bleu([list(label)], list(pred), smoothing_function=SmoothingFunction().method3)
            self.score_dict["bleu-4"].append(round(bleu_score * 100, 4))

        if compute_result:
            return self._dump()
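Note (not part of the commit): when eval_logit_processor is passed to the Trainer as preprocess_logits_for_metrics, it collapses the (batch, seq_len, vocab) logits to token-id predictions via argmax, so only the much smaller prediction tensor is accumulated during evaluation. A minimal sketch with made-up shapes:

import torch

# toy shapes: 2 sequences of 5 tokens over a 10-token vocabulary
logits = torch.randn(2, 5, 10)

preds = torch.argmax(logits, dim=-1)  # the same reduction eval_logit_processor applies
print(preds.shape)  # torch.Size([2, 5]) -- the vocab dimension is gone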
LLaMA-Factory/src/llamafactory/train/sft/trainer.py (new file, mode 100644)
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/trainer_seq2seq.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from types import MethodType
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union

import numpy as np
import torch
from transformers import Seq2SeqTrainer

from ...extras.constants import IGNORE_INDEX
from ...extras.logging import get_logger
from ..callbacks import PissaConvertCallback, SaveProcessorCallback
from ..trainer_utils import create_custom_optimzer, create_custom_scheduler


if TYPE_CHECKING:
    from torch.utils.data import Dataset
    from transformers import ProcessorMixin
    from transformers.trainer import PredictionOutput

    from ...hparams import FinetuningArguments


logger = get_logger(__name__)


class CustomSeq2SeqTrainer(Seq2SeqTrainer):
    r"""
    Inherits Seq2SeqTrainer to compute generative metrics such as BLEU and ROUGE.
    """

    def __init__(
        self, finetuning_args: "FinetuningArguments", processor: Optional["ProcessorMixin"], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.finetuning_args = finetuning_args

        if processor is not None:
            self.add_callback(SaveProcessorCallback(processor))

        if finetuning_args.pissa_convert:
            self.add_callback(PissaConvertCallback)

        if finetuning_args.use_badam:
            from badam import BAdamCallback, clip_grad_norm_old_version

            self.accelerator.clip_grad_norm_ = MethodType(clip_grad_norm_old_version, self.accelerator)
            self.add_callback(BAdamCallback)

    def create_optimizer(self) -> "torch.optim.Optimizer":
        if self.optimizer is None:
            self.optimizer = create_custom_optimzer(self.model, self.args, self.finetuning_args)

        return super().create_optimizer()

    def create_scheduler(
        self, num_training_steps: int, optimizer: Optional["torch.optim.Optimizer"] = None
    ) -> "torch.optim.lr_scheduler.LRScheduler":
        create_custom_scheduler(self.args, num_training_steps, optimizer)
        return super().create_scheduler(num_training_steps, optimizer)

    def prediction_step(
        self,
        model: "torch.nn.Module",
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        r"""
        Removes the prompt part in the generated tokens.

        Subclass and override to inject custom behavior.
        """
        labels = inputs["labels"].detach().clone() if "labels" in inputs else None  # backup labels
        if self.args.predict_with_generate:
            assert self.tokenizer.padding_side == "left", "This method only accepts left-padded tensor."
            prompt_len, label_len = inputs["input_ids"].size(-1), inputs["labels"].size(-1)
            if prompt_len > label_len:
                inputs["labels"] = self._pad_tensors_to_target_len(inputs["labels"], inputs["input_ids"])
            if label_len > prompt_len:  # truncate the labels instead of padding the inputs (llama2 fp16 compatibility)
                inputs["labels"] = inputs["labels"][:, :prompt_len]

        loss, generated_tokens, _ = super().prediction_step(  # ignore the returned labels (may be truncated)
            model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
        )
        if generated_tokens is not None and self.args.predict_with_generate:
            generated_tokens[:, :prompt_len] = self.tokenizer.pad_token_id
            generated_tokens = generated_tokens.contiguous()

        return loss, generated_tokens, labels

    def _pad_tensors_to_target_len(self, src_tensor: torch.Tensor, tgt_tensor: torch.Tensor) -> torch.Tensor:
        r"""
        Pads the tensor to the same length as the target tensor.
        """
        assert self.tokenizer.pad_token_id is not None, "Pad token is required."
        padded_tensor = self.tokenizer.pad_token_id * torch.ones_like(tgt_tensor)
        padded_tensor[:, -src_tensor.shape[-1]:] = src_tensor  # adopt left-padding
        return padded_tensor.contiguous()  # in contiguous memory

    def save_predictions(self, dataset: "Dataset", predict_results: "PredictionOutput") -> None:
        r"""
        Saves model predictions to `output_dir`.

        A custom behavior that not contained in Seq2SeqTrainer.
        """
        if not self.is_world_process_zero():
            return

        output_prediction_file = os.path.join(self.args.output_dir, "generated_predictions.jsonl")
        logger.info(f"Saving prediction results to {output_prediction_file}")

        labels = np.where(
            predict_results.label_ids != IGNORE_INDEX, predict_results.label_ids, self.tokenizer.pad_token_id
        )
        preds = np.where(
            predict_results.predictions != IGNORE_INDEX, predict_results.predictions, self.tokenizer.pad_token_id
        )

        for i in range(len(preds)):
            pad_len = np.nonzero(preds[i] != self.tokenizer.pad_token_id)[0]
            if len(pad_len):  # move pad token to last
                preds[i] = np.concatenate((preds[i][pad_len[0]:], preds[i][:pad_len[0]]), axis=-1)

        decoded_inputs = self.tokenizer.batch_decode(dataset["input_ids"], skip_special_tokens=True)
        decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)
        decoded_preds = self.tokenizer.batch_decode(preds, skip_special_tokens=True)

        with open(output_prediction_file, "w", encoding="utf-8") as writer:
            res: List[str] = []
            for text, label, pred in zip(decoded_inputs, decoded_labels, decoded_preds):
                res.append(json.dumps({"prompt": text, "label": label, "predict": pred}, ensure_ascii=False))

            writer.write("\n".join(res))
LLaMA-Factory/src/llamafactory/train/sft/workflow.py (new file, mode 100644)
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the HuggingFace's transformers library.
# https://github.com/huggingface/transformers/blob/v4.40.0/examples/pytorch/summarization/run_summarization.py
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, List, Optional

from ...data import SFTDataCollatorWith4DAttentionMask, get_dataset
from ...extras.constants import IGNORE_INDEX
from ...extras.misc import get_logits_processor
from ...extras.ploting import plot_loss
from ...model import load_model, load_tokenizer
from ..trainer_utils import create_modelcard_and_push
from .metric import ComputeAccuracy, ComputeSimilarity, eval_logit_processor
from .trainer import CustomSeq2SeqTrainer


if TYPE_CHECKING:
    from transformers import Seq2SeqTrainingArguments, TrainerCallback

    from ...hparams import DataArguments, FinetuningArguments, GeneratingArguments, ModelArguments


def run_sft(
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
    generating_args: "GeneratingArguments",
    callbacks: Optional[List["TrainerCallback"]] = None,
):
    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    dataset_module = get_dataset(model_args, data_args, training_args, stage="sft", **tokenizer_module)
    model = load_model(tokenizer, model_args, finetuning_args, training_args.do_train)

    if getattr(model, "is_quantized", False) and not training_args.do_train:
        setattr(model, "_hf_peft_config_loaded", True)  # hack here: make model compatible with prediction

    data_collator = SFTDataCollatorWith4DAttentionMask(
        tokenizer=tokenizer,
        pad_to_multiple_of=8 if training_args.do_train else None,  # for shift short attention
        label_pad_token_id=IGNORE_INDEX if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id,
        block_diag_attn=model_args.block_diag_attn,
        attn_implementation=getattr(model.config, "_attn_implementation", None),
        compute_dtype=model_args.compute_dtype,
    )

    # Override the decoding parameters of Seq2SeqTrainer
    training_args.generation_max_length = training_args.generation_max_length or data_args.cutoff_len
    training_args.generation_num_beams = data_args.eval_num_beams or training_args.generation_num_beams
    training_args.remove_unused_columns = False if model_args.visual_inputs else training_args.remove_unused_columns

    # Metric utils
    metric_module = {}
    if training_args.predict_with_generate:
        metric_module["compute_metrics"] = ComputeSimilarity(tokenizer=tokenizer)
    elif finetuning_args.compute_accuracy:
        metric_module["compute_metrics"] = ComputeAccuracy()
        metric_module["preprocess_logits_for_metrics"] = eval_logit_processor

    # Initialize our Trainer
    trainer = CustomSeq2SeqTrainer(
        model=model,
        args=training_args,
        finetuning_args=finetuning_args,
        data_collator=data_collator,
        callbacks=callbacks,
        **dataset_module,
        **tokenizer_module,
        **metric_module,
    )

    # Keyword arguments for `model.generate`
    gen_kwargs = generating_args.to_dict()
    gen_kwargs["eos_token_id"] = [tokenizer.eos_token_id] + tokenizer.additional_special_tokens_ids
    gen_kwargs["pad_token_id"] = tokenizer.pad_token_id
    gen_kwargs["logits_processor"] = get_logits_processor()

    # Training
    if training_args.do_train:
        train_result = trainer.train(resume_from_checkpoint=training_args.resume_from_checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()
        if trainer.is_world_process_zero() and finetuning_args.plot_loss:
            plot_loss(training_args.output_dir, keys=["loss", "eval_loss", "eval_accuracy"])

    if training_args.predict_with_generate:
        tokenizer.padding_side = "left"  # use left-padding in generation

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate(metric_key_prefix="eval", **gen_kwargs)
        if training_args.predict_with_generate:  # eval_loss will be wrong if predict_with_generate is enabled
            metrics.pop("eval_loss", None)

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Predict
    if training_args.do_predict:
        predict_results = trainer.predict(dataset_module["eval_dataset"], metric_key_prefix="predict", **gen_kwargs)
        if training_args.predict_with_generate:  # predict_loss will be wrong if predict_with_generate is enabled
            predict_results.metrics.pop("predict_loss", None)

        trainer.log_metrics("predict", predict_results.metrics)
        trainer.save_metrics("predict", predict_results.metrics)
        trainer.save_predictions(dataset_module["eval_dataset"], predict_results)

    # Create model card
    create_modelcard_and_push(trainer, model_args, data_args, training_args, finetuning_args)
LLaMA-Factory/src/llamafactory/train/test_utils.py (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, Optional, Sequence, Set, Tuple, Union

import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM
from trl import AutoModelForCausalLMWithValueHead

from ..data import get_dataset
from ..extras.misc import get_current_device
from ..hparams import get_infer_args, get_train_args
from ..model import load_model, load_tokenizer


if TYPE_CHECKING:
    from datasets import Dataset
    from peft import LoraModel
    from transformers import PreTrainedModel


def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module", diff_keys: Sequence[str] = []) -> None:
    state_dict_a = model_a.state_dict()
    state_dict_b = model_b.state_dict()
    assert set(state_dict_a.keys()) == set(state_dict_b.keys())
    for name in state_dict_a.keys():
        if any(key in name for key in diff_keys):
            assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5) is False
        else:
            assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5) is True


def check_lora_model(model: "LoraModel") -> Tuple[Set[str], Set[str]]:
    linear_modules, extra_modules = set(), set()
    for name, param in model.named_parameters():
        if any(module in name for module in ["lora_A", "lora_B"]):
            linear_modules.add(name.split(".lora_", maxsplit=1)[0].split(".")[-1])
            assert param.requires_grad is True
            assert param.dtype == torch.float32
        elif "modules_to_save" in name:
            extra_modules.add(name.split(".modules_to_save", maxsplit=1)[0].split(".")[-1])
            assert param.requires_grad is True
            assert param.dtype == torch.float32
        else:
            assert param.requires_grad is False
            assert param.dtype == torch.float16

    return linear_modules, extra_modules


def load_train_model(add_valuehead: bool = False, **kwargs) -> "PreTrainedModel":
    model_args, _, _, finetuning_args, _ = get_train_args(kwargs)
    tokenizer = load_tokenizer(model_args)["tokenizer"]
    return load_model(tokenizer, model_args, finetuning_args, is_trainable=True, add_valuehead=add_valuehead)


def load_infer_model(add_valuehead: bool = False, **kwargs) -> "PreTrainedModel":
    model_args, _, finetuning_args, _ = get_infer_args(kwargs)
    tokenizer = load_tokenizer(model_args)["tokenizer"]
    return load_model(tokenizer, model_args, finetuning_args, is_trainable=False, add_valuehead=add_valuehead)


def load_reference_model(
    model_path: str,
    lora_path: Optional[str] = None,
    use_lora: bool = False,
    use_pissa: bool = False,
    is_trainable: bool = False,
    add_valuehead: bool = False,
) -> Union["PreTrainedModel", "LoraModel"]:
    if add_valuehead:
        model: "AutoModelForCausalLMWithValueHead" = AutoModelForCausalLMWithValueHead.from_pretrained(
            model_path, torch_dtype=torch.float16, device_map=get_current_device()
        )
        if not is_trainable:
            model.v_head = model.v_head.to(torch.float16)

        return model

    model = AutoModelForCausalLM.from_pretrained(
        model_path, torch_dtype=torch.float16, device_map=get_current_device()
    )
    if use_lora or use_pissa:
        model = PeftModel.from_pretrained(
            model, lora_path, subfolder="pissa_init" if use_pissa else None, is_trainable=is_trainable
        )
        for param in filter(lambda p: p.requires_grad, model.parameters()):
            param.data = param.data.to(torch.float32)

    return model


def load_train_dataset(**kwargs) -> "Dataset":
    model_args, data_args, training_args, _, _ = get_train_args(kwargs)
    tokenizer_module = load_tokenizer(model_args)
    dataset_module = get_dataset(model_args, data_args, training_args, stage=kwargs["stage"], **tokenizer_module)
    return dataset_module["train_dataset"]


def patch_valuehead_model():
    def post_init(self: "AutoModelForCausalLMWithValueHead", state_dict: Dict[str, "torch.Tensor"]) -> None:
        state_dict = {k[7:]: state_dict[k] for k in state_dict.keys() if k.startswith("v_head.")}
        self.v_head.load_state_dict(state_dict, strict=False)
        del state_dict

    AutoModelForCausalLMWithValueHead.post_init = post_init
LLaMA-Factory/src/llamafactory/train/trainer_utils.py (new file, mode 100644)
# Copyright 2024 HuggingFace Inc. and the LlamaFactory team.
#
# This code is inspired by the original GaLore's implementation: https://github.com/jiaweizzhao/GaLore
# and the original LoRA+'s implementation: https://github.com/nikhil-ghosh-berkeley/loraplus
# and the original BAdam's implementation: https://github.com/Ledzy/BAdam
# and the HuggingFace's TRL library: https://github.com/huggingface/trl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union

import torch
from transformers import Trainer
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.optimization import get_scheduler
from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
from transformers.trainer_pt_utils import get_parameter_names

from ..extras.constants import IGNORE_INDEX
from ..extras.logging import get_logger
from ..extras.packages import is_galore_available
from ..hparams import FinetuningArguments, ModelArguments
from ..model import find_all_linear_modules, load_model, load_tokenizer, load_valuehead_params


if is_galore_available():
    from galore_torch import GaLoreAdafactor, GaLoreAdamW, GaLoreAdamW8bit


if TYPE_CHECKING:
    from transformers import PreTrainedModel, Seq2SeqTrainingArguments
    from trl import AutoModelForCausalLMWithValueHead

    from ..hparams import DataArguments


logger = get_logger(__name__)


class DummyOptimizer(torch.optim.Optimizer):
    r"""
    A dummy optimizer used for the GaLore algorithm.
    """

    def __init__(
        self, lr: float = 1e-3, optimizer_dict: Optional[Dict["torch.nn.Parameter", "torch.optim.Optimizer"]] = None
    ) -> None:
        dummy_tensor = torch.randn(1, 1)
        self.optimizer_dict = optimizer_dict
        super().__init__([dummy_tensor], {"lr": lr})

    def zero_grad(self, set_to_none: bool = True) -> None:
        pass

    def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
        pass


def create_modelcard_and_push(
    trainer: "Trainer",
    model_args: "ModelArguments",
    data_args: "DataArguments",
    training_args: "Seq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
) -> None:
    kwargs = {
        "tasks": "text-generation",
        "finetuned_from": model_args.model_name_or_path,
        "tags": ["llama-factory", finetuning_args.finetuning_type],
    }
    if data_args.dataset is not None:
        kwargs["dataset"] = data_args.dataset

    if model_args.use_unsloth:
        kwargs["tags"] = kwargs["tags"] + ["unsloth"]

    if not training_args.do_train:
        pass
    elif training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(license="other", **kwargs)  # prevent from connecting to hub


def create_ref_model(
    model_args: "ModelArguments", finetuning_args: "FinetuningArguments", add_valuehead: bool = False
) -> Optional[Union["PreTrainedModel", "AutoModelForCausalLMWithValueHead"]]:
    r"""
    Creates reference model for PPO/DPO training. Evaluation mode is not supported.

    The valuehead parameter is randomly initialized since it is useless for PPO training.
    """
    if finetuning_args.ref_model is not None:
        ref_model_args = ModelArguments.copyfrom(
            model_args,
            model_name_or_path=finetuning_args.ref_model,
            adapter_name_or_path=finetuning_args.ref_model_adapters,
            quantization_bit=finetuning_args.ref_model_quantization_bit,
        )
        ref_finetuning_args = FinetuningArguments()
        tokenizer = load_tokenizer(ref_model_args)["tokenizer"]
        ref_model = load_model(
            tokenizer, ref_model_args, ref_finetuning_args, is_trainable=False, add_valuehead=add_valuehead
        )
        logger.info("Created reference model from {}".format(finetuning_args.ref_model))
    else:
        if finetuning_args.finetuning_type == "lora":
            ref_model = None
        else:
            ref_model_args = ModelArguments.copyfrom(model_args)
            ref_finetuning_args = FinetuningArguments()
            tokenizer = load_tokenizer(ref_model_args)["tokenizer"]
            ref_model = load_model(
                tokenizer, ref_model_args, ref_finetuning_args, is_trainable=False, add_valuehead=add_valuehead
            )
            logger.info("Created reference model from the model itself.")

    return ref_model


def create_reward_model(
    model: "AutoModelForCausalLMWithValueHead", model_args: "ModelArguments", finetuning_args: "FinetuningArguments"
) -> Optional["AutoModelForCausalLMWithValueHead"]:
    r"""
    Creates reward model for PPO training.
    """
    if finetuning_args.reward_model_type == "api":
        assert finetuning_args.reward_model.startswith("http"), "Please provide full url."
        logger.info("Use reward server {}".format(finetuning_args.reward_model))
        return finetuning_args.reward_model
    elif finetuning_args.reward_model_type == "lora":
        model.pretrained_model.load_adapter(finetuning_args.reward_model, "reward")
        for name, param in model.named_parameters():  # https://github.com/huggingface/peft/issues/1090
            if "default" in name:
                param.data = param.data.to(torch.float32)  # trainable params should in fp32
        vhead_params = load_valuehead_params(finetuning_args.reward_model, model_args)
        assert vhead_params is not None, "Reward model is not correctly loaded."
        model.register_buffer("reward_head_weight", vhead_params["v_head.summary.weight"], persistent=False)
        model.register_buffer("reward_head_bias", vhead_params["v_head.summary.bias"], persistent=False)
        model.register_buffer(
            "default_head_weight", torch.zeros_like(vhead_params["v_head.summary.weight"]), persistent=False
        )
        model.register_buffer(
            "default_head_bias", torch.zeros_like(vhead_params["v_head.summary.bias"]), persistent=False
        )
        logger.info("Loaded adapter weights of reward model from {}".format(finetuning_args.reward_model))
        return None
    else:
        reward_model_args = ModelArguments.copyfrom(
            model_args,
            model_name_or_path=finetuning_args.reward_model,
            adapter_name_or_path=finetuning_args.reward_model_adapters,
            quantization_bit=finetuning_args.reward_model_quantization_bit,
        )
        reward_finetuning_args = FinetuningArguments()
        tokenizer = load_tokenizer(reward_model_args)["tokenizer"]
        reward_model = load_model(
            tokenizer, reward_model_args, reward_finetuning_args, is_trainable=False, add_valuehead=True
        )
        logger.info("Loaded full weights of reward model from {}".format(finetuning_args.reward_model))
        logger.warning("Please ensure the ppo model and reward model share SAME tokenizer and vocabulary.")
        return reward_model


def _get_decay_parameter_names(model: "PreTrainedModel") -> List[str]:
    r"""
    Returns a list of names of parameters with weight decay. (weights in non-layernorm layers)
    """
    decay_parameters = get_parameter_names(model, ALL_LAYERNORM_LAYERS)
    decay_parameters = [name for name in decay_parameters if "bias" not in name]
    return decay_parameters


def _create_galore_optimizer(
    model: "PreTrainedModel",
    training_args: "Seq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
    if len(finetuning_args.galore_target) == 1 and finetuning_args.galore_target[0] == "all":
        galore_targets = find_all_linear_modules(model, finetuning_args.freeze_vision_tower)
    else:
        galore_targets = finetuning_args.galore_target

    galore_params: List["torch.nn.Parameter"] = []
    for name, module in model.named_modules():
        if isinstance(module, torch.nn.Linear) and any(target in name for target in galore_targets):
            for param in module.parameters():
                if param.requires_grad and len(param.shape) > 1:
                    galore_params.append(param)

    galore_kwargs = {
        "rank": finetuning_args.galore_rank,
        "update_proj_gap": finetuning_args.galore_update_interval,
        "scale": finetuning_args.galore_scale,
        "proj_type": finetuning_args.galore_proj_type,
    }

    id_galore_params = {id(param) for param in galore_params}
    decay_params, nodecay_params = [], []  # they are non-galore parameters
    trainable_params: List["torch.nn.Parameter"] = []  # galore_params + decay_params + nodecay_params
    decay_param_names = _get_decay_parameter_names(model)
    for name, param in model.named_parameters():
        if param.requires_grad:
            trainable_params.append(param)
            if id(param) not in id_galore_params:
                if name in decay_param_names:
                    decay_params.append(param)
                else:
                    nodecay_params.append(param)

    _, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)

    if training_args.optim == "adamw_torch":
        optim_class = GaLoreAdamW
    elif training_args.optim in ["adamw_bnb_8bit", "adamw_8bit", "paged_adamw_8bit"]:
        optim_class = GaLoreAdamW8bit
    elif training_args.optim == "adafactor":
        optim_class = GaLoreAdafactor
    else:
        raise NotImplementedError("Unknow optim: {}".format(training_args.optim))

    if finetuning_args.galore_layerwise:
        if training_args.gradient_accumulation_steps != 1:
            raise ValueError("Per-layer GaLore does not support gradient accumulation.")

        optimizer_dict: Dict["torch.Tensor", "torch.optim.Optimizer"] = {}
        for param in nodecay_params:
            param_groups = [dict(params=[param], weight_decay=0.0)]
            optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
        for param in decay_params:
            param_groups = [dict(params=[param], weight_decay=training_args.weight_decay)]
            optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)
        for param in galore_params:  # galore params have weight decay
            param_groups = [dict(params=[param], weight_decay=training_args.weight_decay, **galore_kwargs)]
            optimizer_dict[param] = optim_class(param_groups, **optim_kwargs)

        def optimizer_hook(param: "torch.nn.Parameter"):
            if param.grad is not None:
                optimizer_dict[param].step()
                optimizer_dict[param].zero_grad()

        for param in trainable_params:
            param.register_post_accumulate_grad_hook(optimizer_hook)

        optimizer = DummyOptimizer(lr=training_args.learning_rate, optimizer_dict=optimizer_dict)
    else:
        param_groups = [
            dict(params=nodecay_params, weight_decay=0.0),
            dict(params=decay_params, weight_decay=training_args.weight_decay),
            dict(params=galore_params, weight_decay=training_args.weight_decay, **galore_kwargs),
        ]
        optimizer = optim_class(param_groups, **optim_kwargs)

    logger.info("Using GaLore optimizer, may cause hanging at the start of training, wait patiently.")
    return optimizer


def _create_loraplus_optimizer(
    model: "PreTrainedModel",
    training_args: "Seq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
    default_lr = training_args.learning_rate
    loraplus_lr = training_args.learning_rate * finetuning_args.loraplus_lr_ratio
    embedding_lr = finetuning_args.loraplus_lr_embedding

    decay_param_names = _get_decay_parameter_names(model)
    param_dict: Dict[str, List["torch.nn.Parameter"]] = {
        "lora_a": [],
        "lora_b": [],
        "lora_b_nodecay": [],
        "embedding": [],
    }
    for name, param in model.named_parameters():
        if param.requires_grad:
            if "lora_embedding_B" in name:
                param_dict["embedding"].append(param)
            elif "lora_B" in name or param.ndim == 1:
                if name in decay_param_names:
                    param_dict["lora_b"].append(param)
                else:
                    param_dict["lora_b_nodecay"].append(param)
            else:
                param_dict["lora_a"].append(param)

    optim_class, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
    param_groups = [
        dict(params=param_dict["lora_a"], lr=default_lr, weight_decay=training_args.weight_decay),
        dict(params=param_dict["lora_b"], lr=loraplus_lr, weight_decay=training_args.weight_decay),
        dict(params=param_dict["lora_b_nodecay"], lr=loraplus_lr, weight_decay=0.0),
        dict(params=param_dict["embedding"], lr=embedding_lr, weight_decay=training_args.weight_decay),
    ]
    optimizer = optim_class(param_groups, **optim_kwargs)
    logger.info("Using LoRA+ optimizer with loraplus lr ratio {:.2f}.".format(finetuning_args.loraplus_lr_ratio))
    return optimizer


def _create_badam_optimizer(
    model: "PreTrainedModel",
    training_args: "Seq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
) -> "torch.optim.Optimizer":
    decay_params, nodecay_params = [], []
    decay_param_names = _get_decay_parameter_names(model)
    for name, param in model.named_parameters():
        if param.requires_grad:
            if name in decay_param_names:
                decay_params.append(param)
            else:
                nodecay_params.append(param)

    optim_class, optim_kwargs = Trainer.get_optimizer_cls_and_kwargs(training_args)
    param_groups = [
        dict(params=nodecay_params, weight_decay=0.0),
        dict(params=decay_params, weight_decay=training_args.weight_decay),
    ]

    if finetuning_args.badam_mode == "layer":
        from badam import BlockOptimizer

        base_optimizer = optim_class(param_groups, **optim_kwargs)
        optimizer = BlockOptimizer(
            base_optimizer=base_optimizer,
            named_parameters_list=list(model.named_parameters()),
            block_prefix_list=None,
            switch_block_every=finetuning_args.badam_switch_interval,
            start_block=finetuning_args.badam_start_block,
            switch_mode=finetuning_args.badam_switch_mode,
            verbose=finetuning_args.badam_verbose,
            ds_zero3_enabled=is_deepspeed_zero3_enabled(),
        )
        logger.info(
            f"Using BAdam optimizer with layer-wise update, switch mode is {finetuning_args.badam_switch_mode}, "
            f"switch block every {finetuning_args.badam_switch_interval} steps, "
            f"default start block is {finetuning_args.badam_start_block}"
        )

    elif finetuning_args.badam_mode == "ratio":
        from badam import BlockOptimizerRatio

        assert finetuning_args.badam_update_ratio > 1e-6
        optimizer = BlockOptimizerRatio(
            param_groups=param_groups,
            named_parameters_list=list(model.named_parameters()),
            update_ratio=finetuning_args.badam_update_ratio,
            mask_mode=finetuning_args.badam_mask_mode,
            verbose=finetuning_args.badam_verbose,
            include_embedding=False,
            **optim_kwargs,
        )
        logger.info(
            f"Using BAdam optimizer with ratio-based update, update ratio is {finetuning_args.badam_update_ratio}, "
            f"mask mode is {finetuning_args.badam_mask_mode}"
        )

    return optimizer


def create_custom_optimzer(
    model: "PreTrainedModel",
    training_args: "Seq2SeqTrainingArguments",
    finetuning_args: "FinetuningArguments",
) -> Optional["torch.optim.Optimizer"]:
    if finetuning_args.use_galore:
        return _create_galore_optimizer(model, training_args, finetuning_args)

    if finetuning_args.loraplus_lr_ratio is not None:
        return _create_loraplus_optimizer(model, training_args, finetuning_args)

    if finetuning_args.use_badam:
        return _create_badam_optimizer(model, training_args, finetuning_args)


def create_custom_scheduler(
    training_args: "Seq2SeqTrainingArguments",
    num_training_steps: int,
    optimizer: Optional["torch.optim.Optimizer"] = None,
) -> None:
    if optimizer is not None and isinstance(optimizer, DummyOptimizer):
        optimizer_dict = optimizer.optimizer_dict
        scheduler_dict: Dict["torch.nn.Parameter", "torch.optim.lr_scheduler.LRScheduler"] = {}

        for param in optimizer_dict.keys():
            scheduler_dict[param] = get_scheduler(
                training_args.lr_scheduler_type,
                optimizer=optimizer_dict[param],
                num_warmup_steps=training_args.get_warmup_steps(num_training_steps),
                num_training_steps=num_training_steps,
                scheduler_specific_kwargs=training_args.lr_scheduler_kwargs,
            )

        def scheduler_hook(param: "torch.nn.Parameter"):
            scheduler_dict[param].step()

        for param in optimizer_dict.keys():
            param.register_post_accumulate_grad_hook(scheduler_hook)


def get_batch_logps(
    logits: "torch.Tensor", labels: "torch.Tensor", label_pad_token_id: int = IGNORE_INDEX
) -> Tuple["torch.Tensor", "torch.Tensor"]:
    r"""
    Computes the log probabilities of the given labels under the given logits.

    Returns:
        logps: A tensor of shape (batch_size,) containing the sum of log probabilities.
        valid_length: A tensor of shape (batch_size,) containing the number of non-masked tokens.
    """
    if logits.shape[:-1] != labels.shape:
        raise ValueError("Logits (batchsize x seqlen) and labels must have the same shape.")

    labels = labels[:, 1:].clone()
    logits = logits[:, :-1, :]
    loss_mask = labels != label_pad_token_id
    labels[labels == label_pad_token_id] = 0  # dummy token
    per_token_logps = torch.gather(logits.log_softmax(-1), dim=2, index=labels.unsqueeze(2)).squeeze(2)
    return (per_token_logps * loss_mask).sum(-1), loss_mask.sum(-1)
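Note (not part of the commit): a minimal, self-contained sketch of what get_batch_logps computes, useful when reading the DPO/KTO trainers that call it. IGNORE_INDEX is assumed to be -100 here (the standard HuggingFace ignore index); the constant itself lives in extras.constants, which is not in this commit.

import torch

batch_size, seq_len, vocab_size = 2, 6, 10
logits = torch.randn(batch_size, seq_len, vocab_size)
labels = torch.randint(0, vocab_size, (batch_size, seq_len))
labels[:, :3] = -100  # mask the prompt part, as the collator would

# same steps as get_batch_logps: shift, mask pads, gather per-token log-probs
labels_shifted = labels[:, 1:].clone()
logits_shifted = logits[:, :-1, :]
loss_mask = labels_shifted != -100
labels_shifted[labels_shifted == -100] = 0  # dummy token for gather
per_token_logps = torch.gather(
    logits_shifted.log_softmax(-1), dim=2, index=labels_shifted.unsqueeze(2)
).squeeze(2)
logps = (per_token_logps * loss_mask).sum(-1)  # shape: (batch_size,)
valid_length = loss_mask.sum(-1)               # shape: (batch_size,)
print(logps.shape, valid_length)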
LLaMA-Factory/src/llamafactory/train/tuner.py (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
from typing import TYPE_CHECKING, Any, Dict, List, Optional

import torch
from transformers import PreTrainedModel

from ..data import get_template_and_fix_tokenizer
from ..extras.constants import V_HEAD_SAFE_WEIGHTS_NAME, V_HEAD_WEIGHTS_NAME
from ..extras.logging import get_logger
from ..hparams import get_infer_args, get_train_args
from ..model import load_model, load_tokenizer
from .callbacks import LogCallback
from .dpo import run_dpo
from .kto import run_kto
from .ppo import run_ppo
from .pt import run_pt
from .rm import run_rm
from .sft import run_sft


if TYPE_CHECKING:
    from transformers import TrainerCallback


logger = get_logger(__name__)


def run_exp(args: Optional[Dict[str, Any]] = None, callbacks: List["TrainerCallback"] = []) -> None:
    callbacks.append(LogCallback())
    model_args, data_args, training_args, finetuning_args, generating_args = get_train_args(args)

    if finetuning_args.stage == "pt":
        run_pt(model_args, data_args, training_args, finetuning_args, callbacks)
    elif finetuning_args.stage == "sft":
        run_sft(model_args, data_args, training_args, finetuning_args, generating_args, callbacks)
    elif finetuning_args.stage == "rm":
        run_rm(model_args, data_args, training_args, finetuning_args, callbacks)
    elif finetuning_args.stage == "ppo":
        run_ppo(model_args, data_args, training_args, finetuning_args, generating_args, callbacks)
    elif finetuning_args.stage == "dpo":
        run_dpo(model_args, data_args, training_args, finetuning_args, callbacks)
    elif finetuning_args.stage == "kto":
        run_kto(model_args, data_args, training_args, finetuning_args, callbacks)
    else:
        raise ValueError("Unknown task: {}.".format(finetuning_args.stage))


def export_model(args: Optional[Dict[str, Any]] = None) -> None:
    model_args, data_args, finetuning_args, _ = get_infer_args(args)

    if model_args.export_dir is None:
        raise ValueError("Please specify `export_dir` to save model.")

    if model_args.adapter_name_or_path is not None and model_args.export_quantization_bit is not None:
        raise ValueError("Please merge adapters before quantizing the model.")

    tokenizer_module = load_tokenizer(model_args)
    tokenizer = tokenizer_module["tokenizer"]
    processor = tokenizer_module["processor"]
    get_template_and_fix_tokenizer(tokenizer, data_args.template)
    model = load_model(tokenizer, model_args, finetuning_args)  # must after fixing tokenizer to resize vocab

    if getattr(model, "quantization_method", None) is not None and model_args.adapter_name_or_path is not None:
        raise ValueError("Cannot merge adapters to a quantized model.")

    if not isinstance(model, PreTrainedModel):
        raise ValueError("The model is not a `PreTrainedModel`, export aborted.")

    if getattr(model, "quantization_method", None) is not None:  # quantized model adopts float16 type
        setattr(model.config, "torch_dtype", torch.float16)
    else:
        if model_args.infer_dtype == "auto":
            output_dtype = getattr(model.config, "torch_dtype", torch.float16)
        else:
            output_dtype = getattr(torch, model_args.infer_dtype)

        setattr(model.config, "torch_dtype", output_dtype)
        model = model.to(output_dtype)
        logger.info("Convert model dtype to: {}.".format(output_dtype))

    model.save_pretrained(
        save_directory=model_args.export_dir,
        max_shard_size="{}GB".format(model_args.export_size),
        safe_serialization=(not model_args.export_legacy_format),
    )
    if model_args.export_hub_model_id is not None:
        model.push_to_hub(
            model_args.export_hub_model_id,
            token=model_args.hf_hub_token,
            max_shard_size="{}GB".format(model_args.export_size),
            safe_serialization=(not model_args.export_legacy_format),
        )

    if finetuning_args.stage == "rm":
        if model_args.adapter_name_or_path is not None:
            vhead_path = model_args.adapter_name_or_path[-1]
        else:
            vhead_path = model_args.model_name_or_path

        if os.path.exists(os.path.join(vhead_path, V_HEAD_SAFE_WEIGHTS_NAME)):
            shutil.copy(
                os.path.join(vhead_path, V_HEAD_SAFE_WEIGHTS_NAME),
                os.path.join(model_args.export_dir, V_HEAD_SAFE_WEIGHTS_NAME),
            )
            logger.info("Copied valuehead to {}.".format(model_args.export_dir))
        elif os.path.exists(os.path.join(vhead_path, V_HEAD_WEIGHTS_NAME)):
            shutil.copy(
                os.path.join(vhead_path, V_HEAD_WEIGHTS_NAME),
                os.path.join(model_args.export_dir, V_HEAD_WEIGHTS_NAME),
            )
            logger.info("Copied valuehead to {}.".format(model_args.export_dir))

    try:
        tokenizer.padding_side = "left"  # restore padding side
        tokenizer.init_kwargs["padding_side"] = "left"
        tokenizer.save_pretrained(model_args.export_dir)
        if model_args.export_hub_model_id is not None:
            tokenizer.push_to_hub(model_args.export_hub_model_id, token=model_args.hf_hub_token)

        if model_args.visual_inputs and processor is not None:
            getattr(processor, "image_processor").save_pretrained(model_args.export_dir)
            if model_args.export_hub_model_id is not None:
                getattr(processor, "image_processor").push_to_hub(
                    model_args.export_hub_model_id, token=model_args.hf_hub_token
                )

    except Exception:
        logger.warning("Cannot save tokenizer, please copy the files manually.")
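Note (not part of the commit): run_exp is the single training entry point, dispatching on finetuning_args.stage to the stage-specific workflows above. A rough sketch of how it might be invoked programmatically follows; the argument keys come from the hparams dataclasses, which are not included in this commit, so treat the exact names and values as assumptions for illustration only.

from llamafactory.train.tuner import run_exp

# Hypothetical argument dict; keys must match the fields parsed by get_train_args()
# (ModelArguments / DataArguments / Seq2SeqTrainingArguments / FinetuningArguments).
args = {
    "stage": "sft",                        # routed to run_sft() above
    "do_train": True,
    "model_name_or_path": "Qwen/Qwen2-7B",
    "dataset": "alpaca_en_demo",           # assumed dataset name
    "template": "qwen",
    "finetuning_type": "lora",
    "output_dir": "saves/qwen2-7b/lora/sft",
    "per_device_train_batch_size": 1,
    "learning_rate": 1e-4,
    "num_train_epochs": 3.0,
}

run_exp(args)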
LLaMA-Factory/src/llamafactory/webui/__init__.py (new file, mode 100644, empty)
LLaMA-Factory/src/llamafactory/webui/chatter.py (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import TYPE_CHECKING, Dict, Generator, List, Optional, Sequence, Tuple

from numpy.typing import NDArray

from ..chat import ChatModel
from ..data import Role
from ..extras.constants import PEFT_METHODS
from ..extras.misc import torch_gc
from ..extras.packages import is_gradio_available
from .common import QUANTIZATION_BITS, get_save_dir
from .locales import ALERTS


if TYPE_CHECKING:
    from ..chat import BaseEngine
    from .manager import Manager


if is_gradio_available():
    import gradio as gr


class WebChatModel(ChatModel):
    def __init__(self, manager: "Manager", demo_mode: bool = False, lazy_init: bool = True) -> None:
        self.manager = manager
        self.demo_mode = demo_mode
        self.engine: Optional["BaseEngine"] = None

        if not lazy_init:  # read arguments from command line
            super().__init__()

        if demo_mode and os.environ.get("DEMO_MODEL") and os.environ.get("DEMO_TEMPLATE"):  # load demo model
            model_name_or_path = os.environ.get("DEMO_MODEL")
            template = os.environ.get("DEMO_TEMPLATE")
            infer_backend = os.environ.get("DEMO_BACKEND", "huggingface")
            super().__init__(
                dict(model_name_or_path=model_name_or_path, template=template, infer_backend=infer_backend)
            )

    @property
    def loaded(self) -> bool:
        return self.engine is not None

    def load_model(self, data) -> Generator[str, None, None]:
        get = lambda elem_id: data[self.manager.get_elem_by_id(elem_id)]
        lang, model_name, model_path = get("top.lang"), get("top.model_name"), get("top.model_path")
        finetuning_type, checkpoint_path = get("top.finetuning_type"), get("top.checkpoint_path")
        error = ""
        if self.loaded:
            error = ALERTS["err_exists"][lang]
        elif not model_name:
            error = ALERTS["err_no_model"][lang]
        elif not model_path:
            error = ALERTS["err_no_path"][lang]
        elif self.demo_mode:
            error = ALERTS["err_demo"][lang]

        if error:
            gr.Warning(error)
            yield error
            return

        if get("top.quantization_bit") in QUANTIZATION_BITS:
            quantization_bit = int(get("top.quantization_bit"))
        else:
            quantization_bit = None

        yield ALERTS["info_loading"][lang]
        args = dict(
            model_name_or_path=model_path,
            finetuning_type=finetuning_type,
            quantization_bit=quantization_bit,
            quantization_method=get("top.quantization_method"),
            template=get("top.template"),
            flash_attn="fa2" if get("top.booster") == "flashattn2" else "auto",
            use_unsloth=(get("top.booster") == "unsloth"),
            visual_inputs=get("top.visual_inputs"),
            rope_scaling=get("top.rope_scaling") if get("top.rope_scaling") in ["linear", "dynamic"] else None,
            infer_backend=get("infer.infer_backend"),
            infer_dtype=get("infer.infer_dtype"),
        )

        if checkpoint_path:
            if finetuning_type in PEFT_METHODS:  # list
                args["adapter_name_or_path"] = ",".join(
                    [get_save_dir(model_name, finetuning_type, adapter) for adapter in checkpoint_path]
                )
            else:  # str
                args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, checkpoint_path)

        super().__init__(args)
        yield ALERTS["info_loaded"][lang]

    def unload_model(self, data) -> Generator[str, None, None]:
        lang = data[self.manager.get_elem_by_id("top.lang")]

        if self.demo_mode:
            gr.Warning(ALERTS["err_demo"][lang])
            yield ALERTS["err_demo"][lang]
            return

        yield ALERTS["info_unloading"][lang]
        self.engine = None
        torch_gc()
        yield ALERTS["info_unloaded"][lang]

    def append(
        self,
        chatbot: List[List[Optional[str]]],
        messages: Sequence[Dict[str, str]],
        role: str,
        query: str,
    ) -> Tuple[List[List[Optional[str]]], List[Dict[str, str]], str]:
        return chatbot + [[query, None]], messages + [{"role": role, "content": query}], ""

    def stream(
        self,
        chatbot: List[List[Optional[str]]],
        messages: Sequence[Dict[str, str]],
        system: str,
        tools: str,
        image: Optional[NDArray],
        max_new_tokens: int,
        top_p: float,
        temperature: float,
    ) -> Generator[Tuple[List[List[Optional[str]]], List[Dict[str, str]]], None, None]:
        chatbot[-1][1] = ""
        response = ""
        for new_text in self.stream_chat(
            messages, system, tools, image, max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature
        ):
            response += new_text
            if tools:
                result = self.engine.template.extract_tool(response)
            else:
                result = response

            if isinstance(result, list):
                tool_calls = [{"name": tool[0], "arguments": json.loads(tool[1])} for tool in result]
                tool_calls = json.dumps(tool_calls, indent=4, ensure_ascii=False)
                output_messages = messages + [{"role": Role.FUNCTION.value, "content": tool_calls}]
                bot_text = "```json\n" + tool_calls + "\n```"
            else:
                output_messages = messages + [{"role": Role.ASSISTANT.value, "content": result}]
                bot_text = result

            chatbot[-1][1] = bot_text
            yield chatbot, output_messages
LLaMA-Factory/src/llamafactory/webui/common.py (new file, mode 100644)
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import defaultdict
from typing import Any, Dict, Optional, Tuple

from yaml import safe_dump, safe_load

from ..extras.constants import (
    CHECKPOINT_NAMES,
    DATA_CONFIG,
    DEFAULT_TEMPLATE,
    PEFT_METHODS,
    STAGES_USE_PAIR_DATA,
    SUPPORTED_MODELS,
    TRAINING_STAGES,
    VISION_MODELS,
    DownloadSource,
)
from ..extras.logging import get_logger
from ..extras.misc import use_modelscope
from ..extras.packages import is_gradio_available


if is_gradio_available():
    import gradio as gr


logger = get_logger(__name__)


DEFAULT_CACHE_DIR = "cache"
DEFAULT_CONFIG_DIR = "config"
DEFAULT_DATA_DIR = "data"
DEFAULT_SAVE_DIR = "saves"
USER_CONFIG = "user_config.yaml"
QUANTIZATION_BITS = ["8", "6", "5", "4", "3", "2", "1"]
GPTQ_BITS = ["8", "4", "3", "2"]


def get_save_dir(*paths: str) -> os.PathLike:
    r"""
    Gets the path to saved model checkpoints.
    """
    if os.path.sep in paths[-1]:
        logger.warning("Found complex path, some features may be not available.")
        return paths[-1]

    paths = (path.replace(" ", "").strip() for path in paths)
    return os.path.join(DEFAULT_SAVE_DIR, *paths)


def get_config_path() -> os.PathLike:
    r"""
    Gets the path to user config.
    """
    return os.path.join(DEFAULT_CACHE_DIR, USER_CONFIG)


def load_config() -> Dict[str, Any]:
    r"""
    Loads user config if exists.
    """
    try:
        with open(get_config_path(), "r", encoding="utf-8") as f:
            return safe_load(f)
    except Exception:
        return {"lang": None, "last_model": None, "path_dict": {}, "cache_dir": None}


def save_config(lang: str, model_name: Optional[str] = None, model_path: Optional[str] = None) -> None:
    r"""
    Saves user config.
    """
    os.makedirs(DEFAULT_CACHE_DIR, exist_ok=True)
    user_config = load_config()
    user_config["lang"] = lang or user_config["lang"]
    if model_name:
        user_config["last_model"] = model_name

    if model_name and model_path:
        user_config["path_dict"][model_name] = model_path

    with open(get_config_path(), "w", encoding="utf-8") as f:
        safe_dump(user_config, f)


def get_model_path(model_name: str) -> str:
    r"""
    Gets the model path according to the model name.
    """
    user_config = load_config()
    path_dict: Dict["DownloadSource", str] = SUPPORTED_MODELS.get(model_name, defaultdict(str))
    model_path = user_config["path_dict"].get(model_name, "") or path_dict.get(DownloadSource.DEFAULT, "")
    if (
        use_modelscope()
        and path_dict.get(DownloadSource.MODELSCOPE)
        and model_path == path_dict.get(DownloadSource.DEFAULT)
    ):  # replace path
        model_path = path_dict.get(DownloadSource.MODELSCOPE)

    return model_path


def get_prefix(model_name: str) -> str:
    r"""
    Gets the prefix of the model name to obtain the model family.
    """
    return model_name.split("-")[0]


def get_model_info(model_name: str) -> Tuple[str, str, bool]:
    r"""
    Gets the necessary information of this model.

    Returns:
        model_path (str)
        template (str)
        visual (bool)
    """
    return get_model_path(model_name), get_template(model_name), get_visual(model_name)


def get_template(model_name: str) -> str:
    r"""
    Gets the template name if the model is a chat model.
    """
    if model_name and model_name.endswith("Chat") and get_prefix(model_name) in DEFAULT_TEMPLATE:
        return DEFAULT_TEMPLATE[get_prefix(model_name)]

    return "default"


def get_visual(model_name: str) -> bool:
    r"""
    Judges if the model is a vision language model.
    """
    return get_prefix(model_name) in VISION_MODELS


def list_checkpoints(model_name: str, finetuning_type: str) -> "gr.Dropdown":
    r"""
    Lists all available checkpoints.
    """
    checkpoints = []
    if model_name:
        save_dir = get_save_dir(model_name, finetuning_type)
        if save_dir and os.path.isdir(save_dir):
            for checkpoint in os.listdir(save_dir):
                if os.path.isdir(os.path.join(save_dir, checkpoint)) and any(
                    os.path.isfile(os.path.join(save_dir, checkpoint, name)) for name in CHECKPOINT_NAMES
                ):
                    checkpoints.append(checkpoint)

    if finetuning_type in PEFT_METHODS:
        return gr.Dropdown(value=[], choices=checkpoints, multiselect=True)
    else:
        return gr.Dropdown(value=None, choices=checkpoints, multiselect=False)


def load_dataset_info(dataset_dir: str) -> Dict[str, Dict[str, Any]]:
    r"""
    Loads dataset_info.json.
    """
    if dataset_dir == "ONLINE" or dataset_dir.startswith("REMOTE:"):
        logger.info("dataset_dir is {}, using online dataset.".format(dataset_dir))
        return {}

    try:
        with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
            return json.load(f)
    except Exception as err:
        logger.warning("Cannot open {} due to {}.".format(os.path.join(dataset_dir, DATA_CONFIG), str(err)))
        return {}


def list_datasets(dataset_dir: str = None, training_stage: str = list(TRAINING_STAGES.keys())[0]) -> "gr.Dropdown":
    r"""
    Lists all available datasets in the dataset dir for the training stage.
    """
    dataset_info = load_dataset_info(dataset_dir if dataset_dir is not None else DEFAULT_DATA_DIR)
    ranking = TRAINING_STAGES[training_stage] in STAGES_USE_PAIR_DATA
    datasets = [k for k, v in dataset_info.items() if v.get("ranking", False) == ranking]
    return gr.Dropdown(choices=datasets)
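
Note: the helpers above are plain functions and can be exercised outside the Gradio UI. Below is a minimal usage sketch, assuming the package is importable as llamafactory.webui.common (matching the file path shown); the model name and path are placeholders, not values taken from this commit.

# Illustrative sketch only -- not part of the commit.
from llamafactory.webui.common import get_save_dir, load_config, save_config

# get_save_dir joins its arguments under DEFAULT_SAVE_DIR ("saves"),
# stripping spaces from each path segment.
print(get_save_dir("Qwen2-7B", "lora", "sft"))  # saves/Qwen2-7B/lora/sft

# save_config persists the UI language and the last-used model to
# cache/user_config.yaml; load_config reads it back (or returns defaults).
save_config(lang="en", model_name="Qwen2-7B", model_path="Qwen/Qwen2-7B")
print(load_config()["last_model"])  # Qwen2-7B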
LLaMA-Factory/src/llamafactory/webui/components/__init__.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .chatbot import create_chat_box
from .eval import create_eval_tab
from .export import create_export_tab
from .infer import create_infer_tab
from .top import create_top
from .train import create_train_tab


__all__ = [
    "create_chat_box",
    "create_eval_tab",
    "create_export_tab",
    "create_infer_tab",
    "create_top",
    "create_train_tab",
]
LLaMA-Factory/src/llamafactory/webui/components/chatbot.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, Tuple

from ...data import Role
from ...extras.packages import is_gradio_available
from ..utils import check_json_schema


if is_gradio_available():
    import gradio as gr


if TYPE_CHECKING:
    from gradio.components import Component

    from ..engine import Engine


def create_chat_box(
    engine: "Engine", visible: bool = False
) -> Tuple["Component", "Component", Dict[str, "Component"]]:
    with gr.Column(visible=visible) as chat_box:
        chatbot = gr.Chatbot(show_copy_button=True)
        messages = gr.State([])
        with gr.Row():
            with gr.Column(scale=4):
                with gr.Row():
                    with gr.Column():
                        role = gr.Dropdown(choices=[Role.USER.value, Role.OBSERVATION.value], value=Role.USER.value)
                        system = gr.Textbox(show_label=False)
                        tools = gr.Textbox(show_label=False, lines=3)

                    with gr.Column() as image_box:
                        image = gr.Image(sources=["upload"], type="numpy")

                query = gr.Textbox(show_label=False, lines=8)
                submit_btn = gr.Button(variant="primary")

            with gr.Column(scale=1):
                max_new_tokens = gr.Slider(minimum=8, maximum=4096, value=512, step=1)
                top_p = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.01)
                temperature = gr.Slider(minimum=0.01, maximum=1.5, value=0.95, step=0.01)
                clear_btn = gr.Button()

    tools.input(check_json_schema, inputs=[tools, engine.manager.get_elem_by_id("top.lang")])

    submit_btn.click(
        engine.chatter.append,
        [chatbot, messages, role, query],
        [chatbot, messages, query],
    ).then(
        engine.chatter.stream,
        [chatbot, messages, system, tools, image, max_new_tokens, top_p, temperature],
        [chatbot, messages],
    )
    clear_btn.click(lambda: ([], []), outputs=[chatbot, messages])

    return (
        chatbot,
        messages,
        dict(
            chat_box=chat_box,
            role=role,
            system=system,
            tools=tools,
            image_box=image_box,
            image=image,
            query=query,
            submit_btn=submit_btn,
            max_new_tokens=max_new_tokens,
            top_p=top_p,
            temperature=temperature,
            clear_btn=clear_btn,
        ),
    )
LLaMA-Factory/src/llamafactory/webui/components/data.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import TYPE_CHECKING, Any, Dict, List, Tuple

from ...extras.constants import DATA_CONFIG
from ...extras.packages import is_gradio_available


if is_gradio_available():
    import gradio as gr


if TYPE_CHECKING:
    from gradio.components import Component


PAGE_SIZE = 2


def prev_page(page_index: int) -> int:
    return page_index - 1 if page_index > 0 else page_index


def next_page(page_index: int, total_num: int) -> int:
    return page_index + 1 if (page_index + 1) * PAGE_SIZE < total_num else page_index


def can_preview(dataset_dir: str, dataset: list) -> "gr.Button":
    try:
        with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
            dataset_info = json.load(f)
    except Exception:
        return gr.Button(interactive=False)

    if len(dataset) == 0 or "file_name" not in dataset_info[dataset[0]]:
        return gr.Button(interactive=False)

    data_path = os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"])
    if os.path.isfile(data_path) or (os.path.isdir(data_path) and os.listdir(data_path)):
        return gr.Button(interactive=True)
    else:
        return gr.Button(interactive=False)


def _load_data_file(file_path: str) -> List[Any]:
    with open(file_path, "r", encoding="utf-8") as f:
        if file_path.endswith(".json"):
            return json.load(f)
        elif file_path.endswith(".jsonl"):
            return [json.loads(line) for line in f]
        else:
            return list(f)


def get_preview(dataset_dir: str, dataset: list, page_index: int) -> Tuple[int, list, "gr.Column"]:
    with open(os.path.join(dataset_dir, DATA_CONFIG), "r", encoding="utf-8") as f:
        dataset_info = json.load(f)

    data_path = os.path.join(dataset_dir, dataset_info[dataset[0]]["file_name"])
    if os.path.isfile(data_path):
        data = _load_data_file(data_path)
    else:
        data = []
        for file_name in os.listdir(data_path):
            data.extend(_load_data_file(os.path.join(data_path, file_name)))

    return len(data), data[PAGE_SIZE * page_index : PAGE_SIZE * (page_index + 1)], gr.Column(visible=True)


def create_preview_box(dataset_dir: "gr.Textbox", dataset: "gr.Dropdown") -> Dict[str, "Component"]:
    data_preview_btn = gr.Button(interactive=False, scale=1)
    with gr.Column(visible=False, elem_classes="modal-box") as preview_box:
        with gr.Row():
            preview_count = gr.Number(value=0, interactive=False, precision=0)
            page_index = gr.Number(value=0, interactive=False, precision=0)

        with gr.Row():
            prev_btn = gr.Button()
            next_btn = gr.Button()
            close_btn = gr.Button()

        with gr.Row():
            preview_samples = gr.JSON()

    dataset.change(can_preview, [dataset_dir, dataset], [data_preview_btn], queue=False).then(
        lambda: 0, outputs=[page_index], queue=False
    )
    data_preview_btn.click(
        get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
    )
    prev_btn.click(prev_page, [page_index], [page_index], queue=False).then(
        get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
    )
    next_btn.click(next_page, [page_index, preview_count], [page_index], queue=False).then(
        get_preview, [dataset_dir, dataset, page_index], [preview_count, preview_samples, preview_box], queue=False
    )
    close_btn.click(lambda: gr.Column(visible=False), outputs=[preview_box], queue=False)
    return dict(
        data_preview_btn=data_preview_btn,
        preview_count=preview_count,
        page_index=page_index,
        prev_btn=prev_btn,
        next_btn=next_btn,
        close_btn=close_btn,
        preview_samples=preview_samples,
    )
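
Note: the paging helpers above can be checked standalone, without Gradio. A small sketch follows, assuming the module is importable as llamafactory.webui.components.data (matching the file path shown); the sample list is hypothetical.

# Illustrative sketch only -- not part of the commit.
from llamafactory.webui.components.data import PAGE_SIZE, next_page, prev_page

samples = ["s0", "s1", "s2", "s3", "s4"]                    # hypothetical preview records
page = 0
page = next_page(page, len(samples))                        # 1, since (0 + 1) * PAGE_SIZE < 5
print(samples[PAGE_SIZE * page : PAGE_SIZE * (page + 1)])   # ['s2', 's3']
page = next_page(page, len(samples))                        # 2, since (1 + 1) * PAGE_SIZE < 5
page = next_page(page, len(samples))                        # stays 2: (2 + 1) * PAGE_SIZE >= 5
page = prev_page(prev_page(prev_page(page)))                # clamped at 0, never negative

This mirrors how get_preview slices the loaded records with PAGE_SIZE * page_index : PAGE_SIZE * (page_index + 1).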
LLaMA-Factory/src/llamafactory/webui/components/eval.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict

from ...extras.packages import is_gradio_available
from ..common import DEFAULT_DATA_DIR, list_datasets
from .data import create_preview_box


if is_gradio_available():
    import gradio as gr


if TYPE_CHECKING:
    from gradio.components import Component

    from ..engine import Engine


def create_eval_tab(engine: "Engine") -> Dict[str, "Component"]:
    input_elems = engine.manager.get_base_elems()
    elem_dict = dict()

    with gr.Row():
        dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=2)
        dataset = gr.Dropdown(multiselect=True, allow_custom_value=True, scale=4)
        preview_elems = create_preview_box(dataset_dir, dataset)

    input_elems.update({dataset_dir, dataset})
    elem_dict.update(dict(dataset_dir=dataset_dir, dataset=dataset, **preview_elems))

    with gr.Row():
        cutoff_len = gr.Slider(minimum=4, maximum=65536, value=1024, step=1)
        max_samples = gr.Textbox(value="100000")
        batch_size = gr.Slider(minimum=1, maximum=1024, value=2, step=1)
        predict = gr.Checkbox(value=True)

    input_elems.update({cutoff_len, max_samples, batch_size, predict})
    elem_dict.update(dict(cutoff_len=cutoff_len, max_samples=max_samples, batch_size=batch_size, predict=predict))

    with gr.Row():
        max_new_tokens = gr.Slider(minimum=8, maximum=4096, value=512, step=1)
        top_p = gr.Slider(minimum=0.01, maximum=1, value=0.7, step=0.01)
        temperature = gr.Slider(minimum=0.01, maximum=1.5, value=0.95, step=0.01)
        output_dir = gr.Textbox()

    input_elems.update({max_new_tokens, top_p, temperature, output_dir})
    elem_dict.update(dict(max_new_tokens=max_new_tokens, top_p=top_p, temperature=temperature, output_dir=output_dir))

    with gr.Row():
        cmd_preview_btn = gr.Button()
        start_btn = gr.Button(variant="primary")
        stop_btn = gr.Button(variant="stop")

    with gr.Row():
        resume_btn = gr.Checkbox(visible=False, interactive=False)
        progress_bar = gr.Slider(visible=False, interactive=False)

    with gr.Row():
        output_box = gr.Markdown()

    elem_dict.update(
        dict(
            cmd_preview_btn=cmd_preview_btn,
            start_btn=start_btn,
            stop_btn=stop_btn,
            resume_btn=resume_btn,
            progress_bar=progress_bar,
            output_box=output_box,
        )
    )
    output_elems = [output_box, progress_bar]

    cmd_preview_btn.click(engine.runner.preview_eval, input_elems, output_elems, concurrency_limit=None)
    start_btn.click(engine.runner.run_eval, input_elems, output_elems)
    stop_btn.click(engine.runner.set_abort)
    resume_btn.change(engine.runner.monitor, outputs=output_elems, concurrency_limit=None)

    dataset.focus(list_datasets, [dataset_dir], [dataset], queue=False)

    return elem_dict
LLaMA-Factory/src/llamafactory/webui/components/export.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict, Generator, List, Union

from ...extras.constants import PEFT_METHODS
from ...extras.misc import torch_gc
from ...extras.packages import is_gradio_available
from ...train.tuner import export_model
from ..common import GPTQ_BITS, get_save_dir
from ..locales import ALERTS


if is_gradio_available():
    import gradio as gr


if TYPE_CHECKING:
    from gradio.components import Component

    from ..engine import Engine


def can_quantize(checkpoint_path: Union[str, List[str]]) -> "gr.Dropdown":
    if isinstance(checkpoint_path, list) and len(checkpoint_path) != 0:
        return gr.Dropdown(value="none", interactive=False)
    else:
        return gr.Dropdown(interactive=True)


def save_model(
    lang: str,
    model_name: str,
    model_path: str,
    finetuning_type: str,
    checkpoint_path: Union[str, List[str]],
    template: str,
    visual_inputs: bool,
    export_size: int,
    export_quantization_bit: str,
    export_quantization_dataset: str,
    export_device: str,
    export_legacy_format: bool,
    export_dir: str,
    export_hub_model_id: str,
) -> Generator[str, None, None]:
    error = ""
    if not model_name:
        error = ALERTS["err_no_model"][lang]
    elif not model_path:
        error = ALERTS["err_no_path"][lang]
    elif not export_dir:
        error = ALERTS["err_no_export_dir"][lang]
    elif export_quantization_bit in GPTQ_BITS and not export_quantization_dataset:
        error = ALERTS["err_no_dataset"][lang]
    elif export_quantization_bit not in GPTQ_BITS and not checkpoint_path:
        error = ALERTS["err_no_adapter"][lang]
    elif export_quantization_bit in GPTQ_BITS and checkpoint_path and isinstance(checkpoint_path, list):
        error = ALERTS["err_gptq_lora"][lang]

    if error:
        gr.Warning(error)
        yield error
        return

    args = dict(
        model_name_or_path=model_path,
        finetuning_type=finetuning_type,
        template=template,
        visual_inputs=visual_inputs,
        export_dir=export_dir,
        export_hub_model_id=export_hub_model_id or None,
        export_size=export_size,
        export_quantization_bit=int(export_quantization_bit) if export_quantization_bit in GPTQ_BITS else None,
        export_quantization_dataset=export_quantization_dataset,
        export_device=export_device,
        export_legacy_format=export_legacy_format,
    )

    if checkpoint_path:
        if finetuning_type in PEFT_METHODS:  # list
            args["adapter_name_or_path"] = ",".join(
                [get_save_dir(model_name, finetuning_type, adapter) for adapter in checkpoint_path]
            )
        else:  # str
            args["model_name_or_path"] = get_save_dir(model_name, finetuning_type, checkpoint_path)

    yield ALERTS["info_exporting"][lang]
    export_model(args)
    torch_gc()
    yield ALERTS["info_exported"][lang]


def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
    with gr.Row():
        export_size = gr.Slider(minimum=1, maximum=100, value=5, step=1)
        export_quantization_bit = gr.Dropdown(choices=["none"] + GPTQ_BITS, value="none")
        export_quantization_dataset = gr.Textbox(value="data/c4_demo.json")
        export_device = gr.Radio(choices=["cpu", "auto"], value="cpu")
        export_legacy_format = gr.Checkbox()

    with gr.Row():
        export_dir = gr.Textbox()
        export_hub_model_id = gr.Textbox()

    checkpoint_path: gr.Dropdown = engine.manager.get_elem_by_id("top.checkpoint_path")
    checkpoint_path.change(can_quantize, [checkpoint_path], [export_quantization_bit], queue=False)

    export_btn = gr.Button()
    info_box = gr.Textbox(show_label=False, interactive=False)

    export_btn.click(
        save_model,
        [
            engine.manager.get_elem_by_id("top.lang"),
            engine.manager.get_elem_by_id("top.model_name"),
            engine.manager.get_elem_by_id("top.model_path"),
            engine.manager.get_elem_by_id("top.finetuning_type"),
            engine.manager.get_elem_by_id("top.checkpoint_path"),
            engine.manager.get_elem_by_id("top.template"),
            engine.manager.get_elem_by_id("top.visual_inputs"),
            export_size,
            export_quantization_bit,
            export_quantization_dataset,
            export_device,
            export_legacy_format,
            export_dir,
            export_hub_model_id,
        ],
        [info_box],
    )

    return dict(
        export_size=export_size,
        export_quantization_bit=export_quantization_bit,
        export_quantization_dataset=export_quantization_dataset,
        export_device=export_device,
        export_legacy_format=export_legacy_format,
        export_dir=export_dir,
        export_hub_model_id=export_hub_model_id,
        export_btn=export_btn,
        info_box=info_box,
    )
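
Note: for orientation, here is a hedged, abridged sketch of the kind of argument dict that save_model above assembles before calling export_model when a LoRA checkpoint is selected. Several export_* fields are omitted, and the model, template, and adapter names are placeholders rather than values from this commit.

# Hypothetical example of the export args built by save_model for a LoRA adapter.
from llamafactory.webui.common import get_save_dir

checkpoints = ["sft-checkpoint"]  # placeholder adapter folder under saves/
args = dict(
    model_name_or_path="Qwen/Qwen2-7B",   # placeholder base model
    finetuning_type="lora",
    template="default",
    export_dir="exported_model",
    export_quantization_bit=None,          # "none" selected -> no GPTQ step
    export_device="cpu",
    # PEFT methods: the selected checkpoints are joined into adapter_name_or_path
    adapter_name_or_path=",".join(
        get_save_dir("Qwen2-7B", "lora", adapter) for adapter in checkpoints
    ),
)
# export_model(args) would then merge the adapter and write the standalone model.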
LLaMA-Factory/src/llamafactory/webui/components/infer.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict

from ...extras.packages import is_gradio_available
from .chatbot import create_chat_box


if is_gradio_available():
    import gradio as gr


if TYPE_CHECKING:
    from gradio.components import Component

    from ..engine import Engine


def create_infer_tab(engine: "Engine") -> Dict[str, "Component"]:
    input_elems = engine.manager.get_base_elems()
    elem_dict = dict()

    with gr.Row():
        infer_backend = gr.Dropdown(choices=["huggingface", "vllm"], value="huggingface")
        infer_dtype = gr.Dropdown(choices=["auto", "float16", "bfloat16", "float32"], value="auto")

    with gr.Row():
        load_btn = gr.Button()
        unload_btn = gr.Button()

    info_box = gr.Textbox(show_label=False, interactive=False)

    input_elems.update({infer_backend, infer_dtype})
    elem_dict.update(
        dict(
            infer_backend=infer_backend,
            infer_dtype=infer_dtype,
            load_btn=load_btn,
            unload_btn=unload_btn,
            info_box=info_box,
        )
    )

    chatbot, messages, chat_elems = create_chat_box(engine, visible=False)
    elem_dict.update(chat_elems)

    load_btn.click(engine.chatter.load_model, input_elems, [info_box]).then(
        lambda: gr.Column(visible=engine.chatter.loaded), outputs=[chat_elems["chat_box"]]
    )

    unload_btn.click(engine.chatter.unload_model, input_elems, [info_box]).then(
        lambda: ([], []), outputs=[chatbot, messages]
    ).then(lambda: gr.Column(visible=engine.chatter.loaded), outputs=[chat_elems["chat_box"]])

    engine.manager.get_elem_by_id("top.visual_inputs").change(
        lambda enabled: gr.Column(visible=enabled),
        [engine.manager.get_elem_by_id("top.visual_inputs")],
        [chat_elems["image_box"]],
    )

    return elem_dict
LLaMA-Factory/src/llamafactory/webui/components/top.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict

from ...data import TEMPLATES
from ...extras.constants import METHODS, SUPPORTED_MODELS
from ...extras.packages import is_gradio_available
from ..common import get_model_info, list_checkpoints, save_config
from ..utils import can_quantize, can_quantize_to


if is_gradio_available():
    import gradio as gr


if TYPE_CHECKING:
    from gradio.components import Component


def create_top() -> Dict[str, "Component"]:
    available_models = list(SUPPORTED_MODELS.keys()) + ["Custom"]

    with gr.Row():
        lang = gr.Dropdown(choices=["en", "ru", "zh"], scale=1)
        model_name = gr.Dropdown(choices=available_models, scale=3)
        model_path = gr.Textbox(scale=3)

    with gr.Row():
        finetuning_type = gr.Dropdown(choices=METHODS, value="lora", scale=1)
        checkpoint_path = gr.Dropdown(multiselect=True, allow_custom_value=True, scale=6)

    with gr.Accordion(open=False) as advanced_tab:
        with gr.Row():
            quantization_bit = gr.Dropdown(choices=["none", "8", "4"], value="none", allow_custom_value=True, scale=1)
            quantization_method = gr.Dropdown(choices=["bitsandbytes", "hqq", "eetq"], value="bitsandbytes", scale=1)
            template = gr.Dropdown(choices=list(TEMPLATES.keys()), value="default", scale=1)
            rope_scaling = gr.Radio(choices=["none", "linear", "dynamic"], value="none", scale=2)
            booster = gr.Radio(choices=["auto", "flashattn2", "unsloth"], value="auto", scale=2)
            visual_inputs = gr.Checkbox(scale=1)

    model_name.change(get_model_info, [model_name], [model_path, template, visual_inputs], queue=False).then(
        list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False
    )
    model_name.input(save_config, inputs=[lang, model_name], queue=False)
    model_path.input(save_config, inputs=[lang, model_name, model_path], queue=False)
    finetuning_type.change(can_quantize, [finetuning_type], [quantization_bit], queue=False).then(
        list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False
    )
    checkpoint_path.focus(list_checkpoints, [model_name, finetuning_type], [checkpoint_path], queue=False)
    quantization_method.change(can_quantize_to, [quantization_method], [quantization_bit], queue=False)

    return dict(
        lang=lang,
        model_name=model_name,
        model_path=model_path,
        finetuning_type=finetuning_type,
        checkpoint_path=checkpoint_path,
        advanced_tab=advanced_tab,
        quantization_bit=quantization_bit,
        quantization_method=quantization_method,
        template=template,
        rope_scaling=rope_scaling,
        booster=booster,
        visual_inputs=visual_inputs,
    )
LLaMA-Factory/src/llamafactory/webui/components/train.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Dict

from transformers.trainer_utils import SchedulerType

from ...extras.constants import TRAINING_STAGES
from ...extras.misc import get_device_count
from ...extras.packages import is_gradio_available
from ..common import DEFAULT_DATA_DIR, list_checkpoints, list_datasets
from ..utils import change_stage, list_config_paths, list_output_dirs
from .data import create_preview_box


if is_gradio_available():
    import gradio as gr


if TYPE_CHECKING:
    from gradio.components import Component

    from ..engine import Engine


def create_train_tab(engine: "Engine") -> Dict[str, "Component"]:
    input_elems = engine.manager.get_base_elems()
    elem_dict = dict()

    with gr.Row():
        training_stage = gr.Dropdown(
            choices=list(TRAINING_STAGES.keys()), value=list(TRAINING_STAGES.keys())[0], scale=1
        )
        dataset_dir = gr.Textbox(value=DEFAULT_DATA_DIR, scale=1)
        dataset = gr.Dropdown(multiselect=True, allow_custom_value=True, scale=4)
        preview_elems = create_preview_box(dataset_dir, dataset)

    input_elems.update({training_stage, dataset_dir, dataset})
    elem_dict.update(dict(training_stage=training_stage, dataset_dir=dataset_dir, dataset=dataset, **preview_elems))

    with gr.Row():
        learning_rate = gr.Textbox(value="5e-5")
        num_train_epochs = gr.Textbox(value="3.0")
        max_grad_norm = gr.Textbox(value="1.0")
        max_samples = gr.Textbox(value="100000")
        compute_type = gr.Dropdown(choices=["bf16", "fp16", "fp32", "pure_bf16"], value="bf16")

    input_elems.update({learning_rate, num_train_epochs, max_grad_norm, max_samples, compute_type})
    elem_dict.update(
        dict(
            learning_rate=learning_rate,
            num_train_epochs=num_train_epochs,
            max_grad_norm=max_grad_norm,
            max_samples=max_samples,
            compute_type=compute_type,
        )
    )

    with gr.Row():
        cutoff_len = gr.Slider(minimum=4, maximum=65536, value=1024, step=1)
        batch_size = gr.Slider(minimum=1, maximum=1024, value=2, step=1)
        gradient_accumulation_steps = gr.Slider(minimum=1, maximum=1024, value=8, step=1)
        val_size = gr.Slider(minimum=0, maximum=1, value=0, step=0.001)
        lr_scheduler_type = gr.Dropdown(choices=[scheduler.value for scheduler in SchedulerType], value="cosine")

    input_elems.update({cutoff_len, batch_size, gradient_accumulation_steps, val_size, lr_scheduler_type})
    elem_dict.update(
        dict(
            cutoff_len=cutoff_len,
            batch_size=batch_size,
            gradient_accumulation_steps=gradient_accumulation_steps,
            val_size=val_size,
            lr_scheduler_type=lr_scheduler_type,
        )
    )

    with gr.Accordion(open=False) as extra_tab:
        with gr.Row():
            logging_steps = gr.Slider(minimum=1, maximum=1000, value=5, step=5)
            save_steps = gr.Slider(minimum=10, maximum=5000, value=100, step=10)
            warmup_steps = gr.Slider(minimum=0, maximum=5000, value=0, step=1)
            neftune_alpha = gr.Slider(minimum=0, maximum=10, value=0, step=0.1)
            optim = gr.Textbox(value="adamw_torch")

        with gr.Row():
            with gr.Column():
                packing = gr.Checkbox()
                neat_packing = gr.Checkbox()

            with gr.Column():
                train_on_prompt = gr.Checkbox()
                mask_history = gr.Checkbox()

            with gr.Column():
                resize_vocab = gr.Checkbox()
                use_llama_pro = gr.Checkbox()

            with gr.Column():
                shift_attn = gr.Checkbox()
                report_to = gr.Checkbox()

    input_elems.update(
        {
            logging_steps,
            save_steps,
            warmup_steps,
            neftune_alpha,
            optim,
            packing,
            neat_packing,
            train_on_prompt,
            mask_history,
            resize_vocab,
            use_llama_pro,
            shift_attn,
            report_to,
        }
    )
    elem_dict.update(
        dict(
            extra_tab=extra_tab,
            logging_steps=logging_steps,
            save_steps=save_steps,
            warmup_steps=warmup_steps,
            neftune_alpha=neftune_alpha,
            optim=optim,
            packing=packing,
            neat_packing=neat_packing,
            train_on_prompt=train_on_prompt,
            mask_history=mask_history,
            resize_vocab=resize_vocab,
            use_llama_pro=use_llama_pro,
            shift_attn=shift_attn,
            report_to=report_to,
        )
    )

    with gr.Accordion(open=False) as freeze_tab:
        with gr.Row():
            freeze_trainable_layers = gr.Slider(minimum=-128, maximum=128, value=2, step=1)
            freeze_trainable_modules = gr.Textbox(value="all")
            freeze_extra_modules = gr.Textbox()

    input_elems.update({freeze_trainable_layers, freeze_trainable_modules, freeze_extra_modules})
    elem_dict.update(
        dict(
            freeze_tab=freeze_tab,
            freeze_trainable_layers=freeze_trainable_layers,
            freeze_trainable_modules=freeze_trainable_modules,
            freeze_extra_modules=freeze_extra_modules,
        )
    )

    with gr.Accordion(open=False) as lora_tab:
        with gr.Row():
            lora_rank = gr.Slider(minimum=1, maximum=1024, value=8, step=1)
            lora_alpha = gr.Slider(minimum=1, maximum=2048, value=16, step=1)
            lora_dropout = gr.Slider(minimum=0, maximum=1, value=0, step=0.01)
            loraplus_lr_ratio = gr.Slider(minimum=0, maximum=64, value=0, step=0.01)
            create_new_adapter = gr.Checkbox()

        with gr.Row():
            use_rslora = gr.Checkbox()
            use_dora = gr.Checkbox()
            use_pissa = gr.Checkbox()
            lora_target = gr.Textbox(scale=2)
            additional_target = gr.Textbox(scale=2)

    input_elems.update(
        {
            lora_rank,
            lora_alpha,
            lora_dropout,
            loraplus_lr_ratio,
            create_new_adapter,
            use_rslora,
            use_dora,
            use_pissa,
            lora_target,
            additional_target,
        }
    )
    elem_dict.update(
        dict(
            lora_tab=lora_tab,
            lora_rank=lora_rank,
            lora_alpha=lora_alpha,
            lora_dropout=lora_dropout,
            loraplus_lr_ratio=loraplus_lr_ratio,
            create_new_adapter=create_new_adapter,
            use_rslora=use_rslora,
            use_dora=use_dora,
            use_pissa=use_pissa,
            lora_target=lora_target,
            additional_target=additional_target,
        )
    )

    with gr.Accordion(open=False) as rlhf_tab:
        with gr.Row():
            pref_beta = gr.Slider(minimum=0, maximum=1, value=0.1, step=0.01)
            pref_ftx = gr.Slider(minimum=0, maximum=10, value=0, step=0.01)
            pref_loss = gr.Dropdown(choices=["sigmoid", "hinge", "ipo", "kto_pair", "orpo", "simpo"], value="sigmoid")
            reward_model = gr.Dropdown(multiselect=True, allow_custom_value=True)
            with gr.Column():
                ppo_score_norm = gr.Checkbox()
                ppo_whiten_rewards = gr.Checkbox()

    input_elems.update({pref_beta, pref_ftx, pref_loss, reward_model, ppo_score_norm, ppo_whiten_rewards})
    elem_dict.update(
        dict(
            rlhf_tab=rlhf_tab,
            pref_beta=pref_beta,
            pref_ftx=pref_ftx,
            pref_loss=pref_loss,
            reward_model=reward_model,
            ppo_score_norm=ppo_score_norm,
            ppo_whiten_rewards=ppo_whiten_rewards,
        )
    )

    with gr.Accordion(open=False) as galore_tab:
        with gr.Row():
            use_galore = gr.Checkbox()
            galore_rank = gr.Slider(minimum=1, maximum=1024, value=16, step=1)
            galore_update_interval = gr.Slider(minimum=1, maximum=1024, value=200, step=1)
            galore_scale = gr.Slider(minimum=0, maximum=1, value=0.25, step=0.01)
            galore_target = gr.Textbox(value="all")

    input_elems.update({use_galore, galore_rank, galore_update_interval, galore_scale, galore_target})
    elem_dict.update(
        dict(
            galore_tab=galore_tab,
            use_galore=use_galore,
            galore_rank=galore_rank,
            galore_update_interval=galore_update_interval,
            galore_scale=galore_scale,
            galore_target=galore_target,
        )
    )

    with gr.Accordion(open=False) as badam_tab:
        with gr.Row():
            use_badam = gr.Checkbox()
            badam_mode = gr.Dropdown(choices=["layer", "ratio"], value="layer")
            badam_switch_mode = gr.Dropdown(choices=["ascending", "descending", "random", "fixed"], value="ascending")
            badam_switch_interval = gr.Slider(minimum=1, maximum=1024, value=50, step=1)
            badam_update_ratio = gr.Slider(minimum=0, maximum=1, value=0.05, step=0.01)

    input_elems.update({use_badam, badam_mode, badam_switch_mode, badam_switch_interval, badam_update_ratio})
    elem_dict.update(
        dict(
            badam_tab=badam_tab,
            use_badam=use_badam,
            badam_mode=badam_mode,
            badam_switch_mode=badam_switch_mode,
            badam_switch_interval=badam_switch_interval,
            badam_update_ratio=badam_update_ratio,
        )
    )

    with gr.Row():
        cmd_preview_btn = gr.Button()
        arg_save_btn = gr.Button()
        arg_load_btn = gr.Button()
        start_btn = gr.Button(variant="primary")
        stop_btn = gr.Button(variant="stop")

    with gr.Row():
        with gr.Column(scale=3):
            with gr.Row():
                current_time = gr.Textbox(visible=False, interactive=False)
                output_dir = gr.Dropdown(allow_custom_value=True)
                config_path = gr.Dropdown(allow_custom_value=True)

            with gr.Row():
                device_count = gr.Textbox(value=str(get_device_count() or 1), interactive=False)
                ds_stage = gr.Dropdown(choices=["none", "2", "3"], value="none")
                ds_offload = gr.Checkbox()

            with gr.Row():
                resume_btn = gr.Checkbox(visible=False, interactive=False)
                progress_bar = gr.Slider(visible=False, interactive=False)

            with gr.Row():
                output_box = gr.Markdown()

        with gr.Column(scale=1):
            loss_viewer = gr.Plot()

    input_elems.update({output_dir, config_path, ds_stage, ds_offload})
    elem_dict.update(
        dict(
            cmd_preview_btn=cmd_preview_btn,
            arg_save_btn=arg_save_btn,
            arg_load_btn=arg_load_btn,
            start_btn=start_btn,
            stop_btn=stop_btn,
            current_time=current_time,
            output_dir=output_dir,
            config_path=config_path,
            device_count=device_count,
            ds_stage=ds_stage,
            ds_offload=ds_offload,
            resume_btn=resume_btn,
            progress_bar=progress_bar,
            output_box=output_box,
            loss_viewer=loss_viewer,
        )
    )
    output_elems = [output_box, progress_bar, loss_viewer]

    cmd_preview_btn.click(engine.runner.preview_train, input_elems, output_elems, concurrency_limit=None)
    start_btn.click(engine.runner.run_train, input_elems, output_elems)
    stop_btn.click(engine.runner.set_abort)
    resume_btn.change(engine.runner.monitor, outputs=output_elems, concurrency_limit=None)

    lang = engine.manager.get_elem_by_id("top.lang")
    model_name: "gr.Dropdown" = engine.manager.get_elem_by_id("top.model_name")
    finetuning_type: "gr.Dropdown" = engine.manager.get_elem_by_id("top.finetuning_type")

    arg_save_btn.click(engine.runner.save_args, input_elems, output_elems, concurrency_limit=None)
    arg_load_btn.click(
        engine.runner.load_args, [lang, config_path], list(input_elems) + [output_box], concurrency_limit=None
    )

    dataset.focus(list_datasets, [dataset_dir, training_stage], [dataset], queue=False)
    training_stage.change(change_stage, [training_stage], [dataset, packing], queue=False)
    reward_model.focus(list_checkpoints, [model_name, finetuning_type], [reward_model], queue=False)
    model_name.change(list_output_dirs, [model_name, finetuning_type, current_time], [output_dir], queue=False)
    finetuning_type.change(list_output_dirs, [model_name, finetuning_type, current_time], [output_dir], queue=False)
    output_dir.change(
        list_output_dirs, [model_name, finetuning_type, current_time], [output_dir], concurrency_limit=None
    )
    output_dir.input(
        engine.runner.check_output_dir,
        [lang, model_name, finetuning_type, output_dir],
        list(input_elems) + [output_box],
        concurrency_limit=None,
    )
    config_path.change(list_config_paths, [current_time], [config_path], queue=False)

    return elem_dict
LLaMA-Factory/src/llamafactory/webui/css.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CSS = r"""
.duplicate-button {
  margin: auto !important;
  color: white !important;
  background: black !important;
  border-radius: 100vh !important;
}

.modal-box {
  position: fixed !important;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%); /* center horizontally */
  max-width: 1000px;
  max-height: 750px;
  overflow-y: auto;
  background-color: var(--input-background-fill);
  flex-wrap: nowrap !important;
  border: 2px solid black !important;
  z-index: 1000;
  padding: 10px;
}

.dark .modal-box {
  border: 2px solid white !important;
}
"""
LLaMA-Factory/src/llamafactory/webui/engine.py
0 → 100644
# Copyright 2024 the LlamaFactory team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Dict

from .chatter import WebChatModel
from .common import load_config
from .locales import LOCALES
from .manager import Manager
from .runner import Runner
from .utils import create_ds_config, get_time


if TYPE_CHECKING:
    from gradio.components import Component


class Engine:
    def __init__(self, demo_mode: bool = False, pure_chat: bool = False) -> None:
        self.demo_mode = demo_mode
        self.pure_chat = pure_chat
        self.manager = Manager()
        self.runner = Runner(self.manager, demo_mode)
        self.chatter = WebChatModel(self.manager, demo_mode, lazy_init=(not pure_chat))
        if not demo_mode:
            create_ds_config()

    def _update_component(self, input_dict: Dict[str, Dict[str, Any]]) -> Dict["Component", "Component"]:
        r"""
        Gets the dict to update the components.
        """
        output_dict: Dict["Component", "Component"] = {}
        for elem_id, elem_attr in input_dict.items():
            elem = self.manager.get_elem_by_id(elem_id)
            output_dict[elem] = elem.__class__(**elem_attr)

        return output_dict

    def resume(self):
        user_config = load_config() if not self.demo_mode else {}
        lang = user_config.get("lang", None) or "en"

        init_dict = {"top.lang": {"value": lang}, "infer.chat_box": {"visible": self.chatter.loaded}}

        if not self.pure_chat:
            current_time = get_time()
            init_dict["train.current_time"] = {"value": current_time}
            init_dict["train.output_dir"] = {"value": "train_{}".format(current_time)}
            init_dict["train.config_path"] = {"value": "{}.yaml".format(current_time)}
            init_dict["eval.output_dir"] = {"value": "eval_{}".format(current_time)}
            init_dict["infer.image_box"] = {"visible": False}

            if user_config.get("last_model", None):
                init_dict["top.model_name"] = {"value": user_config["last_model"]}

        yield self._update_component(init_dict)

        if self.runner.running and not self.demo_mode and not self.pure_chat:
            yield {elem: elem.__class__(value=value) for elem, value in self.runner.running_data.items()}
            if self.runner.do_train:
                yield self._update_component({"train.resume_btn": {"value": True}})
            else:
                yield self._update_component({"eval.resume_btn": {"value": True}})

    def change_lang(self, lang: str):
        return {
            elem: elem.__class__(**LOCALES[elem_name][lang])
            for elem_name, elem in self.manager.get_elem_iter()
            if elem_name in LOCALES
        }