ModelZoo / qwen2.5-coder_pytorch / Commits
"include/ck/ck.hpp" did not exist on "31ded4ac4bc524acdbf897ffff094d7e7cbed991"
Commit 53b3977b authored Jul 11, 2025 by dongchy920

Initial commit

Pipeline #2841 failed with stages in 0 seconds
Changes: 350 files · Pipelines: 1
Showing 20 changed files with 410 additions and 0 deletions.
Changed files:

LLaMA-Factory/examples/extras/nlg_eval/llama3_lora_predict.yaml  +29 -0
LLaMA-Factory/examples/extras/pissa/init.sh  +5 -0
LLaMA-Factory/examples/extras/pissa/llama3_lora_sft.yaml  +43 -0
LLaMA-Factory/examples/inference/llama3.yaml  +4 -0
LLaMA-Factory/examples/inference/llama3_full_sft.yaml  +4 -0
LLaMA-Factory/examples/inference/llama3_lora_sft.yaml  +5 -0
LLaMA-Factory/examples/inference/llama3_vllm.yaml  +5 -0
LLaMA-Factory/examples/inference/llava1_5.yaml  +4 -0
LLaMA-Factory/examples/inference/qwen2_coder_custom.yaml  +4 -0
LLaMA-Factory/examples/inference/qwen2_vl.yaml  +4 -0
LLaMA-Factory/examples/merge_lora/llama3_gptq.yaml  +12 -0
LLaMA-Factory/examples/merge_lora/llama3_lora_sft.yaml  +14 -0
LLaMA-Factory/examples/merge_lora/qwen2vl_lora_sft.yaml  +14 -0
LLaMA-Factory/examples/train_full/llama3_full_sft.yaml  +40 -0
LLaMA-Factory/examples/train_full/qwen2vl_full_sft.yaml  +42 -0
LLaMA-Factory/examples/train_lora/llama3_lora_dpo.yaml  +42 -0
LLaMA-Factory/examples/train_lora/llama3_lora_eval.yaml  +19 -0
LLaMA-Factory/examples/train_lora/llama3_lora_kto.yaml  +41 -0
LLaMA-Factory/examples/train_lora/llama3_lora_ppo.yaml  +40 -0
LLaMA-Factory/examples/train_lora/llama3_lora_pretrain.yaml  +39 -0
LLaMA-Factory/examples/extras/nlg_eval/llama3_lora_predict.yaml (new file, mode 100644)

# The batch generation can be SLOW using this config.
# For faster inference, we recommend to use `scripts/vllm_infer.py`.

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
trust_remote_code: true

### method
stage: sft
do_predict: true
finetuning_type: lora

### dataset
eval_dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 50
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/lora/predict
overwrite_output_dir: true

### eval
per_device_eval_batch_size: 1
predict_with_generate: true
ddp_timeout: 180000000
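In LLaMA-Factory this predict recipe is normally launched through the train entry point (a sketch, assuming the standard llamafactory-cli command and running from the LLaMA-Factory directory):

llamafactory-cli train examples/extras/nlg_eval/llama3_lora_predict.yaml

With do_predict: true and predict_with_generate: true, this generates responses for the eval_dataset and writes them under saves/llama3-8b/lora/predict.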
LLaMA-Factory/examples/extras/pissa/init.sh (new file, mode 100644)

#!/bin/bash

python scripts/pissa_init.py \
    --model_name_or_path meta-llama/Meta-Llama-3-8B-Instruct \
    --output_dir models/llama3-8b-pissa
LLaMA-Factory/examples/extras/pissa/llama3_lora_sft.yaml (new file, mode 100644)

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: lora
lora_target: all
pissa_init: true
pissa_iter: 16
pissa_convert: true

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/lora/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
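The usual PiSSA workflow (an assumption based on LLaMA-Factory convention, not stated in this commit) is to run the init script above first and then point a training run at this config, both from the LLaMA-Factory directory:

bash examples/extras/pissa/init.sh
llamafactory-cli train examples/extras/pissa/llama3_lora_sft.yaml

Here pissa_convert: true converts the trained PiSSA adapter back into a standard LoRA adapter at save time, so the result can be loaded like any other LoRA checkpoint.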
LLaMA-Factory/examples/inference/llama3.yaml (new file, mode 100644)

model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
infer_backend: huggingface  # choices: [huggingface, vllm]
trust_remote_code: true
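The inference configs in this commit are consumed by the interactive chat (or api/webchat) entry points rather than the trainer; a minimal sketch assuming the standard CLI, run from the LLaMA-Factory directory:

llamafactory-cli chat examples/inference/llama3.yaml

The remaining files under examples/inference/ below follow the same pattern and differ only in model_name_or_path, adapter_name_or_path, template, and infer_backend.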
LLaMA-Factory/examples/inference/llama3_full_sft.yaml (new file, mode 100644)

model_name_or_path: saves/llama3-8b/full/sft
template: llama3
infer_backend: huggingface  # choices: [huggingface, vllm]
trust_remote_code: true
LLaMA-Factory/examples/inference/llama3_lora_sft.yaml (new file, mode 100644)

model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
template: llama3
infer_backend: huggingface  # choices: [huggingface, vllm]
trust_remote_code: true
LLaMA-Factory/examples/inference/llama3_vllm.yaml (new file, mode 100644)

model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
infer_backend: vllm
vllm_enforce_eager: true
trust_remote_code: true
LLaMA-Factory/examples/inference/llava1_5.yaml (new file, mode 100644)

model_name_or_path: llava-hf/llava-1.5-7b-hf
template: llava
infer_backend: huggingface  # choices: [huggingface, vllm]
trust_remote_code: true
LLaMA-Factory/examples/inference/qwen2_coder_custom.yaml (new file, mode 100644)

model_name_or_path: Qwen/Qwen2.5-Coder-32B-Instruct
template: qwen
infer_backend: huggingface  # choices: [huggingface, vllm]
trust_remote_code: true
LLaMA-Factory/examples/inference/qwen2_vl.yaml (new file, mode 100644)

model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
template: qwen2_vl
infer_backend: huggingface  # choices: [huggingface, vllm]
trust_remote_code: true
LLaMA-Factory/examples/merge_lora/llama3_gptq.yaml (new file, mode 100644)

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
trust_remote_code: true

### export
export_dir: models/llama3_gptq
export_quantization_bit: 4
export_quantization_dataset: data/c4_demo.json
export_size: 2
export_device: cpu
export_legacy_format: false
LLaMA-Factory/examples/merge_lora/llama3_lora_sft.yaml (new file, mode 100644)

### Note: DO NOT use quantized model or quantization_bit when merging lora adapters

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
template: llama3
finetuning_type: lora
trust_remote_code: true

### export
export_dir: models/llama3_lora_sft
export_size: 2
export_device: cpu
export_legacy_format: false
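Merging is performed through the export entry point (a sketch, assuming the standard CLI); the note above matters because the adapter must be merged into unquantized base weights:

llamafactory-cli export examples/merge_lora/llama3_lora_sft.yaml

The merged standalone model is written to export_dir (models/llama3_lora_sft) and can afterwards be loaded without the adapter.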
LLaMA-Factory/examples/merge_lora/qwen2vl_lora_sft.yaml (new file, mode 100644)

### Note: DO NOT use quantized model or quantization_bit when merging lora adapters

### model
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
adapter_name_or_path: saves/qwen2_vl-7b/lora/sft
template: qwen2_vl
finetuning_type: lora
trust_remote_code: true

### export
export_dir: models/qwen2_vl_lora_sft
export_size: 2
export_device: cpu
export_legacy_format: false
LLaMA-Factory/examples/train_full/llama3_full_sft.yaml (new file, mode 100644)

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
deepspeed: examples/deepspeed/ds_z3_config.json  # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
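Because this recipe uses finetuning_type: full with DeepSpeed ZeRO-3, it is meant to run as a distributed job; a sketch assuming LLaMA-Factory's torchrun-based launcher:

FORCE_TORCHRUN=1 llamafactory-cli train examples/train_full/llama3_full_sft.yaml

The ds_z3_config.json stage shards optimizer states, gradients, and parameters across GPUs, which keeps the per-GPU memory footprint of full-parameter SFT manageable at per_device_train_batch_size: 1.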
LLaMA-Factory/examples/train_full/qwen2vl_full_sft.yaml (new file, mode 100644)

### model
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
trust_remote_code: true

### method
stage: sft
do_train: true
finetuning_type: full
freeze_vision_tower: true  # choices: [true, false]
train_mm_proj_only: false  # choices: [true, false]
deepspeed: examples/deepspeed/ds_z3_config.json  # choices: [ds_z0_config.json, ds_z2_config.json, ds_z3_config.json]

### dataset
dataset: mllm_demo,identity,alpaca_en_demo
template: qwen2_vl
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/qwen2_vl-7b/full/sft
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 2
learning_rate: 1.0e-5
num_train_epochs: 30.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
LLaMA-Factory/examples/train_lora/llama3_lora_dpo.yaml (new file, mode 100644)

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: dpo
do_train: true
finetuning_type: lora
lora_target: all
pref_beta: 0.1
pref_loss: sigmoid  # choices: [sigmoid (dpo), orpo, simpo]

### dataset
dataset: dpo_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/lora/dpo
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 5.0e-6
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
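Like the other train_lora recipes, this DPO config is launched with the trainer entry point (a sketch, assuming the standard CLI):

llamafactory-cli train examples/train_lora/llama3_lora_dpo.yaml

dpo_en_demo supplies chosen/rejected preference pairs, pref_beta is the preference-loss beta, and pref_loss selects the objective (sigmoid for vanilla DPO, or orpo/simpo). The kto and pretrain configs below are launched the same way.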
LLaMA-Factory/examples/train_lora/llama3_lora_eval.yaml (new file, mode 100644)

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
trust_remote_code: true

### method
finetuning_type: lora

### dataset
task: mmlu_test  # choices: [mmlu_test, ceval_validation, cmmlu_test]
template: fewshot
lang: en
n_shot: 5

### output
save_dir: saves/llama3-8b/lora/eval

### eval
batch_size: 4
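This recipe drives benchmark evaluation (MMLU here) rather than training, so it goes through the eval entry point (a sketch, assuming the standard CLI):

llamafactory-cli eval examples/train_lora/llama3_lora_eval.yaml

task selects the benchmark split, n_shot: 5 gives 5-shot prompting with the fewshot template, and the scores are written to save_dir.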
LLaMA-Factory/examples/train_lora/llama3_lora_kto.yaml (new file, mode 100644)

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: kto
do_train: true
finetuning_type: lora
lora_target: all
pref_beta: 0.1

### dataset
dataset: kto_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/lora/kto
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 5.0e-6
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
LLaMA-Factory/examples/train_lora/llama3_lora_ppo.yaml (new file, mode 100644)

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
reward_model: saves/llama3-8b/lora/reward
trust_remote_code: true

### method
stage: ppo
do_train: true
finetuning_type: lora
lora_target: all

### dataset
dataset: identity,alpaca_en_demo
template: llama3
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/lora/ppo
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-5
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### generate
max_new_tokens: 512
top_k: 0
top_p: 0.9
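PPO assumes the reward adapter referenced by reward_model (saves/llama3-8b/lora/reward) has already been trained; that reward-model config is not among the 20 files shown on this page. The run itself is launched like the other LoRA recipes (a sketch, assuming the standard CLI):

llamafactory-cli train examples/train_lora/llama3_lora_ppo.yaml

The ### generate block (max_new_tokens, top_k, top_p) controls the sampling used for rollouts during PPO.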
LLaMA-Factory/examples/train_lora/llama3_lora_pretrain.yaml (new file, mode 100644)

### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: pt
do_train: true
finetuning_type: lora
lora_target: all

### dataset
dataset: c4_demo
cutoff_len: 2048
max_samples: 1000
overwrite_cache: true
preprocessing_num_workers: 16

### output
output_dir: saves/llama3-8b/lora/pretrain
logging_steps: 10
save_steps: 500
plot_loss: true
overwrite_output_dir: true

### train
per_device_train_batch_size: 1
gradient_accumulation_steps: 8
learning_rate: 1.0e-4
num_train_epochs: 3.0
lr_scheduler_type: cosine
warmup_ratio: 0.1
bf16: true
ddp_timeout: 180000000

### eval
val_size: 0.1
per_device_eval_batch_size: 1
eval_strategy: steps
eval_steps: 500
(Diff listing page 1 of 18.)