OpenDAS / LLaMA-Factory · Commits
"tests/data/processor/test_feedback.py" did not exist on "e92143e3fc53b0cef786700b2cceab5564a3367f"
Commit 317a82e2, authored Mar 07, 2025 by chenych

Add QWQ-32B

Parent: 37b0ad9f
Changes: 255 · Showing 15 changed files with 58 additions and 106 deletions (+58 −106) on this page.
tests/e2e/test_train.py (+1 −1)
tests/eval/test_eval_template.py (+1 −1)
tests/model/model_utils/test_attention.py (+4 −1)
tests/model/model_utils/test_checkpointing.py (+1 −1)
tests/model/model_utils/test_misc.py (+43 −0)
tests/model/model_utils/test_packing.py (+1 −1)
tests/model/model_utils/test_visual.py (+1 −1)
tests/model/test_base.py (+1 −1)
tests/model/test_freeze.py (+1 −1)
tests/model/test_full.py (+1 −1)
tests/model/test_lora.py (+1 −1)
tests/model/test_pissa.py (+1 −1)
tests/test_throughput.py (+0 −30)
tests/test_toolcall.py (+0 −64)
tests/train/test_sft_trainer.py (+1 −1)
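All files on this page are tests; the QwQ-32B model registration itself is not shown here and lands elsewhere in the commit. For orientation only, a hedged sketch of how a model group is typically registered in LLaMA-Factory's src/llamafactory/extras/constants.py — the repository identifiers and the template value below are assumptions based on the upstream project, not taken from this diff:

# Hypothetical sketch, NOT part of this page's diff. The exact repo ids and
# template name for QwQ-32B are assumptions.
from llamafactory.extras.constants import DownloadSource, register_model_group

register_model_group(
    models={
        "QwQ-32B": {
            DownloadSource.DEFAULT: "Qwen/QwQ-32B",     # Hugging Face repo (assumed)
            DownloadSource.MODELSCOPE: "Qwen/QwQ-32B",  # ModelScope repo (assumed)
        },
    },
    template="qwen",  # assumed; the commit may register a dedicated template instead
)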
tests/e2e/test_train.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/eval/test_eval_template.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/model/model_utils/test_attention.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
@@ -14,8 +14,10 @@
 import os
 
+import pytest
 from transformers.utils import is_flash_attn_2_available, is_torch_sdpa_available
 
+from llamafactory.extras.packages import is_transformers_version_greater_than
 from llamafactory.train.test_utils import load_infer_model
 ...
@@ -27,6 +29,7 @@ INFER_ARGS = {
 }
 
 
+@pytest.mark.xfail(is_transformers_version_greater_than("4.48"), reason="Attention refactor.")
 def test_attention():
     attention_available = ["disabled"]
     if is_torch_sdpa_available():
 ...
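The new marker turns this test into an expected failure on transformers ≥ 4.48 (the "Attention refactor." reason in the diff), so a known incompatibility no longer breaks the suite. As a reminder of how a conditional xfail behaves, a minimal standalone sketch (not from this repository):

import pytest

# With a true condition the test still runs, but a failure is reported as XFAIL
# rather than failing the run; with a false condition the marker is a no-op.
@pytest.mark.xfail(condition=True, reason="known incompatibility")
def test_expected_failure():
    assert 1 == 2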
tests/model/model_utils/test_checkpointing.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/data/test_processor.py → tests/model/model_utils/test_misc.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
@@ -12,24 +12,32 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from typing import Tuple
+import os
 
 import pytest
+import torch
+from transformers import AutoConfig, AutoModelForCausalLM
 
-from llamafactory.data.processors.processor_utils import infer_seqlen
+from llamafactory.model.model_utils.misc import find_expanded_modules
 
+HF_TOKEN = os.getenv("HF_TOKEN")
 
-@pytest.mark.parametrize(
-    "test_input,test_output",
-    [
-        ((3000, 2000, 1000), (600, 400)),
-        ((2000, 3000, 1000), (400, 600)),
-        ((1000, 100, 1000), (900, 100)),
-        ((100, 1000, 1000), (100, 900)),
-        ((100, 500, 1000), (100, 500)),
-        ((500, 100, 1000), (500, 100)),
-        ((10, 10, 1000), (10, 10)),
-    ],
-)
-def test_infer_seqlen(test_input: Tuple[int, int, int], test_output: Tuple[int, int]):
-    assert test_output == infer_seqlen(*test_input)
+
+@pytest.mark.skipif(not HF_TOKEN, reason="Gated model.")
+def test_expanded_modules():
+    config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
+    with torch.device("meta"):
+        model = AutoModelForCausalLM.from_config(config)
+
+    expanded_modules = find_expanded_modules(model, ["q_proj", "v_proj"], num_layer_trainable=4)
+    assert expanded_modules == [
+        "model.layers.7.self_attn.q_proj",
+        "model.layers.7.self_attn.v_proj",
+        "model.layers.15.self_attn.q_proj",
+        "model.layers.15.self_attn.v_proj",
+        "model.layers.23.self_attn.q_proj",
+        "model.layers.23.self_attn.v_proj",
+        "model.layers.31.self_attn.q_proj",
+        "model.layers.31.self_attn.v_proj",
+    ]
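The expected module list is consistent with selecting the last layer of each block of layers: assuming the 32 decoder layers of Meta-Llama-3-8B (as the layer indices in the test imply), num_layer_trainable=4 gives a stride of 32 // 4 = 8 and therefore layers 7, 15, 23 and 31. A quick sanity check of that arithmetic:

# Assumption: 32 hidden layers, as implied by the expected names in the test above.
num_hidden_layers, num_layer_trainable = 32, 4
stride = num_hidden_layers // num_layer_trainable  # 8
trainable_layer_ids = [stride * (i + 1) - 1 for i in range(num_layer_trainable)]
print(trainable_layer_ids)  # [7, 15, 23, 31] -> q_proj/v_proj of these layers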
tests/model/model_utils/test_packing.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/model/model_utils/test_visual.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/model/test_base.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/model/test_freeze.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/model/test_full.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/model/test_lora.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/model/test_pissa.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...
tests/test_throughput.py (deleted, 100644 → 0)

import os
import time

from openai import OpenAI
from transformers.utils.versions import require_version


require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")


def main():
    client = OpenAI(
        api_key="0",
        base_url="http://localhost:{}/v1".format(os.environ.get("API_PORT", 8000)),
    )
    messages = [{"role": "user", "content": "Write a long essay about environment protection as long as possible."}]
    num_tokens = 0
    start_time = time.time()
    for _ in range(8):
        result = client.chat.completions.create(messages=messages, model="test")
        num_tokens += result.usage.completion_tokens

    elapsed_time = time.time() - start_time
    print("Throughput: {:.2f} tokens/s".format(num_tokens / elapsed_time))
    # --infer_backend hf: 27.22 tokens/s (1.0x)
    # --infer_backend vllm: 73.03 tokens/s (2.7x)


if __name__ == "__main__":
    main()
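This deleted script benchmarked a local OpenAI-compatible endpoint (base_url built from the API_PORT environment variable) by summing completion tokens over eight requests. The "2.7x" in its trailing comment is simply the ratio of the two recorded backend figures:

# Ratio of the throughput numbers recorded in the deleted script's comments.
hf_tps, vllm_tps = 27.22, 73.03
print(f"{vllm_tps / hf_tps:.1f}x")  # 2.7x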
tests/test_toolcall.py (deleted, 100644 → 0)

import json
import os
from typing import Sequence

from openai import OpenAI
from transformers.utils.versions import require_version


require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")


def calculate_gpa(grades: Sequence[str], hours: Sequence[int]) -> float:
    grade_to_score = {"A": 4, "B": 3, "C": 2}
    total_score, total_hour = 0, 0
    for grade, hour in zip(grades, hours):
        total_score += grade_to_score[grade] * hour
        total_hour += hour

    return round(total_score / total_hour, 2)


def main():
    client = OpenAI(
        api_key="0",
        base_url="http://localhost:{}/v1".format(os.environ.get("API_PORT", 8000)),
    )
    tools = [
        {
            "type": "function",
            "function": {
                "name": "calculate_gpa",
                "description": "Calculate the Grade Point Average (GPA) based on grades and credit hours",
                "parameters": {
                    "type": "object",
                    "properties": {
                        "grades": {"type": "array", "items": {"type": "string"}, "description": "The grades"},
                        "hours": {"type": "array", "items": {"type": "integer"}, "description": "The credit hours"},
                    },
                    "required": ["grades", "hours"],
                },
            },
        }
    ]
    tool_map = {"calculate_gpa": calculate_gpa}

    messages = []
    messages.append({"role": "user", "content": "My grades are A, A, B, and C. The credit hours are 3, 4, 3, and 2."})
    result = client.chat.completions.create(messages=messages, model="test", tools=tools)
    if result.choices[0].message.tool_calls is None:
        raise ValueError("Cannot retrieve function call from the response.")

    messages.append(result.choices[0].message)
    tool_call = result.choices[0].message.tool_calls[0].function
    print(tool_call)
    # Function(arguments='{"grades": ["A", "A", "B", "C"], "hours": [3, 4, 3, 2]}', name='calculate_gpa')
    name, arguments = tool_call.name, json.loads(tool_call.arguments)
    tool_result = tool_map[name](**arguments)
    messages.append({"role": "tool", "content": json.dumps({"gpa": tool_result}, ensure_ascii=False)})
    result = client.chat.completions.create(messages=messages, model="test", tools=tools)
    print(result.choices[0].message.content)
    # Based on the grades and credit hours you provided, your Grade Point Average (GPA) is 3.42.


if __name__ == "__main__":
    main()
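The expected answer in the deleted script's final comment can be reproduced without any API server by applying the same grade scale to the same inputs:

# Same grade scale and inputs as the deleted test: 41 quality points over 12 hours.
grade_to_score = {"A": 4, "B": 3, "C": 2}
grades, hours = ["A", "A", "B", "C"], [3, 4, 3, 2]
gpa = round(sum(grade_to_score[g] * h for g, h in zip(grades, hours)) / sum(hours), 2)
print(gpa)  # 3.42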
tests/train/test_sft_trainer.py
-# Copyright 2024 the LlamaFactory team.
+# Copyright 2025 the LlamaFactory team.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
 ...