jerrrrry / infinilm · Commit f2d9d397 (unverified)

Authored Feb 04, 2026 by gongchensu; committed via GitHub on Feb 04, 2026.

Merge pull request #172 from gongchensu/Issue/170

Issue/170 - Add HYGON support and improve device type handling.

Parents: d21a4f59, ed33c3a9
Showing 2 changed files with 23 additions and 7 deletions (+23 −7):

    examples/jiuge.py             +8  −1
    test/bench/test_benchmark.py  +15 −6
examples/jiuge.py (view file @ f2d9d397)

@@ -47,6 +47,11 @@ def get_args():
         action="store_true",
         help="Run cambricon test",
     )
+    parser.add_argument(
+        "--hygon",
+        action="store_true",
+        help="Run hygon test",
+    )
     parser.add_argument(
         "--model_path",
         type=str,
@@ -245,9 +250,11 @@ if __name__ == "__main__":
         device_str = "cuda"
     elif args.cambricon:
         device_str = "mlu"
+    elif args.hygon:
+        device_str = "cuda"
     else:
         print(
-            "Usage: python examples/jiuge.py [--cpu | --nvidia | --metax | --moore | --iluvatar] --model_path=<path/to/model_dir>\n"
+            "Usage: python examples/jiuge.py [--cpu | --nvidia | --metax | --moore | --iluvatar | --cambricon | --hygon] --model_path=<path/to/model_dir>\n"
            "such as, python examples/jiuge.py --nvidia --model_path=~/TinyLlama-1.1B-Chat-v1.0"
         )
         sys.exit(1)
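For context, a minimal self-contained sketch of how the new flag flows into the device string. The flag names and the hygon-to-"cuda" mapping come straight from the diff; the stand-alone parser and the simulated invocation are hypothetical:

    import argparse

    # Hypothetical stand-alone parser mirroring only the flags touched by this commit.
    parser = argparse.ArgumentParser()
    parser.add_argument("--cambricon", action="store_true", help="Run cambricon test")
    parser.add_argument("--hygon", action="store_true", help="Run hygon test")
    args = parser.parse_args(["--hygon"])  # simulate: python examples/jiuge.py --hygon

    # Per the diff, --hygon selects the "cuda" device string (the same string the
    # NVIDIA path uses), while --cambricon selects "mlu".
    if args.cambricon:
        device_str = "mlu"
    elif args.hygon:
        device_str = "cuda"
    else:
        device_str = "cpu"

    print(device_str)  # -> cuda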
test/bench/test_benchmark.py (view file @ f2d9d397)

@@ -62,16 +62,18 @@ class InfiniLMBenchmark(BaseBenchmark):
         self.benchmark = benchmark
         # Map device type string to infinicore device
+        # Note: These map to the Python device type strings used by infinicore.device()
+        # which correspond to _TORCH_DEVICE_MAP values in InfiniCore/python/infinicore/device.py
         device_map = {
             "cpu": "cpu",
             "nvidia": "cuda",
             "cambricon": "mlu",
-            "ascend": "ascend",
-            "metax": "metax",
-            "moore": "moore",
-            "iluvatar": "iluvatar",
-            "kunlun": "kunlun",
-            "hygon": "hygon",
+            "ascend": "npu",
+            "metax": "cuda",
+            "moore": "musa",
+            "iluvatar": "cuda",
+            "kunlun": "cuda",
+            "hygon": "cuda",
         }
         device_name = device_map.get(device_type_str.lower(), "cpu")
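The lookup on the last line of the hunk is a plain dict .get() with a CPU fallback, so misspelled or unsupported device names degrade to CPU rather than raising. A small sketch (mapping values copied from the new table above; the sample inputs are made up):

    # Excerpt of the new mapping from the diff above.
    device_map = {"cpu": "cpu", "nvidia": "cuda", "cambricon": "mlu", "hygon": "cuda"}

    # Lookups are lowercased first; unknown names silently fall back to "cpu".
    print(device_map.get("HYGON".lower(), "cpu"))    # -> cuda
    print(device_map.get("unknown".lower(), "cpu"))  # -> cpu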
@@ -180,6 +182,13 @@ class InfiniLMBenchmark(BaseBenchmark):
         start_time = time.perf_counter()
+        # For cpp backend, reset cache before generation if use_cache is enabled
+        if self.model.use_cache and hasattr(self.model, "_model") and hasattr(
+            self.model._model, "reset_cache"
+        ):
+            batch_size = input_ids.shape[0]
+            seq_len = input_ids.shape[1]
+            max_cache_len = max_steps + seq_len
+            self.model.reset_cache(batch_size=batch_size, initial_capacity=max_cache_len)
         # Use model's built-in generate() method which properly handles KV cache
         # Pass sampling parameters (temperature, topk, topp) via kwargs
         output_ids = self.model.generate(
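The capacity arithmetic in this hunk sizes the KV cache for the whole run: every prompt token plus every token the benchmark may generate. A minimal sketch of just that arithmetic, with numpy standing in for the real tensor type (which the diff does not show):

    import numpy as np

    input_ids = np.zeros((2, 128), dtype=np.int64)  # batch of 2 prompts, 128 tokens each
    max_steps = 256                                  # upper bound on generated tokens

    batch_size = input_ids.shape[0]       # 2 sequences in the batch
    seq_len = input_ids.shape[1]          # 128 prompt tokens per sequence
    max_cache_len = max_steps + seq_len   # 384 KV-cache slots reserved per sequence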