Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
jerrrrry
infinicore
Commits
abab5652
Unverified
Commit
abab5652
authored
Feb 04, 2026
by
thatPepe
Committed by
GitHub
Feb 04, 2026
Browse files
Merge pull request #999 from InfiniTensor/issue/988
issue/988 - adapt to ali ppu
parents
bf0c825d
e0268b24
Changes
65
Hide whitespace changes
Inline
Side-by-side
Showing
5 changed files
with
170 additions
and
0 deletions
+170
-0
test/infinicore/test.py
test/infinicore/test.py
+1
-0
test/infiniop/libinfiniop/devices.py
test/infiniop/libinfiniop/devices.py
+3
-0
test/infiniop/libinfiniop/utils.py
test/infiniop/libinfiniop/utils.py
+10
-0
xmake.lua
xmake.lua
+21
-0
xmake/ali.lua
xmake/ali.lua
+135
-0
No files found.
test/infinicore/test.py
View file @
abab5652
...
...
@@ -183,6 +183,7 @@ def func6_initialize_device_relationship():
_infinicore
.
Device
.
Type
.
QY
,
# 9 "cuda"
_infinicore
.
Device
.
Type
.
KUNLUN
,
# 7 "cuda"
_infinicore
.
Device
.
Type
.
HYGON
,
# 8 "cuda"
_infinicore
.
Device
.
Type
.
ALI
,
# 10 "cuda"
]
if
True
:
print
(
"
\n
---------- 测试 CPU"
)
...
...
test/infiniop/libinfiniop/devices.py
View file @
abab5652
...
...
@@ -9,6 +9,7 @@ class InfiniDeviceEnum:
KUNLUN
=
7
HYGON
=
8
QY
=
9
ALI
=
10
InfiniDeviceNames
=
{
...
...
@@ -22,6 +23,7 @@ InfiniDeviceNames = {
InfiniDeviceEnum
.
KUNLUN
:
"Kunlun"
,
InfiniDeviceEnum
.
HYGON
:
"Hygon"
,
InfiniDeviceEnum
.
QY
:
"QY"
,
InfiniDeviceEnum
.
ALI
:
"Ali"
,
}
# Mapping that maps InfiniDeviceEnum to torch device string
...
...
@@ -36,4 +38,5 @@ torch_device_map = {
InfiniDeviceEnum
.
KUNLUN
:
"cuda"
,
InfiniDeviceEnum
.
HYGON
:
"cuda"
,
InfiniDeviceEnum
.
QY
:
"cuda"
,
InfiniDeviceEnum
.
ALI
:
"cuda"
,
}
test/infiniop/libinfiniop/utils.py
View file @
abab5652
...
...
@@ -433,6 +433,11 @@ def get_args():
action
=
"store_true"
,
help
=
"Run HYGON DCU test"
,
)
parser
.
add_argument
(
"--ali"
,
action
=
"store_true"
,
help
=
"Run ALI PPU test"
,
)
return
parser
.
parse_args
()
...
...
@@ -487,6 +492,7 @@ def filter_tensor_dtypes_by_device(device, tensor_dtypes):
InfiniDeviceEnum
.
ASCEND
,
InfiniDeviceEnum
.
ILUVATAR
,
InfiniDeviceEnum
.
CAMBRICON
,
InfiniDeviceEnum
.
ALI
,
):
return
tensor_dtypes
else
:
...
...
@@ -757,6 +763,10 @@ def get_test_devices(args):
import
torch
devices_to_test
.
append
(
InfiniDeviceEnum
.
HYGON
)
if
args
.
ali
:
import
torch
devices_to_test
.
append
(
InfiniDeviceEnum
.
ALI
)
if
not
devices_to_test
:
devices_to_test
=
[
InfiniDeviceEnum
.
CPU
]
...
...
xmake.lua
View file @
abab5652
...
...
@@ -125,6 +125,18 @@ if has_config("iluvatar-gpu") then
includes
(
"xmake/iluvatar.lua"
)
end
-- ali
option
(
"ali-ppu"
)
set_default
(
false
)
set_showmenu
(
true
)
set_description
(
"Whether to compile implementations for Ali PPU"
)
option_end
()
if
has_config
(
"ali-ppu"
)
then
add_defines
(
"ENABLE_ALI_API"
)
includes
(
"xmake/ali.lua"
)
end
-- qy
option
(
"qy-gpu"
)
set_default
(
false
)
...
...
@@ -276,6 +288,9 @@ target("infinirt")
if
has_config
(
"iluvatar-gpu"
)
then
add_deps
(
"infinirt-iluvatar"
)
end
if
has_config
(
"ali-ppu"
)
then
add_deps
(
"infinirt-ali"
)
end
if
has_config
(
"qy-gpu"
)
then
add_deps
(
"infinirt-qy"
)
add_files
(
"build/.objs/infinirt-qy/rules/qy.cuda/src/infinirt/cuda/*.cu.o"
,
{
public
=
true
})
...
...
@@ -309,6 +324,9 @@ target("infiniop")
if
has_config
(
"iluvatar-gpu"
)
then
add_deps
(
"infiniop-iluvatar"
)
end
if
has_config
(
"ali-ppu"
)
then
add_deps
(
"infiniop-ali"
)
end
if
has_config
(
"qy-gpu"
)
then
add_deps
(
"infiniop-qy"
)
add_files
(
"build/.objs/infiniop-qy/rules/qy.cuda/src/infiniop/ops/*/nvidia/*.cu.o"
,
{
public
=
true
})
...
...
@@ -364,6 +382,9 @@ target("infiniccl")
if
has_config
(
"iluvatar-gpu"
)
then
add_deps
(
"infiniccl-iluvatar"
)
end
if
has_config
(
"ali-ppu"
)
then
add_deps
(
"infiniccl-ali"
)
end
if
has_config
(
"qy-gpu"
)
then
add_deps
(
"infiniccl-qy"
)
add_files
(
"build/.objs/infiniccl-qy/rules/qy.cuda/src/infiniccl/cuda/*.cu.o"
,
{
public
=
true
})
...
...
xmake/ali.lua
0 → 100644
View file @
abab5652
-- Locate a cuDNN installation from the environment, checking the
-- conventional variable names in priority order. The result is either a
-- root path string or nil; it is also read later by the per-platform
-- linkdir setup inside the "infiniop-ali" target.
local CUDNN_ROOT = os.getenv("CUDNN_ROOT")
    or os.getenv("CUDNN_HOME")
    or os.getenv("CUDNN_PATH")

-- When found, expose the cuDNN headers to every target in this script.
if CUDNN_ROOT then
    add_includedirs(CUDNN_ROOT .. "/include")
end
-- Locate a CUTLASS checkout the same way; CUTLASS is header-only and its
-- headers live directly under the root, so the root itself is the include dir.
local CUTLASS_ROOT = os.getenv("CUTLASS_ROOT")
    or os.getenv("CUTLASS_HOME")
    or os.getenv("CUTLASS_PATH")

if CUTLASS_ROOT then
    add_includedirs(CUTLASS_ROOT)
end
-- Operator library for the Ali PPU backend. The toolchain is
-- CUDA-compatible, so this target reuses the NVIDIA kernel sources
-- (note the ../src/infiniop/**/nvidia/*.cu file globs below).
target("infiniop-ali")
    set_kind("static")
    add_deps("infini-utils")
    on_install(function (target) end) -- static library: nothing to install
    set_policy("build.cuda.devlink", true)
    set_toolchains("cuda")
    add_links("cudart", "cublas")
    if has_config("cudnn") then
        add_links("cudnn")
    end

    -- Resolve the CUDA driver stub directory relative to the nvcc binary so
    -- the link step succeeds on build machines without the driver installed.
    on_load(function (target)
        import("lib.detect.find_tool")
        local nvcc = find_tool("nvcc")
        if nvcc ~= nil then
            -- BUGFIX: declared `local`; the original assigned an
            -- undeclared `nvcc_path`, leaking a global variable.
            local nvcc_path
            if is_plat("windows") then
                nvcc_path = os.iorun("where nvcc"):match("(.-)\r?\n")
            else
                nvcc_path = nvcc.program
            end
            -- <cuda root>/lib64/stubs holds the libcuda driver stub.
            target:add("linkdirs", path.directory(path.directory(nvcc_path)) .. "/lib64/stubs")
            target:add("links", "cuda")
        end
    end)

    if is_plat("windows") then
        add_cuflags("-Xcompiler=/utf-8", "--expt-relaxed-constexpr", "--allow-unsupported-compiler")
        add_cuflags("-Xcompiler=/W3", "-Xcompiler=/WX")
        add_cxxflags("/FS")
        if CUDNN_ROOT ~= nil then
            add_linkdirs(CUDNN_ROOT .. "\\lib\\x64")
        end
    else
        add_cuflags("-Xcompiler=-Wall", "-Xcompiler=-Werror")
        add_cuflags("-Xcompiler=-fPIC")
        add_cuflags("--extended-lambda")
        add_culdflags("-Xcompiler=-fPIC")
        add_cxflags("-fPIC")
        add_cxxflags("-fPIC")
        add_cflags("-fPIC")
        add_cuflags("--expt-relaxed-constexpr")
        if CUDNN_ROOT ~= nil then
            add_linkdirs(CUDNN_ROOT .. "/lib")
        end
    end
    -- Keep these two warnings (commonly tripped by third-party CUDA
    -- headers) non-fatal despite -Werror above.
    add_cuflags("-Xcompiler=-Wno-error=deprecated-declarations", "-Xcompiler=-Wno-error=unused-function")

    -- Honour an explicit comma-separated `cuda_arch` config (e.g.
    -- "sm_80,sm_90"); otherwise generate code for the GPU present at
    -- build time ("native").
    local arch_opt = get_config("cuda_arch")
    if arch_opt and type(arch_opt) == "string" then
        for _, arch in ipairs(arch_opt:split(",")) do
            arch = arch:trim()
            local compute = arch:gsub("sm_", "compute_")
            add_cuflags("-gencode=arch=" .. compute .. ",code=" .. arch)
        end
    else
        add_cugencodes("native")
    end

    set_languages("cxx17")
    add_files("../src/infiniop/devices/nvidia/*.cu", "../src/infiniop/ops/*/nvidia/*.cu")
    if has_config("ninetoothed") then
        add_files("../build/ninetoothed/*.c", "../build/ninetoothed/*.cpp")
    end
target_end()
-- Runtime layer for the Ali PPU backend, compiled from the shared CUDA
-- runtime sources under ../src/infinirt/cuda.
target("infinirt-ali")
    set_kind("static")
    add_deps("infini-utils")
    on_install(function (target) end) -- static library: nothing to install
    set_policy("build.cuda.devlink", true)
    set_toolchains("cuda")
    add_links("cudart")

    if is_plat("windows") then
        add_cuflags("-Xcompiler=/utf-8", "--expt-relaxed-constexpr", "--allow-unsupported-compiler")
        add_cxxflags("/FS")
    else
        -- Position-independent code so this archive can be folded into the
        -- shared "infinirt" library.
        add_cuflags("-Xcompiler=-fPIC", "-Xcompiler=-shared")
        add_culdflags("-Xcompiler=-fPIC", "-Xcompiler=-shared")
        add_cxflags("-fPIC", "-shared")
        add_cxxflags("-fPIC", "-shared")
        add_shflags("-fPIC")
    end

    set_languages("cxx17")
    add_files("../src/infinirt/cuda/*.cu")
target_end()
-- Collective-communication layer for the Ali PPU backend. Only built when
-- the `ccl` option is enabled; on Windows (where NCCL is unavailable) the
-- target degrades to an empty archive and emits a warning.
target("infiniccl-ali")
    set_kind("static")
    add_deps("infinirt")
    on_install(function (target) end) -- static library: nothing to install

    if has_config("ccl") then
        set_policy("build.cuda.devlink", true)
        set_toolchains("cuda")
        add_links("cudart")
        if not is_plat("windows") then
            add_cuflags("-Xcompiler=-fPIC")
            add_culdflags("-Xcompiler=-fPIC")
            add_cxflags("-fPIC")
            add_cxxflags("-fPIC")
            -- Prefer an explicit NCCL installation when NCCL_ROOT is set;
            -- otherwise leave resolution to the system linker search path.
            local nccl_root = os.getenv("NCCL_ROOT")
            if nccl_root then
                add_includedirs(nccl_root .. "/include")
                add_links(nccl_root .. "/lib/libnccl.so")
            else
                add_links("nccl") -- Fall back to default nccl linking
            end
            add_files("../src/infiniccl/cuda/*.cu")
        else
            print("[Warning] NCCL is not supported on Windows")
        end
    end

    set_languages("cxx17")
target_end()
Prev
1
2
3
4
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment