Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
jerrrrry
infinicore
Commits
f1b8ab64
Unverified
Commit
f1b8ab64
authored
Feb 11, 2026
by
thatPepe
Committed by
GitHub
Feb 11, 2026
Browse files
Merge pull request #879 from InfiniTensor/issue/837
issue/837 - support int32 and int64 in cambricon add
parents
84201ad0
cc1d1559
Changes
2
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
8 additions
and
2 deletions
+8
-2
src/infiniop/ops/add/bang/add_bang.mlu
src/infiniop/ops/add/bang/add_bang.mlu
+5
-1
src/infiniop/ops/add/bang/add_bang_internal.mlu
src/infiniop/ops/add/bang/add_bang_internal.mlu
+3
-1
No files found.
src/infiniop/ops/add/bang/add_bang.mlu
View file @
f1b8ab64
...
...
@@ -31,7 +31,7 @@ infiniStatus_t Descriptor::create(
const auto &a_shape = a_desc->shape();
const auto &b_shape = b_desc->shape();
-        CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_BF16, INFINI_DTYPE_F32);
+        CHECK_DTYPE(dtype, INFINI_DTYPE_F16, INFINI_DTYPE_BF16, INFINI_DTYPE_F32
+                    , INFINI_DTYPE_I32, INFINI_DTYPE_I64
+        );
CHECK_SAME_SHAPE(c_shape, a_shape, b_shape);
...
...
@@ -59,6 +59,10 @@ infiniStatus_t Descriptor::calculate(
return _device_info->calculate<AddOp, bfloat16_t>(_info, workspace, output, inputs, queue);
case INFINI_DTYPE_F32:
return _device_info->calculate<AddOp, float>(_info, workspace, output, inputs, queue);
+    case INFINI_DTYPE_I32:
+        return _device_info->calculate<AddOp, int32_t>(_info, workspace, output, inputs, queue);
+    case INFINI_DTYPE_I64:
+        return _device_info->calculate<AddOp, int64_t>(_info, workspace, output, inputs, queue);
default:
return INFINI_STATUS_BAD_TENSOR_DTYPE;
}
...
...
src/infiniop/ops/add/bang/add_bang_internal.mlu
View file @
f1b8ab64
...
...
@@ -8,7 +8,7 @@ public:
static constexpr size_t num_inputs = 2;
template <typename T>
__mlu_device__ void operator()(T *out, const T *a, const T *b, size_t num_elements) const {
-        if constexpr (std::is_same_v<T, half> || std::is_same_v<T, bfloat16_t> || std::is_same_v<T, float>) {
+        if constexpr (std::is_same_v<T, half> || std::is_same_v<T, bfloat16_t> || std::is_same_v<T, float>
+                      || std::is_same_v<T, int32_t> || std::is_same_v<T, int64_t>
+        ) {
__bang_add(out, a, b, num_elements);
} else {
out = a + b;
...
...
@@ -21,5 +21,7 @@ LAUNCH_ELEMENTWISE_KERNEL_IMPL(Add, AddOp)
LAUNCH_ELEMENTWISE_KERNEL_INSTANTIATE(Add, half)
LAUNCH_ELEMENTWISE_KERNEL_INSTANTIATE(Add, bfloat16_t)
LAUNCH_ELEMENTWISE_KERNEL_INSTANTIATE(Add, float)
+LAUNCH_ELEMENTWISE_KERNEL_INSTANTIATE(Add, int32_t)
+LAUNCH_ELEMENTWISE_KERNEL_INSTANTIATE(Add, int64_t)
#endif // __ADD_BANG_INTERNAL_H__
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment