gaoqiong / MIGraphX

Commit d1d48bdc
Authored Sep 26, 2023 by Alan Turner

Cleanup

Parent: 59993d98
Showing 3 changed files with 2 additions and 127 deletions
+2  -2   src/targets/gpu/kernels/include/migraphx/kernels/ck_gemm_softmax_gemm.hpp
+0  -79  test/onnx/gen_onnx.py
+0  -46  test/onnx/old_gemm_softmax_gemm_test.onnx
src/targets/gpu/kernels/include/migraphx/kernels/ck_gemm_softmax_gemm.hpp
...
@@ -65,9 +65,9 @@ __device__ void ck_gemm_softmax_gemm_matrix(C c, A a, B b, B1 b1, Settings s)
         to_ck_tensor<C>());
     static_assert(desc.IsValid(), "Invalid ck gemm.");
-    const float scale = s.scale;
     G::Run(desc,
-           scale,
+           s.scale,
            to_ck_const_pointer(a.data()),
            to_ck_const_pointer(b.data()),
            to_ck_const_pointer(b1.data()),
...
test/onnx/gen_onnx.py
...
@@ -7799,82 +7799,3 @@ def where_mixed_test():
                           outputs=['z'])
     return ([node], [c, x, y], [z])
 
-
-@onnx_test()
-def gemm_softmax_gemm_test():
-    a = helper.make_tensor_value_info('a', TensorProto.FLOAT16, [1, 1])
-    b = helper.make_tensor_value_info('b', TensorProto.FLOAT16, [1, 1])
-    # c = helper.make_tensor_value_info('c', TensorProto.FLOAT16, [1, 1])
-    b1 = helper.make_tensor_value_info('b1', TensorProto.FLOAT16, [1, 1])
-    # bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT16, [1, 1])
-    out = helper.make_tensor_value_info('out', TensorProto.FLOAT16, [1, 1])
-
-    scale_array = np.array([1])
-    bias_array = np.array([0])
-    scale_tensor = helper.make_tensor(name='scale',
-                                      data_type=TensorProto.FLOAT16,
-                                      dims=[1, 1],
-                                      vals=[1])
-    bias_tensor = helper.make_tensor(name='bias',
-                                     data_type=TensorProto.FLOAT16,
-                                     dims=[1, 1],
-                                     vals=[0])
-
-    gemm1 = onnx.helper.make_node('MatMul',
-                                  inputs=['a', 'b'],
-                                  outputs=['gemm1_out'])
-    mul1 = onnx.helper.make_node('Mul',
-                                 inputs=['gemm1_out', 'scale'],
-                                 outputs=['mul1_out'])
-    add1 = onnx.helper.make_node('Add',
-                                 inputs=['mul1_out', 'bias'],
-                                 outputs=['add1_out'])
-    softmax = onnx.helper.make_node('Softmax',
-                                    inputs=['add1_out'],
-                                    outputs=['softmax_out'])
-    gemm2 = onnx.helper.make_node('MatMul',
-                                  inputs=['softmax_out', 'b1'],
-                                  outputs=['out'])
-
-    return ([gemm1, mul1, add1, softmax, gemm2], [a, b, b1], [out],
-            [scale_tensor, bias_tensor])
-
-
-@onnx_test()
-def old_gemm_softmax_gemm_test():
-    a = helper.make_tensor_value_info('a', TensorProto.FLOAT16, [1, 1])
-    b = helper.make_tensor_value_info('b', TensorProto.FLOAT16, [1, 1])
-    c = helper.make_tensor_value_info('c', TensorProto.FLOAT16, [1, 1])
-    b1 = helper.make_tensor_value_info('b1', TensorProto.FLOAT16, [1, 1])
-    bias = helper.make_tensor_value_info('bias', TensorProto.FLOAT16, [1, 1])
-    out = helper.make_tensor_value_info('out', TensorProto.FLOAT16, [1, 1])
-
-    scale_array = np.array([(1 / 8)])
-    scale_tensor = helper.make_tensor('scale', TensorProto.FLOAT16, [1, 1], [1])
-
-    gemm1 = onnx.helper.make_node('MatMul',
-                                  inputs=['a', 'b'],
-                                  outputs=['gemm1_out'])
-    mul1 = onnx.helper.make_node('Mul',
-                                 inputs=['gemm1_out', 'scale'],
-                                 outputs=['mul1_out'])
-    add1 = onnx.helper.make_node('Add',
-                                 inputs=['mul1_out', 'c'],
-                                 outputs=['add1_out'])
-    softmax = onnx.helper.make_node('Softmax',
-                                    inputs=['add1_out'],
-                                    outputs=['softmax_out'])
-    gemm2 = onnx.helper.make_node('MatMul',
-                                  inputs=['softmax_out', 'b1'],
-                                  outputs=['out'])
-
-    return ([gemm1, mul1, add1, softmax, gemm2], [a, b, c, b1, bias], [out],
-            [scale_tensor])
test/onnx/old_gemm_softmax_gemm_test.onnx
deleted 100644 → 0
[Binary file: serialized ONNX protobuf of the old_gemm_softmax_gemm_test graph (MatMul → Mul → Add → Softmax → MatMul over inputs a, b, c, b1, bias, output out); contents not rendered.]
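For reference, both removed generators build the same attention-style pattern that the ck_gemm_softmax_gemm kernel fuses: out = softmax(scale * (a @ b) + bias) @ b1. The NumPy sketch below is illustrative only; gemm_softmax_gemm_reference is a hypothetical helper, not code from this commit or repository.

import numpy as np

def gemm_softmax_gemm_reference(a, b, b1, scale=1.0, bias=0.0):
    # MatMul -> Mul -> Add, mirroring the removed ONNX graphs (hypothetical helper)
    x = scale * (a @ b) + bias
    # Numerically stable softmax over the last axis
    x = x - x.max(axis=-1, keepdims=True)
    e = np.exp(x)
    p = e / e.sum(axis=-1, keepdims=True)
    # Final MatMul with b1
    return p @ b1

# With the [1, 1] float16 shapes used by the removed tests:
a = np.ones((1, 1), dtype=np.float16)
b = np.ones((1, 1), dtype=np.float16)
b1 = np.ones((1, 1), dtype=np.float16)
print(gemm_softmax_gemm_reference(a, b, b1))  # [[1.]]

With scalar [1, 1] inputs the softmax over a single element is trivially 1, so the output equals b1.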