gaoqiong / composable_kernel

Commit ff46bd90, authored Mar 09, 2023 by rocking
Parent: dc7b6568

    Add conv dl instances and client example

Showing 20 changed files with 717 additions and 171 deletions.
Changed files:

  client_example/09_quantization/conv2d_fwd_bias_relu_perchannel_quantization.cpp (+15, -15)
  client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp (+15, -14)
  client_example/09_quantization/conv2d_fwd_perchannel_quantization.cpp (+15, -14)
  client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp (+15, -15)
  library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp (+41, -4)
  library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp (+41, -4)
  library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perchannel_quantization.hpp (+40, -4)
  library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perlayer_quantization.hpp (+40, -4)
  library/src/tensor_operation_instance/gpu/quantization/CMakeLists.txt (+25, -4)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp (+59, -0)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp (+82, -0)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp (+82, -0)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_int8_instance.hpp (+33, -0)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perchannel_quantization_int8_instance.cpp (+80, -0)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perlayer_quantization_int8_instance.cpp (+80, -0)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_int8_instance.hpp (+0, -84)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp (+3, -3)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp (+3, -3)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_int8_instance.hpp (+45, -0)
  library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp (+3, -3)
client_example/09_quantization/conv2d_fwd_bias_relu_perchannel_quantization.cpp

@@ -28,16 +28,15 @@ using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Cla
 static constexpr ck::index_t NumDimSpatial = 2;
 static constexpr ck::index_t G             = 1;
 static constexpr ck::index_t N             = 4;   // batch size
 static constexpr ck::index_t K             = 64;  // output channel
-static constexpr ck::index_t C             = 32;  // input channel
+static constexpr ck::index_t C             = 192; // input channel
 static constexpr ck::index_t Y             = 3;   // filter H
 static constexpr ck::index_t X             = 3;   // filter W
 static constexpr ck::index_t Hi            = 71;  // input H
 static constexpr ck::index_t Wi            = 71;  // input W
 static constexpr ck::index_t Ho            = 36;  // output H
 static constexpr ck::index_t Wo            = 36;  // output W

 struct SimpleDeviceMem
 {
     SimpleDeviceMem() = delete;

@@ -64,8 +63,8 @@ int main(int argc, char* argv[])
     std::array<ck::index_t, 5> bias_strides{K, 0, 1, 0, 0};
     std::array<ck::index_t, 5> requant_scale_lengths{G, N, K, Ho, Wo};
     std::array<ck::index_t, 5> requant_scale_strides{K, 0, 1, 0, 0};
-    std::array<ck::index_t, 5> out_lengths{G, N, C, Ho, Wo};
-    std::array<ck::index_t, 5> out_strides{N * Ho * Wo * C, Ho * Wo * C, 1, Wo * C, C};
+    std::array<ck::index_t, 5> out_lengths{G, N, K, Ho, Wo};
+    std::array<ck::index_t, 5> out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K};
     std::array<ck::index_t, 2> in_left_pad{1, 1};
     std::array<ck::index_t, 2> in_right_pad{1, 1};
     std::array<ck::index_t, 2> conv_strides{2, 2};

@@ -162,11 +161,12 @@ int main(int argc, char* argv[])
         }
     }

-    std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
-              << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
-
     // run the best intance
     if(best_op_id != -1)
     {
+        std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
+                  << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
+
         auto& op_ptr = op_ptrs[best_op_id];
         std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                   << std::endl;
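As a quick sanity check on the constants used in these examples (not part of the diff): with input size 71, symmetric padding of 1, a 3x3 filter, stride 2 and, presumably, dilation 1, the usual forward-convolution size relation gives

    Ho = (Hi + pad_left + pad_right - Y) / stride + 1 = (71 + 1 + 1 - 3) / 2 + 1 = 36

and likewise Wo = 36, which matches the Ho/Wo constants above, so the updated output lengths and strides stay consistent with the unchanged input shape.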
client_example/09_quantization/conv2d_fwd_bias_relu_perlayer_quantization.cpp

@@ -26,15 +26,15 @@ using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clam
 static constexpr ck::index_t NumDimSpatial = 2;
 static constexpr ck::index_t G             = 1;
 static constexpr ck::index_t N             = 4;   // batch size
 static constexpr ck::index_t K             = 64;  // output channel
-static constexpr ck::index_t C             = 32;  // input channel
+static constexpr ck::index_t C             = 192; // input channel
 static constexpr ck::index_t Y             = 3;   // filter H
 static constexpr ck::index_t X             = 3;   // filter W
 static constexpr ck::index_t Hi            = 71;  // input H
 static constexpr ck::index_t Wi            = 71;  // input W
 static constexpr ck::index_t Ho            = 36;  // output H
 static constexpr ck::index_t Wo            = 36;  // output W

 struct SimpleDeviceMem
 {

@@ -60,8 +60,8 @@ int main(int argc, char* argv[])
     std::array<ck::index_t, 5> weight_strides{K * Y * X * C, Y * X * C, 1, X * C, C};
     std::array<ck::index_t, 5> bias_lengths{G, N, K, Ho, Wo};
     std::array<ck::index_t, 5> bias_strides{K, 0, 1, 0, 0};
-    std::array<ck::index_t, 5> out_lengths{G, N, C, Ho, Wo};
-    std::array<ck::index_t, 5> out_strides{N * Ho * Wo * C, Ho * Wo * C, 1, Wo * C, C};
+    std::array<ck::index_t, 5> out_lengths{G, N, K, Ho, Wo};
+    std::array<ck::index_t, 5> out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K};
     std::array<ck::index_t, 2> in_left_pad{1, 1};
     std::array<ck::index_t, 2> in_right_pad{1, 1};
     std::array<ck::index_t, 2> conv_strides{2, 2};

@@ -156,11 +156,12 @@ int main(int argc, char* argv[])
         }
     }

-    std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
-              << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
-
     // run the best intance
     if(best_op_id != -1)
     {
+        std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
+                  << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
+
         auto& op_ptr = op_ptrs[best_op_id];
         std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                   << std::endl;
client_example/09_quantization/conv2d_fwd_perchannel_quantization.cpp

@@ -26,15 +26,15 @@ using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul2_C
 static constexpr ck::index_t NumDimSpatial = 2;
 static constexpr ck::index_t G             = 1;
 static constexpr ck::index_t N             = 4;   // batch size
 static constexpr ck::index_t K             = 64;  // output channel
-static constexpr ck::index_t C             = 32;  // input channel
+static constexpr ck::index_t C             = 192; // input channel
 static constexpr ck::index_t Y             = 3;   // filter H
 static constexpr ck::index_t X             = 3;   // filter W
 static constexpr ck::index_t Hi            = 71;  // input H
 static constexpr ck::index_t Wi            = 71;  // input W
 static constexpr ck::index_t Ho            = 36;  // output H
 static constexpr ck::index_t Wo            = 36;  // output W

 struct SimpleDeviceMem
 {

@@ -60,8 +60,8 @@ int main(int argc, char* argv[])
     std::array<ck::index_t, 5> weight_strides{K * Y * X * C, Y * X * C, 1, X * C, C};
     std::array<ck::index_t, 5> requant_scale_lengths{G, N, K, Ho, Wo};
     std::array<ck::index_t, 5> requant_scale_strides{K, 0, 1, 0, 0};
-    std::array<ck::index_t, 5> out_lengths{G, N, C, Ho, Wo};
-    std::array<ck::index_t, 5> out_strides{N * Ho * Wo * C, Ho * Wo * C, 1, Wo * C, C};
+    std::array<ck::index_t, 5> out_lengths{G, N, K, Ho, Wo};
+    std::array<ck::index_t, 5> out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K};
     std::array<ck::index_t, 2> in_left_pad{1, 1};
     std::array<ck::index_t, 2> in_right_pad{1, 1};
     std::array<ck::index_t, 2> conv_strides{2, 2};

@@ -156,11 +156,12 @@ int main(int argc, char* argv[])
         }
     }

-    std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
-              << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
-
     // run the best intance
     if(best_op_id != -1)
     {
+        std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
+                  << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
+
         auto& op_ptr = op_ptrs[best_op_id];
         std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                   << std::endl;
client_example/09_quantization/conv2d_fwd_perlayer_quantization.cpp

@@ -24,15 +24,15 @@ using OutElementOp = ck::tensor_operation::element_wise::Activation_Mul_Clamp<Ac
 static constexpr ck::index_t NumDimSpatial = 2;
 static constexpr ck::index_t G             = 1;
 static constexpr ck::index_t N             = 4;   // batch size
 static constexpr ck::index_t K             = 64;  // output channel
-static constexpr ck::index_t C             = 32;  // input channel
+static constexpr ck::index_t C             = 192; // input channel
 static constexpr ck::index_t Y             = 3;   // filter H
 static constexpr ck::index_t X             = 3;   // filter W
 static constexpr ck::index_t Hi            = 71;  // input H
 static constexpr ck::index_t Wi            = 71;  // input W
 static constexpr ck::index_t Ho            = 36;  // output H
 static constexpr ck::index_t Wo            = 36;  // output W

 struct SimpleDeviceMem
 {

@@ -56,8 +56,8 @@ int main(int argc, char* argv[])
     std::array<ck::index_t, 5> in_strides{N * Hi * Wi * C, Hi * Wi * C, 1, Wi * C, C};
     std::array<ck::index_t, 5> weight_lengths{G, K, C, Y, X};
     std::array<ck::index_t, 5> weight_strides{K * Y * X * C, Y * X * C, 1, X * C, C};
-    std::array<ck::index_t, 5> out_lengths{G, N, C, Ho, Wo};
-    std::array<ck::index_t, 5> out_strides{N * Ho * Wo * C, Ho * Wo * C, 1, Wo * C, C};
+    std::array<ck::index_t, 5> out_lengths{G, N, K, Ho, Wo};
+    std::array<ck::index_t, 5> out_strides{N * Ho * Wo * K, Ho * Wo * K, 1, Wo * K, K};
     std::array<ck::index_t, 2> in_left_pad{1, 1};
     std::array<ck::index_t, 2> in_right_pad{1, 1};
     std::array<ck::index_t, 2> conv_strides{2, 2};

@@ -150,11 +150,11 @@ int main(int argc, char* argv[])
         }
     }

-    std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
-              << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
-
     // run the best intance
     if(best_op_id != -1)
     {
+        std::cout << "Best Perf: " << std::setw(10) << best_avg_time << " ms, " << best_tflops
+                  << " TFlops, " << best_gb_per_sec << " GB/s, " << best_op_name << std::endl;
+
         auto& op_ptr = op_ptrs[best_op_id];
         std::cout << "Run the best instance without timing: " << op_ptr->GetTypeString()
                   << std::endl;
library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp

@@ -18,7 +18,7 @@ namespace device {
 namespace instance {

 // grouped conv2d forward, GNHWC/GKYXC/GNHWK
-void add_device_conv2d_bias_perchannel_quantization_int8_instances(
+void add_device_conv2d_dl_bias_perchannel_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2,
                                                               GNHWC,

@@ -34,7 +34,38 @@ void add_device_conv2d_bias_perchannel_quantization_int8_instances(
                                                               Add_Activation_Mul2_Clamp<PassThrough>>>>&
         instances);

-void add_device_conv2d_bias_relu_perchannel_quantization_int8_instances(
+void add_device_conv2d_dl_bias_relu_perchannel_quantization_int8_instances(
+    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2, GNHWC, GKYXC, GK_GK_Tuple, GNHWK,
+                                                              int8_t, int8_t, I32_F32_Tuple, int8_t,
+                                                              PassThrough, PassThrough,
+                                                              Add_Activation_Mul2_Clamp<Relu>>>>&
+        instances);
+
+void add_device_conv2d_xdl_bias_perchannel_quantization_int8_instances(
+    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2, GNHWC, GKYXC, GK_GK_Tuple, GNHWK,
+                                                              int8_t, int8_t, I32_F32_Tuple, int8_t,
+                                                              PassThrough, PassThrough,
+                                                              Add_Activation_Mul2_Clamp<PassThrough>>>>&
+        instances);
+
+void add_device_conv2d_xdl_bias_relu_perchannel_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2,
                                                               GNHWC,
                                                               GKYXC,

@@ -98,9 +129,15 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceGroupe
                      is_same_v<DsDataType, I32_F32_Tuple> && is_same_v<OutDataType, int8_t>)
         {
             if constexpr(is_same_v<Activation, PassThrough>)
-                add_device_conv2d_bias_perchannel_quantization_int8_instances(op_ptrs);
+            {
+                add_device_conv2d_dl_bias_perchannel_quantization_int8_instances(op_ptrs);
+                add_device_conv2d_xdl_bias_perchannel_quantization_int8_instances(op_ptrs);
+            }
             else if constexpr(is_same_v<Activation, Relu>)
-                add_device_conv2d_bias_relu_perchannel_quantization_int8_instances(op_ptrs);
+            {
+                add_device_conv2d_dl_bias_relu_perchannel_quantization_int8_instances(op_ptrs);
+                add_device_conv2d_xdl_bias_relu_perchannel_quantization_int8_instances(op_ptrs);
+            }
         }
     }
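The factory specialization above is the path the client examples in this commit exercise: the client names the DeviceGroupedConvFwdMultipleD signature it wants and receives every registered instance, which after this change includes both the DL and the XDL kernels. A minimal lookup sketch is shown below, following the pattern of the client examples; the local type aliases are spelled out from the CK headers already used in this commit, and only calls visible in the commit (GetInstances, GetTypeString) are used, so the setup of device buffers and arguments is deliberately omitted.

    // Minimal sketch of querying the quantization conv2d factory (assumptions noted above).
    #include "ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perchannel_quantization.hpp"
    #include <iostream>

    using GNHWC        = ck::tensor_layout::convolution::GNHWC;
    using GKYXC        = ck::tensor_layout::convolution::GKYXC;
    using GNHWK        = ck::tensor_layout::convolution::GNHWK;
    using GK           = ck::tensor_layout::convolution::G_K;
    using PassThrough  = ck::tensor_operation::element_wise::PassThrough;
    using Relu         = ck::tensor_operation::element_wise::Relu;
    using OutElementOp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp<Relu>;

    // int8 grouped conv2d fwd + bias + per-channel requantization, GNHWC/GKYXC/GNHWK
    using DeviceOp = ck::tensor_operation::device::DeviceGroupedConvFwdMultipleD<
        2, GNHWC, GKYXC, ck::Tuple<GK, GK>, GNHWK,
        int8_t, int8_t, ck::Tuple<int32_t, float>, int8_t,
        PassThrough, PassThrough, OutElementOp>;

    int main()
    {
        // Returns every DL and XDL instance registered for this signature.
        const auto op_ptrs = ck::tensor_operation::device::instance::
            DeviceOperationInstanceFactory<DeviceOp>::GetInstances();

        std::cout << "found " << op_ptrs.size() << " instances\n";
        for(const auto& op_ptr : op_ptrs)
            std::cout << op_ptr->GetTypeString() << '\n';
    }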
library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_bias_forward_perlayer_quantization.hpp

@@ -18,7 +18,7 @@ namespace device {
 namespace instance {

 // grouped conv2d forward, GNHWC/GKYXC/GNHWK
-void add_device_conv2d_bias_perlayer_quantization_int8_instances(
+void add_device_conv2d_dl_bias_perlayer_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2,
                                                               GNHWC,

@@ -34,7 +34,38 @@ void add_device_conv2d_bias_perlayer_quantization_int8_instances(
                                                               Add_Activation_Mul_Clamp<PassThrough>>>>&
         instances);

-void add_device_conv2d_bias_relu_perlayer_quantization_int8_instances(
+void add_device_conv2d_dl_bias_relu_perlayer_quantization_int8_instances(
+    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2, GNHWC, GKYXC, GK_Tuple, GNHWK,
+                                                              int8_t, int8_t, I32_Tuple, int8_t,
+                                                              PassThrough, PassThrough,
+                                                              Add_Activation_Mul_Clamp<Relu>>>>&
+        instances);
+
+void add_device_conv2d_xdl_bias_perlayer_quantization_int8_instances(
+    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2, GNHWC, GKYXC, GK_Tuple, GNHWK,
+                                                              int8_t, int8_t, I32_Tuple, int8_t,
+                                                              PassThrough, PassThrough,
+                                                              Add_Activation_Mul_Clamp<PassThrough>>>>&
+        instances);
+
+void add_device_conv2d_xdl_bias_relu_perlayer_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2,
                                                               GNHWC,
                                                               GKYXC,

@@ -98,9 +129,15 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceGroupe
                      is_same_v<DsDataType, I32_Tuple> && is_same_v<OutDataType, int8_t>)
         {
             if constexpr(is_same_v<Activation, PassThrough>)
-                add_device_conv2d_bias_perlayer_quantization_int8_instances(op_ptrs);
+            {
+                add_device_conv2d_dl_bias_perlayer_quantization_int8_instances(op_ptrs);
+                add_device_conv2d_xdl_bias_perlayer_quantization_int8_instances(op_ptrs);
+            }
             else if constexpr(is_same_v<Activation, Relu>)
-                add_device_conv2d_bias_relu_perlayer_quantization_int8_instances(op_ptrs);
+            {
+                add_device_conv2d_dl_bias_relu_perlayer_quantization_int8_instances(op_ptrs);
+                add_device_conv2d_xdl_bias_relu_perlayer_quantization_int8_instances(op_ptrs);
+            }
         }
     }
library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perchannel_quantization.hpp

@@ -18,7 +18,7 @@ namespace device {
 namespace instance {

 // grouped conv2d forward, GNHWC/GKYXC/GNHWK
-void add_device_conv2d_perchannel_quantization_int8_instances(
+void add_device_conv2d_dl_perchannel_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2,
                                                               GNHWC,
                                                               GKYXC,

@@ -33,7 +33,37 @@ void add_device_conv2d_perchannel_quantization_int8_instances(
                                                               Activation_Mul2_Clamp<PassThrough>>>>&
         instances);

-void add_device_conv2d_relu_perchannel_quantization_int8_instances(
+void add_device_conv2d_dl_relu_perchannel_quantization_int8_instances(
+    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2, GNHWC, GKYXC, GK_Tuple, GNHWK,
+                                                              int8_t, int8_t, F32_Tuple, int8_t,
+                                                              PassThrough, PassThrough,
+                                                              Activation_Mul2_Clamp<Relu>>>>&
+        instances);
+
+void add_device_conv2d_xdl_perchannel_quantization_int8_instances(
+    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2, GNHWC, GKYXC, GK_Tuple, GNHWK,
+                                                              int8_t, int8_t, F32_Tuple, int8_t,
+                                                              PassThrough, PassThrough,
+                                                              Activation_Mul2_Clamp<PassThrough>>>>&
+        instances);
+
+void add_device_conv2d_xdl_relu_perchannel_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2,
                                                               GNHWC,
                                                               GKYXC,

@@ -97,9 +127,15 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceGroupe
                      is_same_v<OutDataType, int8_t>)
         {
             if constexpr(is_same_v<Activation, PassThrough>)
-                add_device_conv2d_perchannel_quantization_int8_instances(op_ptrs);
+            {
+                add_device_conv2d_dl_perchannel_quantization_int8_instances(op_ptrs);
+                add_device_conv2d_xdl_perchannel_quantization_int8_instances(op_ptrs);
+            }
             else if constexpr(is_same_v<Activation, Relu>)
-                add_device_conv2d_relu_perchannel_quantization_int8_instances(op_ptrs);
+            {
+                add_device_conv2d_dl_relu_perchannel_quantization_int8_instances(op_ptrs);
+                add_device_conv2d_xdl_relu_perchannel_quantization_int8_instances(op_ptrs);
+            }
         }
     }
library/include/ck/library/tensor_operation_instance/gpu/quantization/grouped_convolution_forward_perlayer_quantization.hpp

@@ -18,7 +18,7 @@ namespace device {
 namespace instance {

 // grouped conv2d forward, GNHWC/GKYXC/GNHWK
-void add_device_conv2d_perlayer_quantization_int8_instances(
+void add_device_conv2d_dl_perlayer_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2,
                                                               GNHWC,
                                                               GKYXC,

@@ -33,7 +33,37 @@ void add_device_conv2d_perlayer_quantization_int8_instances(
                                                               Activation_Mul_Clamp<PassThrough>>>>&
         instances);

-void add_device_conv2d_relu_perlayer_quantization_int8_instances(
+void add_device_conv2d_dl_relu_perlayer_quantization_int8_instances(
+    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2, GNHWC, GKYXC, Empty_Tuple, GNHWK,
+                                                              int8_t, int8_t, Empty_Tuple, int8_t,
+                                                              PassThrough, PassThrough,
+                                                              Activation_Mul_Clamp<Relu>>>>&
+        instances);
+
+void add_device_conv2d_xdl_perlayer_quantization_int8_instances(
+    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2, GNHWC, GKYXC, Empty_Tuple, GNHWK,
+                                                              int8_t, int8_t, Empty_Tuple, int8_t,
+                                                              PassThrough, PassThrough,
+                                                              Activation_Mul_Clamp<PassThrough>>>>&
+        instances);
+
+void add_device_conv2d_xdl_relu_perlayer_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<2,
                                                               GNHWC,
                                                               GKYXC,

@@ -94,9 +124,15 @@ struct DeviceOperationInstanceFactory<ck::tensor_operation::device::DeviceGroupe
                      is_same_v<OutDataType, int8_t>)
         {
             if constexpr(is_same_v<Activation, PassThrough>)
-                add_device_conv2d_perlayer_quantization_int8_instances(op_ptrs);
+            {
+                add_device_conv2d_dl_perlayer_quantization_int8_instances(op_ptrs);
+                add_device_conv2d_xdl_perlayer_quantization_int8_instances(op_ptrs);
+            }
             else if constexpr(is_same_v<Activation, Relu>)
-                add_device_conv2d_relu_perlayer_quantization_int8_instances(op_ptrs);
+            {
+                add_device_conv2d_dl_relu_perlayer_quantization_int8_instances(op_ptrs);
+                add_device_conv2d_xdl_relu_perlayer_quantization_int8_instances(op_ptrs);
+            }
         }
     }
library/src/tensor_operation_instance/gpu/quantization/CMakeLists.txt

+set(CONV2D_PERLAYER_QUANT_SRC
+    conv2d_fwd/device_conv2d_dl_perlayer_quantization_int8_instance.cpp
+    conv2d_fwd/device_conv2d_xdl_perlayer_quantization_int8_instance.cpp
+)
+
+set(CONV2D_PERCHANNEL_QUANT_SRC
+    conv2d_fwd/device_conv2d_dl_perchannel_quantization_int8_instance.cpp
+    conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp
+)
+
+set(CONV2D_BIAS_PERLAYER_QUANT_SRC
+    conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp
+    conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp
+)
+
+set(CONV2D_BIAS_PERCHANNEL_QUANT_SRC
+    conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp
+    conv2d_fwd/device_conv2d_xdl_bias_perchannel_quantization_int8_instance.cpp
+)
+
 add_instance_library(device_quantization_instance
-    conv2d_fwd/device_conv2d_bias_perchannel_quantization_int8_instance.cpp
-    conv2d_fwd/device_conv2d_bias_perlayer_quantization_int8_instance.cpp
-    conv2d_fwd/device_conv2d_perchannel_quantization_int8_instance.cpp
-    conv2d_fwd/device_conv2d_perlayer_quantization_int8_instance.cpp
+    ${CONV2D_PERLAYER_QUANT_SRC}
+    ${CONV2D_PERCHANNEL_QUANT_SRC}
+    ${CONV2D_BIAS_PERLAYER_QUANT_SRC}
+    ${CONV2D_BIAS_PERCHANNEL_QUANT_SRC}
 )
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/conv2d_quantization_common.hpp
new file (0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"
#include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
#include "ck/tensor_operation/gpu/device/convolution_forward_specialization.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using Empty_Tuple = ck::Tuple<>;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using GNHWC = ck::tensor_layout::convolution::GNHWC;
using GKYXC = ck::tensor_layout::convolution::GKYXC;
using GNHWK = ck::tensor_layout::convolution::GNHWK;
using GK    = ck::tensor_layout::convolution::G_K;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using Relu        = ck::tensor_operation::element_wise::Relu;

using GK_Tuple      = ck::Tuple<GK>;
using GK_GK_Tuple   = ck::Tuple<GK, GK>;
using I32_Tuple     = ck::Tuple<int32_t>;
using F32_Tuple     = ck::Tuple<float>;
using I32_F32_Tuple = ck::Tuple<int32_t, float>;

using Mul_Clamp          = ck::tensor_operation::element_wise::Activation_Mul_Clamp<PassThrough>;
using Relu_Mul_Clamp     = ck::tensor_operation::element_wise::Activation_Mul_Clamp<Relu>;
using Add_Mul_Clamp      = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp<PassThrough>;
using Add_Relu_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp<Relu>;

using Mul2_Clamp          = ck::tensor_operation::element_wise::Activation_Mul2_Clamp<PassThrough>;
using Relu_Mul2_Clamp     = ck::tensor_operation::element_wise::Activation_Mul2_Clamp<Relu>;
using Add_Mul2_Clamp      = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp<PassThrough>;
using Add_Relu_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp<Relu>;

static constexpr ck::index_t NDimSpatial = 2;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

static constexpr auto ConvFwdDefault =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;
static constexpr auto ConvFwd1x1P0 =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Filter1x1Pad0;
static constexpr auto ConvFwd1x1S1P0 =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0;

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
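The aliases collected in this common header name the fused int8 requantization epilogues that the instance files below reuse: the Mul variants carry a single per-layer scale, the Mul2 variants read an extra G_K scale tensor (per-channel), and the Add_ variants additionally fold in an int32 bias before the activation. The scalar sketch below is only my reading of those operator names (activation, then scale, then clamp), not the CK implementation; the clamp bounds and rounding behaviour are assumptions for illustration.

    #include <algorithm>
    #include <cstdint>

    // Sketch in the spirit of Add_Activation_Mul_Clamp<Relu> applied to one output element.
    // acc   : int32 convolution accumulator
    // bias  : int32 bias (the "Add"); omitted by the non-bias ops
    // scale : per-layer requantization scale (the Mul2 ops would index a per-channel scale[k])
    inline int8_t requantize_sketch(int32_t acc, int32_t bias, float scale)
    {
        float x = static_cast<float>(acc + bias);          // Add
        x       = std::max(x, 0.0f);                       // Activation (Relu here)
        x       = x * scale;                               // Mul: rescale into int8 range
        x       = std::min(std::max(x, -128.0f), 127.0f);  // Clamp to int8 (assumed bounds)
        return static_cast<int8_t>(x);                     // simplified rounding (assumption)
    }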
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perchannel_quantization_int8_instance.cpp
new file (0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "device_conv2d_dl_int8_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_conv2d_dl_bias_perchannel_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, GK_GK_Tuple, GNHWK,
                                                              int8_t, int8_t, I32_F32_Tuple, int8_t,
                                                              PassThrough, PassThrough, Add_Mul2_Clamp>>>& instances)
{
    // dl
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_GK_Tuple, I32_F32_Tuple, Add_Mul2_Clamp, ConvFwdDefault, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_GK_Tuple, I32_F32_Tuple, Add_Mul2_Clamp, ConvFwd1x1P0, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_GK_Tuple, I32_F32_Tuple, Add_Mul2_Clamp, ConvFwd1x1S1P0, 4>{});
}

void add_device_conv2d_dl_bias_relu_perchannel_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, GK_GK_Tuple, GNHWK,
                                                              int8_t, int8_t, I32_F32_Tuple, int8_t,
                                                              PassThrough, PassThrough, Add_Relu_Mul2_Clamp>>>& instances)
{
    // dl
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_GK_Tuple, I32_F32_Tuple, Add_Relu_Mul2_Clamp, ConvFwdDefault, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_GK_Tuple, I32_F32_Tuple, Add_Relu_Mul2_Clamp, ConvFwd1x1P0, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_GK_Tuple, I32_F32_Tuple, Add_Relu_Mul2_Clamp, ConvFwd1x1S1P0, 4>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_bias_perlayer_quantization_int8_instance.cpp
new file (0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "device_conv2d_dl_int8_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_conv2d_dl_bias_perlayer_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, GK_Tuple, GNHWK,
                                                              int8_t, int8_t, I32_Tuple, int8_t,
                                                              PassThrough, PassThrough, Add_Mul_Clamp>>>& instances)
{
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, I32_Tuple, Add_Mul_Clamp, ConvFwdDefault, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, I32_Tuple, Add_Mul_Clamp, ConvFwd1x1P0, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, I32_Tuple, Add_Mul_Clamp, ConvFwd1x1S1P0, 4>{});
}

void add_device_conv2d_dl_bias_relu_perlayer_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, GK_Tuple, GNHWK,
                                                              int8_t, int8_t, I32_Tuple, int8_t,
                                                              PassThrough, PassThrough, Add_Relu_Mul_Clamp>>>& instances)
{
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, I32_Tuple, Add_Relu_Mul_Clamp, ConvFwdDefault, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, I32_Tuple, Add_Relu_Mul_Clamp, ConvFwd1x1P0, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, I32_Tuple, Add_Relu_Mul_Clamp, ConvFwd1x1S1P0, 4>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_int8_instance.hpp
new file (0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "conv2d_quantization_common.hpp"
#include "ck/tensor_operation/gpu/device/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// clang-format off
template <typename DsLayout,
          typename DsDatatype,
          typename OutElementOp,
          ConvolutionForwardSpecialization ConvSpec,
          index_t DstScalarPerVector>
using device_grouped_conv2d_dl_int8_instances = std::tuple<
    // ###########################################| NDim| InData| WeiData| MultpleD| OutData| AccData| InLayout| WeiLayout| MultipleD| OutLayout| In| Wei| Out| Convolution| GEMM| Block| MPer| NPer| K0Per| K1| M1Per| N1Per| KPer| M11N11Thread| M11N11Thread| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| BBlockTransfer| CThreadTransfer| CThreadTransfer| CThreadTransfer|
    // ###########################################| Spatial| Type| Type| Type| Type| Type| | | Layout| | Elementwise| Elementwise| Elementwise| Forward| Spacialization| Size| Block| Block| Block| | ThreadM111| ThreadN111| Thread| ClusterM110Xs| ClusterN110Xs| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| ThreadSliceLengths| ThreadClusterLengths| ThreadCluster| SrcAccess| SrcVectorTensor| SrcVectorTensor| DstVectorTensor| SrcDstAccess| SrcDstVectorDim| DstScalarPerVector|
    // ###########################################| | | | | | | | | | | Operation| Operation| Operation| Specialization| | | | | | | | | | | | K0_M0_M1_K1| K0_M0_M1_K1| ArrangeOrder| Order| Lengths_K0_M0_M1_K1| ContiguousDimOrder| Lengths_K0_M0_M1_K1| K0_N0_N1_K1| K0_N0_N1_K1| ArrangeOrder| Order| Lengths_K0_N0_N1_K1| ContiguousDimOrder| Lengths_K0_N0_N1_K1| Order| | |
    DeviceGroupedConvFwdDlMultipleD_NHWC_KYXC_NHWK<NDimSpatial, int8_t, int8_t, DsDatatype, int8_t, int32_t, GNHWC, GKYXC, DsLayout, GNHWK, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 256, 128, 128, 16, 4, 4, 4, 1, S<8, 2>, S<8, 2>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<8, 1, 1, 4>, S<2, 1, 128, 1>, S<1, 2, 0, 3>, S<1, 2, 0, 3>, S<4, 1, 1, 4>, S<1, 2, 0, 3>, S<1, 1, 1, 4>, S<0, 1, 2, 3, 4, 5>, 5, DstScalarPerVector>
    >;
// clang-format on

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perchannel_quantization_int8_instance.cpp
new file (0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "device_conv2d_dl_int8_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_conv2d_dl_perchannel_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, GK_Tuple, GNHWK,
                                                              int8_t, int8_t, F32_Tuple, int8_t,
                                                              PassThrough, PassThrough, Mul2_Clamp>>>& instances)
{
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, F32_Tuple, Mul2_Clamp, ConvFwdDefault, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, F32_Tuple, Mul2_Clamp, ConvFwd1x1P0, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, F32_Tuple, Mul2_Clamp, ConvFwd1x1S1P0, 4>{});
}

void add_device_conv2d_dl_relu_perchannel_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, GK_Tuple, GNHWK,
                                                              int8_t, int8_t, F32_Tuple, int8_t,
                                                              PassThrough, PassThrough, Relu_Mul2_Clamp>>>& instances)
{
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, F32_Tuple, Relu_Mul2_Clamp, ConvFwdDefault, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, F32_Tuple, Relu_Mul2_Clamp, ConvFwd1x1P0, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<GK_Tuple, F32_Tuple, Relu_Mul2_Clamp, ConvFwd1x1S1P0, 4>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_dl_perlayer_quantization_int8_instance.cpp
new file (0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "device_conv2d_dl_int8_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

void add_device_conv2d_dl_perlayer_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, Empty_Tuple, GNHWK,
                                                              int8_t, int8_t, Empty_Tuple, int8_t,
                                                              PassThrough, PassThrough, Mul_Clamp>>>& instances)
{
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<Empty_Tuple, Empty_Tuple, Mul_Clamp, ConvFwdDefault, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<Empty_Tuple, Empty_Tuple, Mul_Clamp, ConvFwd1x1P0, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<Empty_Tuple, Empty_Tuple, Mul_Clamp, ConvFwd1x1S1P0, 4>{});
}

void add_device_conv2d_dl_relu_perlayer_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, Empty_Tuple, GNHWK,
                                                              int8_t, int8_t, Empty_Tuple, int8_t,
                                                              PassThrough, PassThrough, Relu_Mul_Clamp>>>& instances)
{
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<Empty_Tuple, Empty_Tuple, Relu_Mul_Clamp, ConvFwdDefault, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<Empty_Tuple, Empty_Tuple, Relu_Mul_Clamp, ConvFwd1x1P0, 4>{});
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_dl_int8_instances<Empty_Tuple, Empty_Tuple, Relu_Mul_Clamp, ConvFwd1x1S1P0, 4>{});
}

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_int8_instance.hpp
deleted (100644 → 0)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

#include "ck/ck.hpp"
#include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp"
#include "ck/tensor_operation/gpu/element/element_wise_operation.hpp"
#include "ck/library/tensor_operation_instance/add_device_operation_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

using Empty_Tuple = ck::Tuple<>;

template <ck::index_t... Is>
using S = ck::Sequence<Is...>;

using GNHWC = ck::tensor_layout::convolution::GNHWC;
using GKYXC = ck::tensor_layout::convolution::GKYXC;
using GNHWK = ck::tensor_layout::convolution::GNHWK;
using GK    = ck::tensor_layout::convolution::G_K;

using PassThrough = ck::tensor_operation::element_wise::PassThrough;
using Relu        = ck::tensor_operation::element_wise::Relu;

using GK_Tuple      = ck::Tuple<GK>;
using GK_GK_Tuple   = ck::Tuple<GK, GK>;
using I32_Tuple     = ck::Tuple<int32_t>;
using F32_Tuple     = ck::Tuple<float>;
using I32_F32_Tuple = ck::Tuple<int32_t, float>;

using Mul_Clamp          = ck::tensor_operation::element_wise::Activation_Mul_Clamp<PassThrough>;
using Relu_Mul_Clamp     = ck::tensor_operation::element_wise::Activation_Mul_Clamp<Relu>;
using Add_Mul_Clamp      = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp<PassThrough>;
using Add_Relu_Mul_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul_Clamp<Relu>;

using Mul2_Clamp          = ck::tensor_operation::element_wise::Activation_Mul2_Clamp<PassThrough>;
using Relu_Mul2_Clamp     = ck::tensor_operation::element_wise::Activation_Mul2_Clamp<Relu>;
using Add_Mul2_Clamp      = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp<PassThrough>;
using Add_Relu_Mul2_Clamp = ck::tensor_operation::element_wise::Add_Activation_Mul2_Clamp<Relu>;

static constexpr ck::index_t NDimSpatial = 2;

static constexpr auto GemmSpec = ck::tensor_operation::device::GemmSpecialization::MNKPadding;

static constexpr auto ConvFwdDefault =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;
static constexpr auto ConvFwd1x1P0 =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Filter1x1Pad0;
static constexpr auto ConvFwd1x1S1P0 =
    ck::tensor_operation::device::ConvolutionForwardSpecialization::Filter1x1Stride1Pad0;

template <typename DsLayout,
          typename DsDatatype,
          typename OutElementOp,
          ConvolutionForwardSpecialization ConvSpec,
          index_t DstScalarPerVector = 16>
// clang-format off
using device_grouped_conv2d_xdl_int8_instances = std::tuple<
    //########################################| NumDim| A| B| Ds| E| AData| BData| AccData| CShuffle| Ds| EData| A| B| CDE| ConvForward| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
    //########################################| Spatial| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
    //########################################| | | | | | | | | | | | Operation| Operation| Operation| | | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 256, 64, 16, 16, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 64, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 64, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 64, 64, 16, 16, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 64, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 32, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 64, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<2, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 32, 64, 64, 16, 16, 32, 32, 1, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector>
    >;
// clang-format on

} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_bias_perchannel_quantization_int8_instance.cpp
→
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_
xdl_
bias_perchannel_quantization_int8_instance.cpp
View file @
ff46bd90
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

-#include "device_conv2d_int8_instance.hpp"
+#include "device_conv2d_xdl_int8_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

-void add_device_conv2d_bias_perchannel_quantization_int8_instances(
+void add_device_conv2d_xdl_bias_perchannel_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, ...

@@ -41,7 +41,7 @@ void add_device_conv2d_bias_perchannel_quantization_int8_instances(
                                                  8>{});
 }

-void add_device_conv2d_bias_relu_perchannel_quantization_int8_instances(
+void add_device_conv2d_xdl_bias_relu_perchannel_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, ...
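The hunks above only show the edges of the change, so here is a minimal sketch of the registration pattern these renamed functions follow, assuming the add_device_operation_instances helper from the instance library. DeviceOp abbreviates the full DeviceGroupedConvFwdMultipleD<...> interface type spelled out in the real signature, and the exact tuple alias the file binds may differ from the one shown here.

// Sketch only, not the verbatim file body: the renamed function appends one
// device-operation object per tuple element to the caller-supplied vector.
void add_device_conv2d_xdl_bias_perchannel_quantization_int8_instances(
    std::vector<std::unique_ptr<DeviceOp>>& instances) // DeviceOp: abbreviated interface type
{
    add_device_operation_instances(
        instances,
        device_grouped_conv2d_xdl_int8_instances<DsLayout,
                                                 DsDatatype,
                                                 OutElementOp,
                                                 ConvSpec,
                                                 8>{}); // trailing "8>{});" matches the hunk above
}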
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_bias_perlayer_quantization_int8_instance.cpp → library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_bias_perlayer_quantization_int8_instance.cpp
View file @ ff46bd90
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

-#include "device_conv2d_int8_instance.hpp"
+#include "device_conv2d_xdl_int8_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

-void add_device_conv2d_bias_perlayer_quantization_int8_instances(
+void add_device_conv2d_xdl_bias_perlayer_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, ...

@@ -41,7 +41,7 @@ void add_device_conv2d_bias_perlayer_quantization_int8_instances(
                                                  8>{});
 }

-void add_device_conv2d_bias_relu_perlayer_quantization_int8_instances(
+void add_device_conv2d_xdl_bias_relu_perlayer_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, ...
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_int8_instance.hpp 0 → 100644
View file @ ff46bd90
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include "conv2d_quantization_common.hpp"
#include "ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_multiple_d_xdl_cshuffle.hpp"
namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

// clang-format off
template <typename DsLayout,
          typename DsDatatype,
          typename OutElementOp,
          ConvolutionForwardSpecialization ConvSpec,
          index_t DstScalarPerVector>
using device_grouped_conv2d_xdl_int8_instances = std::tuple<
//########################################| NumDim| A| B| Ds| E| AData| BData| AccData| CShuffle| Ds| EData| A| B| CDE| ConvForward| GEMM| NumGemmK| Block| MPer| NPer| KPer| AK1| BK1| MPer| NPer| MXdl| NXdl| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockTransfer| ABlockLds| BBlockTransfer| BBlockTransfer| BBlockTransfer| BlockTransfer| BBlockTransfer| BBlockTransfer| BBlockLds| CShuffle| CShuffle| CBlockTransferClusterLengths| CBlockTransfer|
//########################################| Spatial| Layout| Layout| Layout| Layout| Type| Type| Type| DataType| DataType| Type| Elementwise| Elementwise| Elementwise| Specialization| Specialization| Prefetch| Size| Block| Block| Block| | | XDL| XDL| Per| Per| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraM| ThreadCluster| ThreadCluster| SrcAccessOrder| SrcVectorDim| SrcScalar| DstScalar| AddExtraN| MXdlPerWave| NXdlPerWave| _MBlock_MWaveMPerXdl| ScalarPerVector|
//########################################| | | | | | | | | | | | Operation| Operation| Operation| | | Stage| | | | | | | | | Wave| Wave| Lengths_K0_M_K1| ArrangeOrder| | | PerVector| PerVector_K1| | Lengths_K0_N_K1| ArrangeOrder| | | PerVector| PerVector_K1| | PerShuffle| PerShuffle| _NBlock_NWaveNPerXdl| _NWaveNPerXdl|
//########################################| | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | | |
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 256, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 256, 64, 16, 16, 32, 32, 2, 4, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 128, 64, 16, 16, 32, 32, 4, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 64, 128, 64, 16, 16, 32, 32, 2, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 64, 64, 64, 16, 16, 32, 32, 2, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 128, 64, 64, 16, 16, 32, 32, 2, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 256, 64, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 64, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 128, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 64, 1, 2>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 128, 32, 128, 64, 16, 16, 32, 32, 1, 2, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 32, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 4>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 64, 32, 64, 16, 16, 32, 32, 2, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector>,
    DeviceGroupedConvFwdMultipleD_Xdl_CShuffle<NDimSpatial, GNHWC, GKYXC, DsLayout, GNHWK, int8_t, int8_t, int32_t, int32_t, DsDatatype, int8_t, PassThrough, PassThrough, OutElementOp, ConvSpec, GemmSpec, 1, 64, 32, 64, 64, 16, 16, 32, 32, 1, 2, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, S<4, 16, 1>, S<1, 0, 2>, S<1, 0, 2>, 2, 16, 16, 1, 1, 1, S<1, 32, 1, 2>, DstScalarPerVector>
    >;
// clang-format on
} // namespace instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
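For completeness, below is a hedged sketch of how client code typically consumes the instances registered through functions like the ones in this commit, assuming the DeviceOperationInstanceFactory helper from the tensor_operation_instance headers. ConvDeviceOp, the argument list, and the surrounding control flow are placeholders for illustration, not the exact client-example code added here.

// Hypothetical client-side sketch (inside the client's main/run function):
// enumerate the registered int8 conv2d instances and run the first one that
// supports the problem. ConvDeviceOp abbreviates the
// DeviceGroupedConvFwdMultipleD<...> interface the factory is specialized on.
using Factory =
    ck::tensor_operation::device::instance::DeviceOperationInstanceFactory<ConvDeviceOp>;

auto op_ptrs = Factory::GetInstances(); // one unique_ptr per registered instance

for(auto& op_ptr : op_ptrs)
{
    auto argument_ptr = op_ptr->MakeArgumentPointer(/* in/wei/ds/out pointers, lengths,
                                                       strides, element-wise operators */);
    if(op_ptr->IsSupportedArgument(argument_ptr.get()))
    {
        auto invoker_ptr = op_ptr->MakeInvokerPointer();
        invoker_ptr->Run(argument_ptr.get(), StreamConfig{});
        break;
    }
}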
library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_perchannel_quantization_int8_instance.cpp → library/src/tensor_operation_instance/gpu/quantization/conv2d_fwd/device_conv2d_xdl_perchannel_quantization_int8_instance.cpp
View file @ ff46bd90
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2022, Advanced Micro Devices, Inc. All rights reserved.

-#include "device_conv2d_int8_instance.hpp"
+#include "device_conv2d_xdl_int8_instance.hpp"

namespace ck {
namespace tensor_operation {
namespace device {
namespace instance {

-void add_device_conv2d_perchannel_quantization_int8_instances(
+void add_device_conv2d_xdl_perchannel_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, ...

@@ -41,7 +41,7 @@ void add_device_conv2d_perchannel_quantization_int8_instances(
                                                  8>{});
 }

-void add_device_conv2d_relu_perchannel_quantization_int8_instances(
+void add_device_conv2d_xdl_relu_perchannel_quantization_int8_instances(
     std::vector<std::unique_ptr<DeviceGroupedConvFwdMultipleD<NDimSpatial, GNHWC, GKYXC, ...