gaoqiong / composable_kernel · Commits
Commit dd29eb09
authored Apr 14, 2022 by Chao Liu

gemm/conv activation fusion example

parent ac0d8066
Showing 4 changed files with 115 additions and 8 deletions:

example/01_gemm/gemm_xdl_fp16.cpp                        +36  -2
example/09_convnd_fwd/convnd_fwd_xdl.cpp                 +36  -2
example/09_convnd_fwd/convnd_fwd_xdl_fp16.cpp            +36  -2
example/11_conv2d_bwd_weight/conv2d_bwd_weight_xdl.cpp    +7  -2
example/01_gemm/gemm_xdl_fp16.cpp
@@ -38,9 +38,43 @@ using ALayout = ck::tensor_layout::gemm::RowMajor;
 using BLayout = ck::tensor_layout::gemm::ColumnMajor;
 using CLayout = ck::tensor_layout::gemm::RowMajor;
 
-using AElementOp = ck::tensor_operation::element_wise::PassThrough;
+struct Relu
+{
+    __host__ __device__ constexpr void operator()(float& y, const float& x) const
+    {
+        const float a = x;
+        y = a > 0 ? a : 0;
+    }
+
+    __host__ __device__ constexpr void operator()(ck::half_t& y, const ck::half_t& x) const
+    {
+        const ck::half_t a = x;
+        y = a > 0 ? a : 0;
+    }
+};
+
+struct Hardswish
+{
+    __host__ __device__ constexpr void operator()(float& y, const float& x) const
+    {
+        float a = x;
+        float b = a + float{3};
+        float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
+        y = c;
+    }
+
+    __host__ __device__ constexpr void operator()(ck::half_t& y, const ck::half_t& x) const
+    {
+        float a = x;
+        float b = a + float{3};
+        float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
+        y = c;
+    }
+};
+
+using AElementOp = Relu;
 using BElementOp = ck::tensor_operation::element_wise::PassThrough;
-using CElementOp = ck::tensor_operation::element_wise::PassThrough;
+using CElementOp = Hardswish;
 
 static constexpr auto GemmDefault = ck::tensor_operation::device::GemmSpecialization::Default;
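The hunk swaps the PassThrough element-wise operators for custom functors: the device GEMM applies AElementOp/BElementOp to the A/B values it reads and CElementOp to each output value, so choosing Relu for A and Hardswish for C fuses the activations into the GEMM kernel instead of running them as a separate pass. As a minimal host-only sketch (not part of the commit), the float overloads can be copied verbatim, minus the __host__ __device__ qualifiers, and exercised with a plain C++ compiler to see what the fused operators compute:

// Host-only sketch, not from the commit: float overloads of the functors above.
#include <cstdio>

struct Relu
{
    constexpr void operator()(float& y, const float& x) const
    {
        const float a = x;
        y = a > 0 ? a : 0;
    }
};

struct Hardswish
{
    constexpr void operator()(float& y, const float& x) const
    {
        float a = x;
        float b = a + float{3};
        float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
        y = c;
    }
};

int main()
{
    const float xs[] = {-4.0f, -1.0f, 0.0f, 1.0f, 4.0f};
    for(float x : xs)
    {
        float r = 0, h = 0;
        Relu{}(r, x);      // what the example plugs in as AElementOp
        Hardswish{}(h, x); // what the example plugs in as CElementOp
        std::printf("x=%5.1f  relu=%7.4f  hardswish=%7.4f\n", x, r, h);
    }
    return 0;
}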
example/09_convnd_fwd/convnd_fwd_xdl.cpp
@@ -25,9 +25,43 @@ using AccDataType = float;
 template <ck::index_t... Is>
 using S = ck::Sequence<Is...>;
 
-using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
+struct Relu
+{
+    __host__ __device__ constexpr void operator()(float& y, const float& x) const
+    {
+        const float a = x;
+        y = a > 0 ? a : 0;
+    }
+
+    __host__ __device__ constexpr void operator()(ck::half_t& y, const ck::half_t& x) const
+    {
+        const ck::half_t a = x;
+        y = a > 0 ? a : 0;
+    }
+};
+
+struct Hardswish
+{
+    __host__ __device__ constexpr void operator()(float& y, const float& x) const
+    {
+        float a = x;
+        float b = a + float{3};
+        float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
+        y = c;
+    }
+
+    __host__ __device__ constexpr void operator()(ck::half_t& y, const ck::half_t& x) const
+    {
+        float a = x;
+        float b = a + float{3};
+        float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
+        y = c;
+    }
+};
+
+using InElementOp  = Relu;
 using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
-using OutElementOp = ck::tensor_operation::element_wise::PassThrough;
+using OutElementOp = Hardswish;
 
 static constexpr auto ConvFwdDefault =
     ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;
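The Hardswish functor added in this and the other forward examples avoids a branch on the sign test: the boolean (b > 0) is promoted to 0.0f/1.0f and acts as a mask, so the expression equals x * clamp(x + 3, 0, 6) * 0.166667, i.e. hardswish with 1/6 rounded to 0.166667. A standalone sketch (not part of the commit, plain standard library only) that checks the commit's expression against the usual clamp formulation:

// Sketch: compare the branch-light expression from the diff with hardswish
// written via clamp; the two agree exactly for float inputs.
#include <algorithm>
#include <cmath>
#include <cstdio>

float hardswish_commit(float x)
{
    float a = x;
    float b = a + float{3};
    return (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
}

float hardswish_clamp(float x)
{
    return x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) * 0.166667f;
}

int main()
{
    float max_diff = 0.0f;
    for(float x = -8.0f; x <= 8.0f; x += 0.25f)
    {
        max_diff = std::max(max_diff, std::fabs(hardswish_commit(x) - hardswish_clamp(x)));
    }
    std::printf("max |commit - clamp| = %g\n", max_diff); // expected: 0
    return 0;
}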
example/09_convnd_fwd/convnd_fwd_xdl_fp16.cpp
@@ -29,9 +29,43 @@ using InLayout = ck::tensor_layout::convolution::NHWC;
 using WeiLayout = ck::tensor_layout::convolution::KYXC;
 using OutLayout = ck::tensor_layout::convolution::NHWK;
 
-using InElementOp  = ck::tensor_operation::element_wise::PassThrough;
+struct Relu
+{
+    __host__ __device__ constexpr void operator()(float& y, const float& x) const
+    {
+        const float a = x;
+        y = a > 0 ? a : 0;
+    }
+
+    __host__ __device__ constexpr void operator()(ck::half_t& y, const ck::half_t& x) const
+    {
+        const ck::half_t a = x;
+        y = a > 0 ? a : 0;
+    }
+};
+
+struct Hardswish
+{
+    __host__ __device__ constexpr void operator()(float& y, const float& x) const
+    {
+        float a = x;
+        float b = a + float{3};
+        float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
+        y = c;
+    }
+
+    __host__ __device__ constexpr void operator()(ck::half_t& y, const ck::half_t& x) const
+    {
+        float a = x;
+        float b = a + float{3};
+        float c = (b > 0) * (b > float{6} ? float{6} : b) * a * float{0.166667};
+        y = c;
+    }
+};
+
+using InElementOp  = Relu;
 using WeiElementOp = ck::tensor_operation::element_wise::PassThrough;
-using OutElementOp = ck::tensor_operation::element_wise::PassThrough;
+using OutElementOp = Hardswish;
 
 static constexpr auto ConvFwdDefault =
     ck::tensor_operation::device::ConvolutionForwardSpecialization::Default;
example/11_conv2d_bwd_weight/conv2d_bwd_weight_xdl.cpp
@@ -72,8 +72,13 @@ using DeviceConvBwdWeightInstance = ck::tensor_operation::device::
         8>; // CBlockTransferScalarPerVector_NWaveNPerXdl
 // clang-format on
 
-using ReferenceConvBwdWeightInstance = ck::tensor_operation::host::ReferenceConvBwdWeight<
-    InDataType, WeiDataType, OutDataType, InElementOp, WeiElementOp, OutElementOp>;
+using ReferenceConvBwdWeightInstance =
+    ck::tensor_operation::host::ReferenceConvBwdWeight<InDataType,
+                                                       WeiDataType,
+                                                       OutDataType,
+                                                       InElementOp,
+                                                       WeiElementOp,
+                                                       OutElementOp>;
 
 int main(int argc, char* argv[])
 {