gaoqiong / composable_kernel_ROCM · Commits · cd4d4629

Commit cd4d4629, authored Jan 07, 2025 by danyao12
Merge branch 'develop' into ck_tile/fa_bwd_v3
Parents: 21d12bb7, 888317e6
Changes: 439 files
Showing 20 changed files with 1568 additions and 166 deletions (the remaining changed files are not rendered in this view):

- include/ck_tile/core/utility/static_counter.hpp (+116, -0)
- include/ck_tile/host.hpp (+9, -1)
- include/ck_tile/host/arg_parser.hpp (+44, -2)
- include/ck_tile/host/device_memory.hpp (+35, -0)
- include/ck_tile/host/fill.hpp (+175, -6)
- include/ck_tile/host/host_tensor.hpp (+126, -18)
- include/ck_tile/host/joinable_thread.hpp (+27, -0)
- include/ck_tile/host/reference/reference_elementwise.hpp (+47, -0)
- include/ck_tile/host/reference/reference_fused_moe.hpp (+196, -0)
- include/ck_tile/host/reference/reference_gemm.hpp (+59, -105)
- include/ck_tile/host/reference/reference_layernorm2d_fwd.hpp (+32, -5)
- include/ck_tile/host/reference/reference_moe_sorting.hpp (+97, -0)
- include/ck_tile/host/reference/reference_permute.hpp (+76, -0)
- include/ck_tile/host/reference/reference_reduce.hpp (+9, -8)
- include/ck_tile/host/reference/reference_rmsnorm2d_fwd.hpp (+52, -0)
- include/ck_tile/host/reference/reference_rowwise_quantization2d.hpp (+33, -0)
- include/ck_tile/host/reference/reference_softmax.hpp (+59, -21)
- include/ck_tile/host/reference/reference_topk.hpp (+124, -0)
- include/ck_tile/ops/add_rmsnorm2d_rdquant.hpp (+12, -0)
- include/ck_tile/ops/add_rmsnorm2d_rdquant/kernel/add_rmsnorm2d_rdquant_fwd_kernel.hpp (+240, -0)
include/ck_tile/core/utility/static_counter.hpp (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core/config.hpp"

namespace ck_tile {

template <typename Context, index_t Start = 0, index_t Step = 1>
struct static_counter
{
    public:
    template <typename Unique>
    static constexpr index_t next()
    {
        return next<Unique>(0) * Step + Start;
    }

    template <unsigned long long>
    static constexpr index_t next()
    {
        struct Unique
        {
        };
        return next<Unique>(0) * Step + Start;
    }

    template <typename Unique>
    static constexpr index_t current()
    {
        return current<Unique>(0) * Step + Start;
    }

    template <unsigned long long>
    static constexpr index_t current()
    {
        struct Unique
        {
        };
        return current<Unique>(0) * Step + Start;
    }

    private:
    template <index_t I>
    struct slot
    {
        _Pragma("GCC diagnostic push");
        _Pragma("GCC diagnostic ignored \"-Wundefined-internal\"");
        friend constexpr bool slot_allocated(slot<I>);
        _Pragma("GCC diagnostic pop");
    };

    template <index_t I>
    struct allocate_slot
    {
        friend constexpr bool slot_allocated(slot<I>) { return true; }
        enum
        {
            value = I
        };
    };

    // If slot_allocated(slot<I>) has NOT been defined, then SFINAE will keep this function out of
    // the overload set...
    template <typename Unique, index_t I = 0, bool = slot_allocated(slot<I>())>
    static constexpr index_t next(index_t)
    {
        return next<Unique, I + 1>(0);
    }

    // ...And this function will be used, instead, which will define slot_allocated(slot<I>) via
    // allocate_slot<I>.
    template <typename Unique, index_t I = 0>
    static constexpr index_t next(double)
    {
        return allocate_slot<I>::value;
    }

    // If slot_allocated(slot<I>) has NOT been defined, then SFINAE will keep this function out of
    // the overload set...
    template <typename Unique, index_t I = Start, bool = slot_allocated(slot<I>())>
    static constexpr index_t current(index_t)
    {
        return current<Unique, I + 1>(0);
    }

    // ...And this function will be used, instead, which will return the current counter, or assert
    // in case next() hasn't been called yet.
    template <typename Unique, index_t I = Start>
    static constexpr index_t current(double)
    {
        static_assert(I != 0, "You must invoke next() first");
        return I - 1;
    }
};

namespace impl {
template <int I>
struct static_counter_uniq_;
}

#define MAKE_SC() \
    ck_tile::static_counter<ck_tile::impl::static_counter_uniq_<__COUNTER__>> {}
#define MAKE_SC_WITH(start_, step_) \
    ck_tile::static_counter<ck_tile::impl::static_counter_uniq_<__COUNTER__>, start_, step_> {}
#define NEXT_SC(c_) c_.next<__COUNTER__>()
#define NEXT_SCI(c_, static_i_) c_.next<__COUNTER__ + static_i_>()

// Usage:
// constexpr auto c = MAKE_SC()
// NEXT_SC(c)   // -> constexpr 0
// NEXT_SC(c)   // -> constexpr 1
// NEXT_SC(c)   // -> constexpr 2

} // namespace ck_tile
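The expected values below follow directly from the Usage comment in the header; the test file itself is a minimal hypothetical sketch, assuming the header above is on the include path.

// static_counter_sketch.cpp -- compile-time demonstration of the counter macros
#include "ck_tile/core/utility/static_counter.hpp"

// MAKE_SC() creates a counter type unique to this translation unit;
// each NEXT_SC() expansion allocates the next slot at compile time.
constexpr auto c = MAKE_SC();

static_assert(NEXT_SC(c) == 0, "first next(): Start + 0 * Step");
static_assert(NEXT_SC(c) == 1, "second next(): Start + 1 * Step");

// MAKE_SC_WITH(start, step) applies an affine transform to the slot index.
constexpr auto c2 = MAKE_SC_WITH(10, 2);
static_assert(NEXT_SC(c2) == 10, "10 + 0 * 2");
static_assert(NEXT_SC(c2) == 12, "10 + 1 * 2");

int main() { return 0; }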
include/ck_tile/host.hpp

@@ -11,6 +11,7 @@
 #include "ck_tile/host/fill.hpp"
 #include "ck_tile/host/hip_check_error.hpp"
 #include "ck_tile/host/host_tensor.hpp"
+#include "ck_tile/host/joinable_thread.hpp"
 #include "ck_tile/host/kernel_launch.hpp"
 #include "ck_tile/host/ranges.hpp"
 #include "ck_tile/host/reference/reference_batched_dropout.hpp"
@@ -19,10 +20,17 @@
 #include "ck_tile/host/reference/reference_batched_masking.hpp"
 #include "ck_tile/host/reference/reference_batched_rotary_position_embedding.hpp"
 #include "ck_tile/host/reference/reference_batched_softmax.hpp"
+#include "ck_tile/host/reference/reference_elementwise.hpp"
+#include "ck_tile/host/reference/reference_fused_moe.hpp"
 #include "ck_tile/host/reference/reference_gemm.hpp"
 #include "ck_tile/host/reference/reference_im2col.hpp"
-#include "ck_tile/host/reference/reference_layernorm2d.hpp"
+#include "ck_tile/host/reference/reference_layernorm2d_fwd.hpp"
+#include "ck_tile/host/reference/reference_moe_sorting.hpp"
+#include "ck_tile/host/reference/reference_permute.hpp"
 #include "ck_tile/host/reference/reference_reduce.hpp"
+#include "ck_tile/host/reference/reference_rmsnorm2d_fwd.hpp"
+#include "ck_tile/host/reference/reference_rowwise_quantization2d.hpp"
 #include "ck_tile/host/reference/reference_softmax.hpp"
+#include "ck_tile/host/reference/reference_topk.hpp"
 #include "ck_tile/host/stream_config.hpp"
 #include "ck_tile/host/timer.hpp"
include/ck_tile/host/arg_parser.hpp

@@ -15,11 +15,14 @@
 namespace ck_tile {

 /*
- * a host side utility, arg parser for
+ * a host side utility, arg parser for, either
+ * -[key0] = [value0, value1, value2]
+ * or
  * -[key0]=[value0] -[key1]=[value1] ...
  */
 class ArgParser
 {
     public:
     class Arg
     {
@@ -187,6 +190,45 @@ class ArgParser
         return value;
     }

+    std::vector<std::string> get_string_vec(const std::string& name,
+                                            const std::string& delimiter = ",") const
+    {
+        if(get_str(name).empty())
+        {
+            return {};
+        }
+        std::string s = get_str(name);
+        std::vector<std::string> tokens;
+        size_t pos = 0;
+        std::string token;
+        while((pos = s.find(delimiter)) != std::string::npos)
+        {
+            token = s.substr(0, pos);
+            tokens.push_back(token);
+            s.erase(0, pos + delimiter.length());
+        }
+        tokens.push_back(s);
+        return tokens;
+    }
+
+    std::vector<int> get_int_vec(const std::string& name, const std::string& delimiter = ",") const
+    {
+        if(get_str(name).empty())
+        {
+            return {};
+        }
+        const std::vector<std::string> args = get_string_vec(name, delimiter);
+        std::vector<int> tokens;
+        tokens.reserve(static_cast<int>(args.size()));
+        for(const std::string& token : args)
+        {
+            int value = atoi(token.c_str());
+            tokens.push_back(value);
+        }
+        return tokens;
+    }
+
     private:
     std::unordered_map<std::string, Arg> input_map;
     std::vector<std::string> keys;
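A small hypothetical sketch of the new vector getters. It assumes ArgParser exposes insert()/parse() as in the existing ck_tile examples (those members are not part of this hunk); only get_int_vec()/get_string_vec() come from this commit, and the "dims" option name is made up.

// arg_parser_sketch.cpp
#include "ck_tile/host/arg_parser.hpp"
#include <cstdio>
#include <vector>

int main(int argc, char* argv[])
{
    ck_tile::ArgParser arg_parser;
    arg_parser.insert("dims", "2,3,4", "comma separated list of dimensions"); // assumed API
    if(!arg_parser.parse(argc, argv))                                         // assumed API
        return -1;

    // "-dims=8,16,32" on the command line becomes {8, 16, 32}
    std::vector<int> dims = arg_parser.get_int_vec("dims");
    for(int d : dims)
        std::printf("%d\n", d);
    return 0;
}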
include/ck_tile/host/device_memory.hpp

@@ -7,6 +7,7 @@
 #include <stdint.h>
 #include <stdexcept>
 #include "ck_tile/host/hip_check_error.hpp"
+#include "ck_tile/host/host_tensor.hpp"

 namespace ck_tile {

 template <typename T>
@@ -36,6 +37,19 @@ struct DeviceMem
             mpDeviceBuf = nullptr;
         }
     }

+    template <typename T>
+    DeviceMem(const HostTensor<T>& t) : mMemSize(t.get_element_space_size_in_bytes())
+    {
+        if(mMemSize != 0)
+        {
+            HIP_CHECK_ERROR(hipMalloc(static_cast<void**>(&mpDeviceBuf), mMemSize));
+        }
+        else
+        {
+            mpDeviceBuf = nullptr;
+        }
+        ToDevice(t.data());
+    }
+
     void Realloc(std::size_t mem_size)
     {
         if(mpDeviceBuf)
@@ -92,6 +106,27 @@ struct DeviceMem
             HIP_CHECK_ERROR(hipMemcpy(p, mpDeviceBuf, cpySize, hipMemcpyDeviceToHost));
         }
     }

+    // construct a host tensor with type T
+    template <typename T>
+    HostTensor<T> ToHost(std::size_t cpySize)
+    {
+        // TODO: host tensor could be slightly larger than the device tensor
+        //       we just copy all data from GPU buffer
+        std::size_t host_elements = (cpySize + sizeof(T) - 1) / sizeof(T);
+        HostTensor<T> h_({host_elements});
+        if(mpDeviceBuf)
+        {
+            HIP_CHECK_ERROR(hipMemcpy(h_.data(), mpDeviceBuf, cpySize, hipMemcpyDeviceToHost));
+        }
+        return h_;
+    }
+
+    template <typename T>
+    HostTensor<T> ToHost()
+    {
+        return ToHost<T>(mMemSize);
+    }
+
     void SetZero() const
     {
         if(mpDeviceBuf)
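A minimal round-trip sketch of the new DeviceMem <-> HostTensor helpers added above; the shape, data type, and fill range are arbitrary illustration values, and the fill functor comes from fill.hpp in this same commit.

// device_memory_sketch.cpp
#include "ck_tile/host/device_memory.hpp"
#include "ck_tile/host/host_tensor.hpp"
#include "ck_tile/host/fill.hpp"

int main()
{
    ck_tile::HostTensor<float> x({16, 64});
    ck_tile::FillUniformDistribution<float>{-1.f, 1.f}(x);

    // new constructor: allocates device memory sized to x and uploads its contents
    ck_tile::DeviceMem x_dev(x);

    // new helper: copies the whole device buffer back into a freshly sized HostTensor<float>
    auto x_back = x_dev.ToHost<float>();
    return x_back.mData.size() == x.mData.size() ? 0 : 1;
}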
include/ck_tile/host/fill.hpp

@@ -10,8 +10,10 @@
 #include <random>
 #include <type_traits>
 #include <utility>
+#include <unordered_set>
 #include "ck_tile/core.hpp"
+#include "ck_tile/host/joinable_thread.hpp"

 namespace ck_tile {

@@ -21,13 +23,44 @@ struct FillUniformDistribution
     float a_{-5.f};
     float b_{5.f};
     std::optional<uint32_t> seed_{11939};
+    // ATTENTION: threaded does not guarantee the distribution between thread
+    bool threaded = false;

     template <typename ForwardIter>
     void operator()(ForwardIter first, ForwardIter last) const
     {
-        std::mt19937 gen(seed_.has_value() ? *seed_ : std::random_device{}());
-        std::uniform_real_distribution<float> dis(a_, b_);
-        std::generate(first, last, [&dis, &gen]() { return ck_tile::type_convert<T>(dis(gen)); });
+        if(threaded)
+        {
+            uint32_t num_thread  = std::thread::hardware_concurrency();
+            auto total           = static_cast<std::size_t>(std::distance(first, last));
+            auto work_per_thread = static_cast<std::size_t>((total + num_thread - 1) / num_thread);
+            std::vector<joinable_thread> threads(num_thread);
+            for(std::size_t it = 0; it < num_thread; ++it)
+            {
+                std::size_t iw_begin = it * work_per_thread;
+                std::size_t iw_end   = std::min((it + 1) * work_per_thread, total);
+                auto thread_f = [this, total, iw_begin, iw_end, &first] {
+                    if(iw_begin > total || iw_end > total)
+                        return;
+                    // need to make each thread unique, add an offset to current seed
+                    std::mt19937 gen(seed_.has_value() ? (*seed_ + iw_begin)
+                                                       : std::random_device{}());
+                    std::uniform_real_distribution<float> dis(a_, b_);
+                    std::generate(first + iw_begin, first + iw_end, [&dis, &gen]() {
+                        return ck_tile::type_convert<T>(dis(gen));
+                    });
+                };
+                threads[it] = joinable_thread(thread_f);
+            }
+        }
+        else
+        {
+            std::mt19937 gen(seed_.has_value() ? *seed_ : std::random_device{}());
+            std::uniform_real_distribution<float> dis(a_, b_);
+            std::generate(first, last, [&dis, &gen]() { return ck_tile::type_convert<T>(dis(gen)); });
+        }
     }

     template <typename ForwardRange>
@@ -41,19 +74,117 @@ struct FillUniformDistribution
     }
 };

+namespace impl {
+// clang-format off
+template<index_t bytes> struct RawIntegerType_ {};
+template<> struct RawIntegerType_<1> { using type = uint8_t; };
+template<> struct RawIntegerType_<2> { using type = uint16_t; };
+template<> struct RawIntegerType_<4> { using type = uint32_t; };
+template<> struct RawIntegerType_<8> { using type = uint64_t; };
+// clang-format on
+
+template <typename T>
+using RawIntegerType = typename RawIntegerType_<sizeof(T)>::type;
+} // namespace impl
+
+// Note: this struct will have no const-ness will generate random
+template <typename T>
+struct FillUniformDistribution_Unique
+{
+    float a_{-5.f};
+    float b_{5.f};
+    std::optional<uint32_t> seed_{11939};
+    std::mt19937 gen_{};
+    std::unordered_set<impl::RawIntegerType<T>> set_{};
+
+    FillUniformDistribution_Unique(float a                      = -5.f,
+                                   float b                      = 5.f,
+                                   std::optional<uint32_t> seed = {11939})
+        : a_(a),
+          b_(b),
+          seed_(seed),
+          gen_{seed_.has_value() ? *seed_ : std::random_device{}()},
+          set_{}
+    {
+    }
+
+    template <typename ForwardIter>
+    void operator()(ForwardIter first, ForwardIter last)
+    {
+        std::mt19937& gen = gen_;
+        std::uniform_real_distribution<float> dis(a_, b_);
+        auto& set = set_;
+        std::generate(first, last, [&dis, &gen, &set]() {
+            T v = static_cast<T>(0);
+            do
+            {
+                v = ck_tile::type_convert<T>(dis(gen));
+            } while(set.count(bit_cast<impl::RawIntegerType<T>>(v)) == 1);
+            set.insert(bit_cast<impl::RawIntegerType<T>>(v));
+            return v;
+        });
+    }
+
+    template <typename ForwardRange>
+    auto operator()(ForwardRange&& range)
+        -> std::void_t<decltype(std::declval<FillUniformDistribution_Unique&>()(
+            std::begin(std::forward<ForwardRange>(range)),
+            std::end(std::forward<ForwardRange>(range))))>
+    {
+        (*this)(std::begin(std::forward<ForwardRange>(range)),
+                std::end(std::forward<ForwardRange>(range)));
+    }
+
+    void clear() { set_.clear(); }
+};
+
 template <typename T>
 struct FillNormalDistribution
 {
     float mean_{0.f};
     float variance_{1.f};
     std::optional<uint32_t> seed_{11939};
+    // ATTENTION: threaded does not guarantee the distribution between thread
+    bool threaded = false;

     template <typename ForwardIter>
     void operator()(ForwardIter first, ForwardIter last) const
     {
-        std::mt19937 gen(seed_.has_value() ? *seed_ : std::random_device{}());
-        std::normal_distribution<float> dis(mean_, std::sqrt(variance_));
-        std::generate(first, last, [&dis, &gen]() { return ck_tile::type_convert<T>(dis(gen)); });
+        if(threaded)
+        {
+            uint32_t num_thread  = std::thread::hardware_concurrency();
+            auto total           = static_cast<std::size_t>(std::distance(first, last));
+            auto work_per_thread = static_cast<std::size_t>((total + num_thread - 1) / num_thread);
+            std::vector<joinable_thread> threads(num_thread);
+            for(std::size_t it = 0; it < num_thread; ++it)
+            {
+                std::size_t iw_begin = it * work_per_thread;
+                std::size_t iw_end   = std::min((it + 1) * work_per_thread, total);
+                auto thread_f = [this, total, iw_begin, iw_end, &first] {
+                    if(iw_begin > total || iw_end > total)
+                        return;
+                    // need to make each thread unique, add an offset to current seed
+                    std::mt19937 gen(seed_.has_value() ? (*seed_ + iw_begin)
+                                                       : std::random_device{}());
+                    std::normal_distribution<float> dis(mean_, std::sqrt(variance_));
+                    std::generate(first + iw_begin, first + iw_end, [&dis, &gen]() {
+                        return ck_tile::type_convert<T>(dis(gen));
+                    });
+                };
+                threads[it] = joinable_thread(thread_f);
+            }
+        }
+        else
+        {
+            std::mt19937 gen(seed_.has_value() ? *seed_ : std::random_device{}());
+            std::normal_distribution<float> dis(mean_, std::sqrt(variance_));
+            std::generate(first, last, [&dis, &gen]() { return ck_tile::type_convert<T>(dis(gen)); });
+        }
     }

     template <typename ForwardRange>
@@ -167,6 +298,44 @@ struct FillMonotonicSeq
     }
 };

+template <typename T, bool IsAscending = true>
+struct FillStepRange
+{
+    float start_value_{0};
+    float end_value_{3};
+    float step_{1};
+
+    template <typename ForwardIter>
+    void operator()(ForwardIter first, ForwardIter last) const
+    {
+        std::generate(first, last, [=, n = start_value_]() mutable {
+            auto tmp = n;
+            n += step_;
+            if constexpr(IsAscending)
+            {
+                if(n > end_value_)
+                    n = start_value_;
+            }
+            else
+            {
+                if(n < end_value_)
+                    n = start_value_;
+            }
+            return type_convert<T>(tmp);
+        });
+    }
+
+    template <typename ForwardRange>
+    auto operator()(ForwardRange&& range) const
+        -> std::void_t<decltype(std::declval<const FillStepRange&>()(
+            std::begin(std::forward<ForwardRange>(range)),
+            std::end(std::forward<ForwardRange>(range))))>
+    {
+        (*this)(std::begin(std::forward<ForwardRange>(range)),
+                std::end(std::forward<ForwardRange>(range)));
+    }
+};
+
 template <typename T>
 struct FillConstant
 {
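A short sketch of the new threaded flag and the FillStepRange functor added above; shapes, ranges, and the seed value are arbitrary, and the brace initialization assumes the post-commit member order (a_, b_, seed_, threaded).

// fill_sketch.cpp
#include "ck_tile/host/fill.hpp"
#include "ck_tile/host/host_tensor.hpp"

int main()
{
    ck_tile::HostTensor<float> a({1024, 1024});

    // same distribution parameters as before, but split across joinable_threads;
    // per the in-code warning, threading changes how the random stream is partitioned
    ck_tile::FillUniformDistribution<float> fill{-5.f, 5.f, 11939u, /*threaded=*/true};
    fill(a);

    // FillStepRange cycles start..end by step: 0, 0.5, 1.0, ..., 3.0, then wraps to 0
    ck_tile::HostTensor<float> b({8});
    ck_tile::FillStepRange<float>{0.f, 3.f, 0.5f}(b);
    return 0;
}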
include/ck_tile/host/host_tensor.hpp

@@ -8,11 +8,13 @@
 #include <iostream>
 #include <iomanip>
 #include <numeric>
-#include <thread>
 #include <utility>
 #include <vector>
+#include <functional>
+#include <fstream>
 #include "ck_tile/core.hpp"
+#include "ck_tile/host/joinable_thread.hpp"
 #include "ck_tile/host/ranges.hpp"

 namespace ck_tile {

@@ -212,23 +214,6 @@ CK_TILE_HOST HostTensorDescriptor transpose_host_tensor_descriptor_given_new2old
     return HostTensorDescriptor(new_lengths, new_strides);
 }

-struct joinable_thread : std::thread
-{
-    template <typename... Xs>
-    joinable_thread(Xs&&... xs) : std::thread(std::forward<Xs>(xs)...)
-    {
-    }
-
-    joinable_thread(joinable_thread&&) = default;
-    joinable_thread& operator=(joinable_thread&&) = default;
-
-    ~joinable_thread()
-    {
-        if(this->joinable())
-            this->join();
-    }
-};
-
 template <typename F, typename... Xs>
 struct ParallelTensorFunctor
 {

@@ -545,6 +530,28 @@ struct HostTensor
     typename Data::size_type size() const { return mData.size(); }

+    // return a slice of this tensor
+    // for simplicity we just copy the data and return a new tensor
+    auto slice(std::vector<size_t> s_begin, std::vector<size_t> s_end) const
+    {
+        assert(s_begin.size() == s_end.size());
+        assert(s_begin.size() == get_num_of_dimension());
+
+        std::vector<size_t> s_len(s_begin.size());
+        std::transform(
+            s_end.begin(), s_end.end(), s_begin.begin(), s_len.begin(), std::minus<size_t>{});
+        HostTensor<T> sliced_tensor(s_len);
+
+        sliced_tensor.ForEach([&](auto& self, auto idx) {
+            std::vector<size_t> src_idx(idx.size());
+            std::transform(
+                idx.begin(), idx.end(), s_begin.begin(), src_idx.begin(), std::plus<size_t>{});
+            self(idx) = operator()(src_idx);
+        });
+
+        return sliced_tensor;
+    }
+
     template <typename U = T>
     auto AsSpan() const
     {

@@ -567,6 +574,107 @@ struct HostTensor
                 size() * FromSize / ToSize};
     }

+    friend std::ostream& operator<<(std::ostream& os, const HostTensor<T>& t)
+    {
+        os << t.mDesc;
+        os << "[";
+        for(typename Data::size_type idx = 0; idx < t.mData.size(); ++idx)
+        {
+            if(0 < idx)
+            {
+                os << ", ";
+            }
+            if constexpr(std::is_same_v<T, bf16_t> || std::is_same_v<T, fp16_t>)
+            {
+                os << type_convert<float>(t.mData[idx]) << " #### ";
+            }
+            else
+            {
+                os << t.mData[idx];
+            }
+        }
+        os << "]";
+        return os;
+    }
+
+    // read data from a file, as dtype
+    // the file could dumped from torch as (targeting tensor is t here)
+    //    numpy.savetxt("f.txt", t.view(-1).numpy())
+    //    numpy.savetxt("f.txt", t.cpu().view(-1).numpy())            # from cuda to cpu to save
+    //    numpy.savetxt("f.txt", t.cpu().view(-1).numpy(), fmt="%d")  # save as int
+    // will output f.txt, each line is a value
+    // dtype=float or int, internally will cast to real type
+    void loadtxt(std::string file_name, std::string dtype = "float")
+    {
+        std::ifstream file(file_name);
+
+        if(file.is_open())
+        {
+            std::string line;
+
+            index_t cnt = 0;
+            while(std::getline(file, line))
+            {
+                if(cnt >= static_cast<index_t>(mData.size()))
+                {
+                    throw std::runtime_error(std::string("data read from file:") + file_name +
+                                             " is too big");
+                }
+
+                if(dtype == "float")
+                {
+                    mData[cnt] = type_convert<T>(std::stof(line));
+                }
+                else if(dtype == "int" || dtype == "int32")
+                {
+                    mData[cnt] = type_convert<T>(std::stoi(line));
+                }
+                cnt++;
+            }
+            file.close();
+            if(cnt < static_cast<index_t>(mData.size()))
+            {
+                std::cerr << "Warning! reading from file:" << file_name
+                          << ", does not match the size of this tensor" << std::endl;
+            }
+        }
+        else
+        {
+            // Print an error message to the standard error
+            // stream if the file cannot be opened.
+            throw std::runtime_error(std::string("unable to open file:") + file_name);
+        }
+    }
+
+    // can save to a txt file and read from torch as:
+    // torch.from_numpy(np.loadtxt('f.txt', dtype=np.int32/np.float32...)).view([...]).contiguous()
+    void savetxt(std::string file_name, std::string dtype = "float")
+    {
+        std::ofstream file(file_name);
+
+        if(file.is_open())
+        {
+            for(auto& itm : mData)
+            {
+                if(dtype == "float")
+                    file << type_convert<float>(itm) << std::endl;
+                else if(dtype == "int")
+                    file << type_convert<int>(itm) << std::endl;
+                else
+                    // TODO: we didn't implement operator<< for all custom
+                    // data types, here fall back to float in case compile error
+                    file << type_convert<float>(itm) << std::endl;
+            }
+            file.close();
+        }
+        else
+        {
+            // Print an error message to the standard error
+            // stream if the file cannot be opened.
+            throw std::runtime_error(std::string("unable to open file:") + file_name);
+        }
+    }
+
     Descriptor mDesc;
     Data mData;
 };
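A brief sketch of the new slice() and savetxt()/loadtxt() helpers added to HostTensor; the file name, shape, and fill range are placeholder values.

// host_tensor_sketch.cpp
#include "ck_tile/host/host_tensor.hpp"
#include "ck_tile/host/fill.hpp"

int main()
{
    ck_tile::HostTensor<float> t({4, 8});
    ck_tile::FillUniformDistribution<float>{0.f, 1.f}(t);

    // copy out rows [1, 3) and columns [0, 8) into a new 2x8 tensor
    auto sub = t.slice({1, 0}, {3, 8});

    // dump to text (one value per line) and read it back, as the comments above describe
    t.savetxt("t.txt", "float");
    ck_tile::HostTensor<float> t2({4, 8});
    t2.loadtxt("t.txt", "float");
    return sub.mData.size() == 16 ? 0 : 1;
}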
include/ck_tile/host/joinable_thread.hpp (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include <thread>
#include <utility>

namespace ck_tile {

struct joinable_thread : std::thread
{
    template <typename... Xs>
    joinable_thread(Xs&&... xs) : std::thread(std::forward<Xs>(xs)...)
    {
    }

    joinable_thread(joinable_thread&&) = default;
    joinable_thread& operator=(joinable_thread&&) = default;

    ~joinable_thread()
    {
        if(this->joinable())
            this->join();
    }
};

} // namespace ck_tile
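A tiny sketch of the RAII behavior this header now provides on its own (previously the struct lived inside host_tensor.hpp): the destructor joins automatically, so no explicit join loop is needed. The workload lambda is a placeholder.

// joinable_thread_sketch.cpp
#include "ck_tile/host/joinable_thread.hpp"
#include <atomic>
#include <vector>

int main()
{
    std::atomic<int> sum{0};
    {
        std::vector<ck_tile::joinable_thread> workers;
        for(int i = 0; i < 4; ++i)
            workers.emplace_back([&sum, i] { sum += i; });
    } // ~joinable_thread() joins each worker here
    return sum.load() == 0 + 1 + 2 + 3 ? 0 : 1;
}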
include/ck_tile/host/reference/reference_elementwise.hpp (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"

#include <thread>

namespace ck_tile {

template <typename ADataType, typename BDataType, typename ComputeDataType, typename ElementOp>
CK_TILE_HOST void reference_unary_elementwise(const HostTensor<ADataType>& a,
                                              HostTensor<BDataType>& b,
                                              ElementOp element_op)
{
    // TODO: imeplement gpu version reference function
    auto f = [&](auto i) {
        auto v_a   = type_convert<ComputeDataType>(a.mData[i]);
        auto v_b   = element_op(v_a);
        b.mData[i] = ck_tile::type_convert<BDataType>(v_b);
    };

    make_ParallelTensorFunctor(f, b.get_element_space_size())(std::thread::hardware_concurrency());
}

template <typename ADataType,
          typename BDataType,
          typename CDataType,
          typename ComputeDataType,
          typename ElementOp>
CK_TILE_HOST void reference_binary_elementwise(const HostTensor<ADataType>& a,
                                               const HostTensor<BDataType>& b,
                                               HostTensor<CDataType>& c,
                                               ElementOp element_op)
{
    // TODO: imeplement gpu version reference function
    auto f = [&](auto i) {
        auto v_a   = type_convert<ComputeDataType>(a.mData[i]);
        auto v_b   = type_convert<ComputeDataType>(b.mData[i]);
        auto v_c   = element_op(v_a, v_b);
        c.mData[i] = ck_tile::type_convert<CDataType>(v_c);
    };

    make_ParallelTensorFunctor(f, c.get_element_space_size())(std::thread::hardware_concurrency());
}
} // namespace ck_tile
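A minimal usage sketch of reference_binary_elementwise; the scale/offset lambda, shape, and all-float types are arbitrary choices made only to show the ComputeDataType conversion path.

// elementwise_sketch.cpp
#include "ck_tile/host/reference/reference_elementwise.hpp"
#include "ck_tile/host/host_tensor.hpp"
#include "ck_tile/host/fill.hpp"

int main()
{
    ck_tile::HostTensor<float> a({128});
    ck_tile::HostTensor<float> b({128});
    ck_tile::HostTensor<float> c({128});
    ck_tile::FillUniformDistribution<float>{-1.f, 1.f}(a);
    ck_tile::FillUniformDistribution<float>{-1.f, 1.f}(b);

    // c[i] = a[i] * 2 + b[i], computed in float then converted to the output type
    ck_tile::reference_binary_elementwise<float, float, float, float>(
        a, b, c, [](float va, float vb) { return va * 2.f + vb; });
    return 0;
}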
include/ck_tile/host/reference/reference_fused_moe.hpp (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"

namespace ck_tile {

// [indexing implementation-1]
// using M_a as constexpr block_size to partition all tokens into different slices
// each slice map to one expert, and one expert can have multiple slices
// e.g. num_experts = 6, topk=3, M_a = 4, input_tokens = 5
// before sort, topk_ids is : [[0, 3, 5], [2, 3, 5], [1, 3, 5], [1, 2, 3], [1, 3, 5]]
//                             tok-0      tok-1      tok-2      tok-3      tok-4
//           topk_weight is : [[a, b, c], [d, e, f], [g, h, i], [j, k, l], [m, n, o]] (some float number)
//
// token_id_per_expert is : [[0], [2, 3, 4], [1, 3], [0, 1, 2, 3, 4], [], [0, 1, 2, 5]]
//  (only for reference)     exp-0 exp-1      exp-2   exp-3          exp-4 exp-5
// weight_id_per_expert is: [[a], [g, j, m], [d, k], [b, e, h, l, n], [], [c, f, i, o]]
//
// max_num_tokens_padded : topk * input_tokens + num_experts * (M_a - 1)
// max_num_tokens_padded : topk * input_tokens + num_experts * M_a - topk (updated)
// * this could be larger than actual, since actual tokens are on GPU
//
// sorted_token_ids_ptr   : [0, 6, 6, 6, 2, 3, 4, 6, 1, 3, 6, 6, 0, 1, 2, 3, 4, 6, 6, 6, 6, 6, 6, 6, 0, 1, 2, 5]
//                          |-  exp-0  -|-  exp-1  -|-  exp-2  -|-       exp-3        -|-  exp-4  -|-  exp-5  -|
// sorted_weight_ptr      : [a, *, *, *, g, j, m, *, d, k, *, *, b, e, h, l, n, *, *, *, *, *, *, *, c, f, i, o]
//
// * length is max_num_tokens_padded, actual size is num_tokens_post_padded_ptr
//
// sorted_expert_ids_ptr  : [0, 1, 2, 3, 3, 4, 5]
// * length is (max_num_tokens_padded + block_size - 1) / block_size
//
// num_tokens_post_padded_ptr : [28]
// num_sorted_tiles_ptr : [7]
template <typename AccDataType, // you only need to explcitly set this one
          typename Activation,  // ck_tile::element_wise::Gelu
          typename ADataType,
          typename GDataType,
          typename DDataType,
          typename ODataType,
          typename AScaleDataType,
          typename GScaleDataType,
          typename DScaleDataType,
          typename YSmoothScaleDataType,
          typename TopkWeightDataType,
          typename IndexDataType>
void reference_fused_moe(
    const ck_tile::HostTensor<ADataType>& a_host,       // [tokens, hidden_size]
    const ck_tile::HostTensor<GDataType>& g_host,       // [experts, interme_size_0, hidden_size]
    const ck_tile::HostTensor<DDataType>& d_host,       // [experts, hidden_size, interme_size_1]
    const ck_tile::HostTensor<AScaleDataType>& sa_host, // [tokens, 1]
    const ck_tile::HostTensor<GScaleDataType>& sg_host, // [experts, 1, interme_size_0]
    const ck_tile::HostTensor<DScaleDataType>& sd_host, // [experts, 1, hidden_size]
    const ck_tile::HostTensor<YSmoothScaleDataType>& sy_host, // [experts, 1, interme_size_0]
    ck_tile::HostTensor<ODataType>& o_host,                   // [tokens, hidden_size]
    const ck_tile::HostTensor<IndexDataType>& sorted_token_ids_host,   // [max_num_tokens_padded]
    const ck_tile::HostTensor<TopkWeightDataType>& sorted_weight_host, // [max_num_tokens_padded]
    const ck_tile::HostTensor<IndexDataType>&
        sorted_expert_ids_host, // [(max_num_tokens_padded + block_size - 1) / block_size]
    const ck_tile::HostTensor<IndexDataType>& num_sorted_tiles_host, // [1]
    const ck_tile::HostTensor<IndexDataType>&
        token_ids_host, // [tokens, topk] --> ugly!!! remove in the future
    ck_tile::index_t block_m,
    ck_tile::index_t tokens,
    ck_tile::index_t experts,
    ck_tile::index_t hidden_size,
    ck_tile::index_t intermediate_size, // this size is for gate/up
    ck_tile::index_t topk,
    ck_tile::index_t gate_only)
{
    assert(sorted_token_ids_host.get_num_of_dimension() == 1);
    assert(sorted_weight_host.get_num_of_dimension() == 1);
    assert(sorted_expert_ids_host.get_num_of_dimension() == 1);
    assert(num_sorted_tiles_host.get_element_size() == 1);

    ck_tile::index_t num_sorted_tiles    = num_sorted_tiles_host.mData[0] / block_m;
    ck_tile::index_t intermediate_size_0 = intermediate_size;
    ck_tile::index_t intermediate_size_1 = intermediate_size / (gate_only ? 1 : 2);

    // TODO: better remove this in the future, or modify the token_id value
    auto get_topk_id = [&](ck_tile::index_t token_id_, ck_tile::index_t expert_id_) {
        for(ck_tile::index_t i_ = 0; i_ < topk; i_++)
        {
            if(token_ids_host(token_id_, i_) == expert_id_)
                return i_;
        }
        throw std::runtime_error("not correct token/expert pair\n");
        return -1; // TODO: not correct!!
    };

    ck_tile::HostTensor<AccDataType> out_topk_tokens({tokens, topk, hidden_size});

    int max_num_tokens_padded = topk * tokens + experts * block_m - topk;
    // assert();
    auto f = [&](auto i_flatten) {
        ck_tile::index_t i_tile = i_flatten / block_m;
        if(i_tile >= num_sorted_tiles)
            return;
        ck_tile::index_t i_expert = sorted_expert_ids_host.mData[i_tile];
        ck_tile::index_t i_token  = sorted_token_ids_host.mData[i_flatten];
        if(i_token >= tokens)
            return;
        ck_tile::index_t i_topk = get_topk_id(i_token, i_expert); // TODO: ugly
        auto weight             = sorted_weight_host.mData[i_flatten];

        ck_tile::HostTensor<AccDataType> acc_0({1, intermediate_size_0});
        // first gemm
        for(ck_tile::index_t i_n = 0; i_n < intermediate_size_0; i_n++)
        {
            AccDataType acc = static_cast<AccDataType>(0);
            for(ck_tile::index_t i_k = 0; i_k < hidden_size; i_k++)
            {
                acc += type_convert<AccDataType>(a_host(i_token, i_k)) *
                       type_convert<AccDataType>(g_host(i_expert, i_n, i_k));
            }
            acc_0(0, i_n) = acc;
        }

        ck_tile::HostTensor<AccDataType> y({1, intermediate_size_1});
        if(gate_only)
        {
            if(intermediate_size_1 != intermediate_size_0)
                throw std::runtime_error("intermediate_size not correct, 0:" +
                                         std::to_string(intermediate_size_0) +
                                         ", 1:" + std::to_string(intermediate_size_1));
            for(ck_tile::index_t i_n = 0; i_n < intermediate_size_1; i_n++)
            {
                Activation{}(y(0, i_n), acc_0(0, i_n));
            }
        }
        else
        {
            if(intermediate_size_1 * 2 != intermediate_size_0)
                throw std::runtime_error("intermediate_size not correct, 0:" +
                                         std::to_string(intermediate_size_0) +
                                         ", 1:" + std::to_string(intermediate_size_1));
            for(ck_tile::index_t i_n = 0; i_n < intermediate_size_1; i_n++)
            {
                AccDataType tmp;
                Activation{}(tmp, acc_0(0, i_n));
                y(0, i_n) = tmp * acc_0(0, i_n + intermediate_size_1); // TODO: elementwise mul
            }
        }

        // second gemm, loop along gemm-n
        ck_tile::HostTensor<AccDataType> acc_1({1, hidden_size});
        for(ck_tile::index_t i_n = 0; i_n < hidden_size; i_n++)
        {
            AccDataType acc = static_cast<AccDataType>(0);
            for(ck_tile::index_t i_k = 0; i_k < intermediate_size_1; i_k++)
            {
                acc += y(0, i_k) * type_convert<AccDataType>(d_host(i_expert, i_n, i_k));
            }
            acc_1(0, i_n) = acc * weight; // multiple weight here
        }

        for(ck_tile::index_t i_n = 0; i_n < hidden_size; i_n++)
        {
            out_topk_tokens(i_token, i_topk, i_n) = acc_1(0, i_n);
        }
    };
    // make_ParallelTensorFunctor(f, max_num_tokens_padded)(std::thread::hardware_concurrency());
    make_ParallelTensorFunctor(f, max_num_tokens_padded)(1);

    // reduce
    auto r = [&](auto i_token) {
        for(ck_tile::index_t i_n = 0; i_n < hidden_size; i_n++)
        {
            AccDataType acc = type_convert<AccDataType>(0);
            for(ck_tile::index_t i_topk = 0; i_topk < topk; i_topk++)
            {
                acc += out_topk_tokens(i_token, i_topk, i_n);
            }
            o_host(i_token, i_n) = type_convert<ODataType>(acc);
        }
    };
    make_ParallelTensorFunctor(r, tokens)(std::thread::hardware_concurrency());

    (void)num_sorted_tiles_host;
    (void)sa_host;
    (void)sg_host;
    (void)sd_host;
    (void)sy_host;
}
} // namespace ck_tile
include/ck_tile/host/reference/reference_gemm.hpp

 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once

+#include <cstdlib>
+#include <thread>
 #include "ck_tile/core.hpp"
 #include "ck_tile/host/host_tensor.hpp"
-#include "ck_tile/ops/common/tensor_layout.hpp"
-#include <thread>

 namespace ck_tile {

@@ -14,55 +15,36 @@ template <typename ADataType,
           typename BDataType,
           typename AccDataType,
           typename CDataType,
-          typename LayoutA,
-          typename LayoutB,
-          typename LayoutC,
           typename AElementOp   = ck_tile::identity,
           typename BElementOp   = ck_tile::identity,
           typename ACCElementOp = ck_tile::identity>
 CK_TILE_HOST void reference_gemm(const HostTensor<ADataType>& a_m_k,
-                                 const HostTensor<BDataType>& b_n_k,
+                                 const HostTensor<BDataType>& b_k_n,
                                  HostTensor<CDataType>& c_m_n,
                                  const AElementOp& a_element_op     = {},
                                  const BElementOp& b_element_op     = {},
                                  const ACCElementOp& acc_element_op = {})
 {
-    const int N = (std::is_same_v<LayoutB, tensor_layout::gemm::ColumnMajor>)
-                      ? b_n_k.mDesc.get_lengths()[0]
-                      : b_n_k.mDesc.get_lengths()[1];
-    const int K = (std::is_same_v<LayoutA, tensor_layout::gemm::RowMajor>)
-                      ? a_m_k.mDesc.get_lengths()[1]
-                      : a_m_k.mDesc.get_lengths()[0];
-    const int M = (std::is_same_v<LayoutA, tensor_layout::gemm::RowMajor>)
-                      ? a_m_k.mDesc.get_lengths()[0]
-                      : a_m_k.mDesc.get_lengths()[1];
-
-    auto f = [&](auto m) {
-        for(int n = 0; n < N; ++n)
-        {
-            AccDataType v_acc = 0;
-            for(int k = 0; k < K; ++k)
-            {
-                ADataType v_a = (std::is_same_v<LayoutA, tensor_layout::gemm::RowMajor>)
-                                    ? a_element_op(a_m_k(m, k))
-                                    : a_element_op(a_m_k(k, m));
-                BDataType v_b = (std::is_same_v<LayoutB, tensor_layout::gemm::ColumnMajor>)
-                                    ? b_element_op(b_n_k(n, k))
-                                    : b_element_op(b_n_k(k, n));
-                v_acc += ck_tile::type_convert<AccDataType>(v_a) *
-                         ck_tile::type_convert<AccDataType>(v_b);
-            }
-            CDataType& c_ref = (std::is_same_v<LayoutC, tensor_layout::gemm::RowMajor>)
-                                   ? c_m_n(m, n)
-                                   : c_m_n(n, m);
-            c_ref = ck_tile::type_convert<CDataType>(acc_element_op(v_acc));
-        }
-    };
-
-    make_ParallelTensorFunctor(f, M)(std::thread::hardware_concurrency());
+    const std::size_t M = a_m_k.get_length(0);
+    const std::size_t N = b_k_n.get_length(1);
+    const std::size_t K = a_m_k.get_length(1);
+
+    auto f_mn = [&](auto m, auto n) {
+        AccDataType v_acc = 0;
+
+        for(std::size_t k = 0; k < K; ++k)
+        {
+            ADataType v_a = a_element_op(a_m_k(m, k));
+            BDataType v_b = b_element_op(b_k_n(k, n));
+
+            v_acc += ck_tile::type_convert<AccDataType>(v_a) *
+                     ck_tile::type_convert<AccDataType>(v_b);
+        }
+
+        c_m_n(m, n) = ck_tile::type_convert<CDataType>(acc_element_op(v_acc));
+    };
+
+    make_ParallelTensorFunctor(f_mn, M, N)(std::thread::hardware_concurrency());
 }

 template <typename ADataType,

@@ -115,9 +97,9 @@ template <typename ADataType,
           typename LayoutA,
           typename LayoutB,
           typename LayoutC>
-void reference_gemm_gpu(DeviceMem& a_device,
-                        DeviceMem& b_device,
-                        DeviceMem& c_device,
+void reference_gemm_gpu(ADataType* a_ptr,
+                        BDataType* b_ptr,
+                        CDataType* c_ptr,
                         index_t M,
                         index_t N,
                         index_t K,

@@ -125,78 +107,50 @@ void reference_gemm_gpu(DeviceMem& a_device,
                         index_t stride_b,
                         index_t stride_c)
 {
-    ADataType* d_A;
-    BDataType* d_B;
-    CDataType* d_C;
-
-    hipError_t errA = hipMalloc(&d_A, M * K * sizeof(ADataType));
-    hipError_t errB = hipMalloc(&d_B, N * K * sizeof(BDataType));
-    hipError_t errC = hipMalloc(&d_C, M * N * sizeof(CDataType));
-    if(errA != hipSuccess)
-    {
-        std::cerr << "Error allocating device memory for A: " << hipGetErrorString(errA) << std::endl;
-        return; // Early exit on error
-    }
-    if(errB != hipSuccess)
-    {
-        std::cerr << "Error allocating device memory for B: " << hipGetErrorString(errB) << std::endl;
-        return; // Early exit on error
-    }
-    if(errC != hipSuccess)
-    {
-        std::cerr << "Error allocating device memory for C: " << hipGetErrorString(errC) << std::endl;
-        return; // Early exit on error
-    }
-
-    errA = hipMemcpy(d_A, a_device.GetDeviceBuffer(), M * K * sizeof(ADataType), hipMemcpyHostToDevice);
-    if(errA != hipSuccess)
-    {
-        std::cerr << "Error copying A to device: " << hipGetErrorString(errA) << std::endl;
-    }
-    errB = hipMemcpy(d_B, b_device.GetDeviceBuffer(), N * K * sizeof(BDataType), hipMemcpyHostToDevice);
-    if(errB != hipSuccess)
-    {
-        std::cerr << "Error copying B to device: " << hipGetErrorString(errB) << std::endl;
-    }
     int totalElements      = M * N;
     int numThreadsPerBlock = 256; // Common choice for threads per block
     int numBlocks          = (totalElements + numThreadsPerBlock - 1) / numThreadsPerBlock;

     naive_gemm_kernel<ADataType, BDataType, AccDataType, CDataType, LayoutA, LayoutB, LayoutC>
-        <<<numBlocks, numThreadsPerBlock>>>(d_A, d_B, d_C, M, N, K, stride_a, stride_b, stride_c);
-
-    errC = hipMemcpy(c_device.GetDeviceBuffer(), d_C, M * N * sizeof(CDataType), hipMemcpyDeviceToHost);
-    if(errC != hipSuccess)
-    {
-        std::cerr << "Error copying C to device: " << hipGetErrorString(errC) << std::endl;
-    }
-
-    errA = hipFree(d_A);
-    if(errA != hipSuccess)
-    {
-        std::cerr << "Error free the A memory: " << hipGetErrorString(errA) << std::endl;
-    }
-    errB = hipFree(d_B);
-    if(errB != hipSuccess)
-    {
-        std::cerr << "Error free the B memory: " << hipGetErrorString(errB) << std::endl;
-    }
-    errC = hipFree(d_C);
-    if(errC != hipSuccess)
-    {
-        std::cerr << "Error free the C memory: " << hipGetErrorString(errC) << std::endl;
-    }
+        <<<numBlocks, numThreadsPerBlock>>>(a_ptr, b_ptr, c_ptr, M, N, K, stride_a, stride_b, stride_c);
+
+    return;
+}
+
+template <typename ADataType,
+          typename BDataType,
+          typename AccDataType,
+          typename CDataType,
+          typename LayoutA,
+          typename LayoutB,
+          typename LayoutC>
+void reference_batched_gemm_gpu(ADataType* a_ptr,
+                                BDataType* b_ptr,
+                                CDataType* c_ptr,
+                                index_t M,
+                                index_t N,
+                                index_t K,
+                                index_t stride_a,
+                                index_t stride_b,
+                                index_t stride_c,
+                                index_t batch_stride_A,
+                                index_t batch_stride_B,
+                                index_t batch_stride_C,
+                                index_t batch_count)
+{
+    int totalElements      = M * N;
+    int numThreadsPerBlock = 256; // Common choice for threads per block
+    int numBlocks          = (totalElements + numThreadsPerBlock - 1) / numThreadsPerBlock;
+
+    for(index_t batch_id = 0; batch_id < batch_count; ++batch_id)
+    {
+        ADataType* d_ATemp = a_ptr + batch_id * batch_stride_A;
+        BDataType* d_BTemp = b_ptr + batch_id * batch_stride_B;
+        CDataType* d_CTemp = c_ptr + batch_id * batch_stride_C;
+        naive_gemm_kernel<ADataType, BDataType, AccDataType, CDataType, LayoutA, LayoutB, LayoutC>
+            <<<numBlocks, numThreadsPerBlock>>>(
+                d_ATemp, d_BTemp, d_CTemp, M, N, K, stride_a, stride_b, stride_c);
+    }
     return;
 }
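A host-side sketch of the new layout-free reference_gemm signature (B is now passed as a K x N tensor and the Layout template parameters are gone); M/N/K and the all-fp32 types are arbitrary example values.

// gemm_reference_sketch.cpp
#include "ck_tile/host/reference/reference_gemm.hpp"
#include "ck_tile/host/host_tensor.hpp"
#include "ck_tile/host/fill.hpp"

int main()
{
    const int M = 32, N = 64, K = 16;
    ck_tile::HostTensor<float> a({M, K});
    ck_tile::HostTensor<float> b({K, N}); // note: B is K x N, no layout template argument
    ck_tile::HostTensor<float> c({M, N});

    ck_tile::FillUniformDistribution<float>{-1.f, 1.f}(a);
    ck_tile::FillUniformDistribution<float>{-1.f, 1.f}(b);

    // template parameters: ADataType, BDataType, AccDataType, CDataType
    ck_tile::reference_gemm<float, float, float, float>(a, b, c);
    return 0;
}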
include/ck_tile/host/reference/reference_layernorm2d.hpp → include/ck_tile/host/reference/reference_layernorm2d_fwd.hpp

@@ -8,20 +8,44 @@
 namespace ck_tile {

+// Note: for simplicity, each functor only care about single M
+struct reference_layernorm2d_default_epilogue
+{
+    template <typename OutDataType, typename AccDataType>
+    void operator()(int m, HostTensor<OutDataType>& o, const HostTensor<AccDataType>& acc)
+    {
+        const int N = acc.mDesc.get_lengths()[1];
+        for(int n = 0; n < N; ++n)
+        {
+            o(m, n) = ck_tile::type_convert<OutDataType>(acc(m, n));
+        }
+    }
+
+    template <typename OutDataType, typename AccDataType>
+    auto operator()(int m, const HostTensor<AccDataType>& acc)
+    {
+        HostTensor<OutDataType> o(acc.get_lengths(), acc.get_strides());
+        operator()(m, o, acc);
+        return o;
+    }
+};
+
 template <typename XDataType,
           typename GammaDataType,
           typename BetaDataType,
           typename ComputeDataType,
           typename YDataType,
           typename MeanDataType,
-          typename InvStdDataType>
+          typename InvStdDataType,
+          typename Epilogue = reference_layernorm2d_default_epilogue>
 void reference_layernorm2d_fwd(const HostTensor<XDataType>& x_m_n,
                                const HostTensor<GammaDataType>& gamma_n,
                                const HostTensor<BetaDataType>& beta_n,
                                HostTensor<YDataType>& y_m_n,
                                HostTensor<MeanDataType>& mean_m,
                                HostTensor<InvStdDataType>& invStd_m,
-                               ComputeDataType epsilon)
+                               ComputeDataType epsilon,
+                               Epilogue epilogue_functor = {})
 {
     auto layernorm2d_fwd_func = [&](auto m) {
         const int N = x_m_n.mDesc.get_lengths()[1];
@@ -51,16 +75,19 @@ void reference_layernorm2d_fwd(const HostTensor<XDataType>& x_m_n,
         if constexpr(!std::is_same_v<InvStdDataType, ck_tile::null_type>)
             invStd_m(m) = ck_tile::type_convert<InvStdDataType>(divisor);

+        HostTensor<ComputeDataType> acc(x_m_n.get_lengths(), x_m_n.get_strides());
         for(int n = 0; n < N; ++n)
         {
             ComputeDataType x     = ck_tile::type_convert<ComputeDataType>(x_m_n(m, n));
             ComputeDataType gamma = ck_tile::type_convert<ComputeDataType>(gamma_n(n));
             ComputeDataType beta  = ck_tile::type_convert<ComputeDataType>(beta_n(n));

-            auto y = (x - mean) * divisor;
-            y      = y * gamma + beta;
-
-            y_m_n(m, n) = ck_tile::type_convert<YDataType>(y);
+            auto a_ = (x - mean) * divisor;
+            a_      = a_ * gamma + beta;
+
+            acc(m, n) = a_;
         }
+        epilogue_functor(m, y_m_n, acc);
     };

     make_ParallelTensorFunctor(layernorm2d_fwd_func,
0 → 100644
View file @
cd4d4629
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"
namespace
ck_tile
{
#define MOE_SORTING_MOCK_ID(token_id_, topk_id_) \
static_cast<uint32_t>(((token_id_)&0x00ffffff) | (((topk_id_)&0xff) << 24))
template
<
typename
WeightType
,
typename
IndexType
=
index_t
>
CK_TILE_HOST
void
reference_moe_sorting
(
const
HostTensor
<
IndexType
>&
topk_ids
,
const
HostTensor
<
WeightType
>&
weights
,
HostTensor
<
IndexType
>&
p_sorted_token_ids
,
HostTensor
<
WeightType
>&
sorted_weight
,
HostTensor
<
IndexType
>&
sorted_expert_ids
,
index_t
&
unit_cnt
,
const
index_t
experts
,
const
index_t
unit_size
)
{
const
index_t
num_token
=
topk_ids
.
mDesc
.
get_lengths
()[
0
];
const
index_t
topk
=
topk_ids
.
mDesc
.
get_lengths
()[
1
];
// allocate a temp buffer, and fill the value with [number_token|topk]
std
::
vector
<
std
::
vector
<
IndexType
>>
expert_tokens
(
experts
,
#if CK_TILE_REFERENCE_MOE_SORTING_MOCK_ID
std
::
vector
<
IndexType
>
(
unit_size
,
MOE_SORTING_MOCK_ID
(
num_token
,
topk
)));
#else
std
::
vector
<
IndexType
>
(
unit_size
,
num_token
));
#endif
std
::
vector
<
std
::
vector
<
WeightType
>>
expert_token_weights
(
experts
,
std
::
vector
<
WeightType
>
(
unit_size
,
0
));
std
::
vector
<
IndexType
>
expert_slices
(
experts
,
1
);
std
::
vector
<
IndexType
>
expert_slice_idxs
(
experts
,
0
);
for
(
index_t
t
=
0
;
t
<
num_token
;
t
++
)
{
for
(
index_t
k
=
0
;
k
<
topk
;
k
++
)
{
IndexType
e
=
topk_ids
(
t
,
k
);
WeightType
w
=
weights
(
t
,
k
);
index_t
idx
=
expert_slice_idxs
[
e
];
if
(
idx
>
expert_slices
[
e
]
*
unit_size
-
1
)
{
expert_slices
[
e
]
++
;
index_t
new_size
=
expert_slices
[
e
]
*
unit_size
;
expert_tokens
[
e
].
resize
(
new_size
);
expert_token_weights
[
e
].
resize
(
new_size
);
for
(
index_t
i
=
(
expert_slices
[
e
]
-
1
)
*
unit_size
;
i
<
new_size
;
i
++
)
{
#if CK_TILE_REFERENCE_MOE_SORTING_MOCK_ID
expert_tokens
[
e
][
i
]
=
MOE_SORTING_MOCK_ID
(
num_token
,
topk
);
#else
expert_tokens
[
e
][
i
]
=
num_token
;
#endif
expert_token_weights
[
e
][
i
]
=
0
;
}
}
#if CK_TILE_REFERENCE_MOE_SORTING_MOCK_ID
expert_tokens
[
e
][
idx
]
=
MOE_SORTING_MOCK_ID
(
t
,
k
);
#else
expert_tokens
[
e
][
idx
]
=
t
;
#endif
expert_token_weights
[
e
][
idx
]
=
w
;
expert_slice_idxs
[
e
]
++
;
}
}
IndexType
*
out_tokens
=
p_sorted_token_ids
.
data
();
WeightType
*
out_weights
=
sorted_weight
.
data
();
IndexType
*
out_expert_id
=
sorted_expert_ids
.
data
();
for
(
index_t
e
=
0
;
e
<
experts
;
e
++
)
{
memcpy
(
out_tokens
,
expert_tokens
[
e
].
data
(),
sizeof
(
index_t
)
*
expert_slices
[
e
]
*
unit_size
);
out_tokens
+=
expert_slices
[
e
]
*
unit_size
;
memcpy
(
out_weights
,
expert_token_weights
[
e
].
data
(),
sizeof
(
WeightType
)
*
expert_slices
[
e
]
*
unit_size
);
out_weights
+=
expert_slices
[
e
]
*
unit_size
;
for
(
index_t
s
=
0
;
s
<
expert_slices
[
e
];
s
++
)
{
out_expert_id
[
s
]
=
e
;
unit_cnt
++
;
}
out_expert_id
+=
expert_slices
[
e
];
}
unit_cnt
*=
unit_size
;
return
;
}
#undef MOE_SORTING_MOCK_ID
}
// namespace ck_tile
include/ck_tile/host/reference/reference_permute.hpp
0 → 100644
View file @
cd4d4629
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"
#include <thread>
#include <numeric>
#include <functional>
namespace
ck_tile
{
/*
this will do permute + contiguous like functionality in pytorch
*/
template
<
typename
DataType
>
CK_TILE_HOST
void
reference_permute
(
const
HostTensor
<
DataType
>&
x
,
HostTensor
<
DataType
>&
y
,
std
::
vector
<
index_t
>
perm
)
{
const
auto
x_len
=
x
.
mDesc
.
get_lengths
();
const
auto
y_len
=
y
.
mDesc
.
get_lengths
();
assert
(
x_len
.
size
()
==
y_len
.
size
());
index_t
rank
=
x_len
.
size
();
const
auto
x_elm
=
std
::
accumulate
(
x_len
.
begin
(),
x_len
.
end
(),
1
,
std
::
multiplies
<
index_t
>
());
const
auto
y_elm
=
std
::
accumulate
(
y_len
.
begin
(),
y_len
.
end
(),
1
,
std
::
multiplies
<
index_t
>
());
assert
(
x_elm
==
y_elm
);
(
void
)
y_elm
;
auto
f
=
[
&
](
auto
i_element
)
{
std
::
vector
<
size_t
>
y_coord
=
[
&
]()
{
std
::
vector
<
size_t
>
tmp
(
rank
,
0
);
size_t
r
=
i_element
;
for
(
index_t
i
=
rank
-
1
;
i
>=
0
;
i
--
)
{
tmp
[
i
]
=
r
%
y_len
[
i
];
r
=
r
/
y_len
[
i
];
}
return
tmp
;
}();
std
::
vector
<
size_t
>
x_coord
=
[
&
]()
{
std
::
vector
<
size_t
>
tmp
(
rank
,
0
);
for
(
index_t
i
=
0
;
i
<
rank
;
i
++
)
{
tmp
[
perm
[
i
]]
=
y_coord
[
i
];
}
return
tmp
;
}();
// do permute
y
(
y_coord
)
=
x
(
x_coord
);
};
make_ParallelTensorFunctor
(
f
,
x_elm
)(
std
::
thread
::
hardware_concurrency
());
}
template
<
typename
DataType
>
CK_TILE_HOST
auto
reference_permute
(
const
HostTensor
<
DataType
>&
x
,
std
::
vector
<
index_t
>
perm
)
{
auto
x_shape
=
x
.
get_lengths
();
ck_tile
::
index_t
rank
=
perm
.
size
();
std
::
vector
<
ck_tile
::
index_t
>
y_shape
=
[
&
]()
{
std
::
vector
<
ck_tile
::
index_t
>
tmp
(
rank
,
0
);
for
(
int
i
=
0
;
i
<
static_cast
<
int
>
(
rank
);
i
++
)
{
tmp
[
i
]
=
x_shape
[
perm
[
i
]];
}
return
tmp
;
}();
HostTensor
<
DataType
>
y
(
y_shape
);
reference_permute
(
x
,
y
,
perm
);
return
y
;
}
}
// namespace ck_tile
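A small sketch mirroring torch.permute(...).contiguous() on the host, as the file comment describes; the 2x3x4 shape and the (2, 0, 1) permutation are arbitrary examples.

// permute_sketch.cpp
#include "ck_tile/host/reference/reference_permute.hpp"
#include "ck_tile/host/host_tensor.hpp"
#include "ck_tile/host/fill.hpp"

int main()
{
    ck_tile::HostTensor<float> x({2, 3, 4});
    ck_tile::FillUniformDistribution<float>{-1.f, 1.f}(x);

    // y_shape[i] = x_shape[perm[i]] -> y is 4 x 2 x 3, and y(c0, c1, c2) = x(c1, c2, c0)
    auto y = ck_tile::reference_permute(x, {2, 0, 1});
    return y.get_lengths()[0] == 4 ? 0 : 1;
}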
include/ck_tile/host/reference/reference_reduce.hpp

@@ -9,24 +9,25 @@
 namespace ck_tile {

-template <typename ADataType, typename AccDataType, typename BDataType>
-CK_TILE_HOST void reference_reduce(const HostTensor<ADataType>& a_m_n, HostTensor<BDataType>& b_m)
+template <typename XDataType, typename ComputeDataType, typename YDataType, typename ReduceOp>
+CK_TILE_HOST void
+reference_reduce(const HostTensor<XDataType>& x_m_n, HostTensor<YDataType>& y_m, ReduceOp reduce_op)
 {
     auto f = [&](auto m) {
-        const int N = a_m_n.mDesc.get_lengths()[1];
+        const int N = x_m_n.mDesc.get_lengths()[1];

-        AccDataType v_acc = 0;
+        ComputeDataType v_acc = reduce_op.template GetIdentityValue<ComputeDataType>();

         for(int n = 0; n < N; ++n)
         {
-            const ADataType v_a = a_m_n(m, n);
+            const ComputeDataType v_a = type_convert<ComputeDataType>(x_m_n(m, n));

-            v_acc += v_a;
+            v_acc = reduce_op(v_acc, v_a);
         }

-        b_m(m) = ck_tile::type_convert<BDataType>(v_acc);
+        y_m(m) = ck_tile::type_convert<YDataType>(v_acc);
     };

-    make_ParallelTensorFunctor(f, b_m.mDesc.get_lengths()[0])(std::thread::hardware_concurrency());
+    make_ParallelTensorFunctor(f, y_m.mDesc.get_lengths()[0])(std::thread::hardware_concurrency());
 }
 } // namespace ck_tile
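The reference now takes a reduce-op object instead of hard-coding a sum. The SumOp below is a hypothetical stand-in written only to satisfy the GetIdentityValue()/operator() interface the new code calls; it is not a ck_tile type.

// reduce_sketch.cpp
#include "ck_tile/host/reference/reference_reduce.hpp"
#include "ck_tile/host/host_tensor.hpp"
#include "ck_tile/host/fill.hpp"

// hypothetical op matching the interface used by reference_reduce
struct SumOp
{
    template <typename T>
    static constexpr T GetIdentityValue()
    {
        return T{0};
    }
    template <typename T>
    T operator()(T acc, T v) const
    {
        return acc + v;
    }
};

int main()
{
    ck_tile::HostTensor<float> x({8, 128});
    ck_tile::HostTensor<float> y({8});
    ck_tile::FillUniformDistribution<float>{-1.f, 1.f}(x);

    // row-wise sum: y(m) = sum_n x(m, n), accumulated in float
    ck_tile::reference_reduce<float, float, float>(x, y, SumOp{});
    return 0;
}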
include/ck_tile/host/reference/reference_rmsnorm2d_fwd.hpp (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"

namespace ck_tile {

template <typename XDataType,
          typename GammaDataType,
          typename ComputeDataType,
          typename YDataType,
          typename InvRmsDataType>
void reference_rmsnorm2d_fwd(const HostTensor<XDataType>& x_m_n,
                             const HostTensor<GammaDataType>& gamma_n,
                             HostTensor<YDataType>& y_m_n,
                             HostTensor<InvRmsDataType>& invRms_m,
                             ComputeDataType epsilon)
{
    auto rmsnorm2d_fwd_func = [&](auto m) {
        const int N = x_m_n.mDesc.get_lengths()[1];

        ComputeDataType mean_square = 0;
        ComputeDataType divisor     = 0;

        for(int n = 0; n < N; ++n)
        {
            ComputeDataType x = ck_tile::type_convert<ComputeDataType>(x_m_n(m, n));
            mean_square += x * x;
        }

        mean_square = mean_square / N;
        divisor = ck_tile::type_convert<ComputeDataType>(1) / ck_tile::sqrt(mean_square + epsilon);

        if constexpr(!std::is_same_v<InvRmsDataType, ck_tile::null_type>)
            invRms_m(m) = ck_tile::type_convert<InvRmsDataType>(divisor);

        for(int n = 0; n < N; ++n)
        {
            ComputeDataType x     = ck_tile::type_convert<ComputeDataType>(x_m_n(m, n));
            ComputeDataType gamma = ck_tile::type_convert<ComputeDataType>(gamma_n(n));
            auto y                = x * divisor * gamma;

            y_m_n(m, n) = ck_tile::type_convert<YDataType>(y);
        }
    };

    make_ParallelTensorFunctor(rmsnorm2d_fwd_func,
                               invRms_m.mDesc.get_lengths()[0])(std::thread::hardware_concurrency());
}
} // namespace ck_tile
include/ck_tile/host/reference/reference_rowwise_quantization2d.hpp (new file, 0 → 100644)

// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"

#include <thread>

namespace ck_tile {

template <typename XDataType, typename ScaleDataType, typename QXDataType>
CK_TILE_HOST void reference_rowwise_quantization2d(const HostTensor<XDataType>& x_m_n,
                                                   const HostTensor<ScaleDataType>& scale_m,
                                                   HostTensor<QXDataType>& qx_m_n)
{
    auto f = [&](auto m) {
        const int N = x_m_n.mDesc.get_lengths()[1];

        for(int n = 0; n < N; ++n)
        {
            auto v_x = x_m_n(m, n);
            // scale = amax / 127 for int8
            auto v_scale = type_convert<XDataType>(scale_m(m));
            auto v_qx    = v_x / v_scale;
            qx_m_n(m, n) = saturates<QXDataType>{}(v_qx);
        }
    };

    make_ParallelTensorFunctor(f, scale_m.mDesc.get_lengths()[0])(std::thread::hardware_concurrency());
}
} // namespace ck_tile
include/ck_tile/host/reference/reference_softmax.hpp
View file @
cd4d4629
 // SPDX-License-Identifier: MIT
-// Copyright (c) 2018-2023, Advanced Micro Devices, Inc. All rights reserved.
+// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

 #pragma once
...
@@ -9,43 +9,81 @@
 namespace ck_tile {

-template <typename ADataType, typename AccDataType, typename BDataType>
-CK_TILE_HOST void reference_softmax(const HostTensor<ADataType>& a_m_n,
-                                    HostTensor<BDataType>& b_m_n)
+template <typename InputType, typename ComputeType, typename OutputType = ComputeType>
+CK_TILE_HOST void
+reference_softmax(const HostTensor<InputType>& x, HostTensor<OutputType>& y, index_t dim = -1)
 {
-    auto f = [&](auto m) {
-        const int N = a_m_n.mDesc.get_lengths()[1];
-
-        AccDataType v_max = ck_tile::numeric<ADataType>::Lowest();
-
-        // max
-        for(int n = 0; n < N; ++n)
-        {
-            const ADataType v_a = a_m_n(m, n);
-
-            v_max = v_max < v_a ? v_a : v_max;
-        }
-
-        AccDataType v_exp_sum = 0;
-
-        // sum
-        for(int n = 0; n < N; ++n)
-        {
-            const ADataType v_a = a_m_n(m, n);
-
-            v_exp_sum += ck_tile::exp(v_a - v_max);
-        }
-
-        // elementwise
-        for(int n = 0; n < N; ++n)
-        {
-            const ADataType v_a = a_m_n(m, n);
-
-            b_m_n(m, n) = ck_tile::exp(v_a - v_max) / v_exp_sum;
-        }
-    };
-
-    make_ParallelTensorFunctor(f, b_m_n.mDesc.get_lengths()[0])(std::thread::hardware_concurrency());
+    index_t rank = x.get_num_of_dimension();
+    assert(rank == y.get_num_of_dimension());
+    assert(dim == -1 || dim < rank);
+    index_t target_dim  = dim == -1 ? (rank - 1) : dim;
+    index_t softmax_len = x.get_length(target_dim);
+    index_t n_parallel  = x.get_element_size() / softmax_len;
+    auto x_len          = x.get_lengths();
+
+    auto f = [&](auto i_element) {
+        std::vector<size_t> coord = [&]() {
+            std::vector<size_t> t_(rank, 0);
+            size_t r = i_element;
+            for(index_t i = rank - 1; i >= 0; i--)
+            {
+                if(i == target_dim)
+                    continue;
+                t_[i] = r % x_len[i];
+                r     = r / x_len[i];
+            }
+            return t_;
+        }();
+
+        ComputeType v_max = -ck_tile::numeric<ComputeType>::infinity();
+        // compute max
+        for(auto idx = 0; idx < softmax_len; idx++)
+        {
+            auto c_               = coord;
+            c_[target_dim]        = idx;
+            const ComputeType v_x = ck_tile::type_convert<ComputeType>(x(c_));
+            v_max                 = v_max < v_x ? v_x : v_max;
+        }
+
+        ComputeType v_exp_sum = static_cast<ComputeType>(0);
+        // sum
+        for(auto idx = 0; idx < softmax_len; idx++)
+        {
+            auto c_               = coord;
+            c_[target_dim]        = idx;
+            const ComputeType v_x = ck_tile::type_convert<ComputeType>(x(c_));
+            v_exp_sum += ck_tile::exp(v_x - v_max);
+        }
+
+        // elementwise
+        for(auto idx = 0; idx < softmax_len; idx++)
+        {
+            auto c_               = coord;
+            c_[target_dim]        = idx;
+            const ComputeType v_x = ck_tile::type_convert<ComputeType>(x(c_));
+            auto out              = ck_tile::exp(v_x - v_max) / v_exp_sum;
+            y(c_)                 = ck_tile::type_convert<OutputType>(out);
+        }
+    };
+
+    make_ParallelTensorFunctor(f, n_parallel)(std::thread::hardware_concurrency());
+}
+
+template <typename InputType, typename ComputeType, typename OutputType = ComputeType>
+CK_TILE_HOST auto reference_softmax(const HostTensor<InputType>& x, index_t dim = -1)
+{
+    HostTensor<OutputType> y(x.get_lengths(), x.get_strides());
+    reference_softmax<InputType, ComputeType, OutputType>(x, y, dim);
+    return y;
 }
 } // namespace ck_tile
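With the new dim parameter the rewritten reference works on tensors of any rank rather than only [M, N]. A short sketch of the value-returning overload, assuming float for InputType and ComputeType (OutputType defaults to ComputeType) and the length-list HostTensor constructor used elsewhere in this commit:

#include "ck_tile/host/reference/reference_softmax.hpp"

// Sketch: softmax over the last dimension of a rank-3 tensor via the new overload.
void sketch_softmax()
{
    ck_tile::HostTensor<float> x({2, 4, 8});
    // ... fill x ...
    auto y = ck_tile::reference_softmax<float, float>(x); // dim = -1: normalize the last axis
    // pass an explicit dim (0, 1, ...) to normalize a different axis
    (void)y;
}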
include/ck_tile/host/reference/reference_topk.hpp
0 → 100644
View file @
cd4d4629
// SPDX-License-Identifier: MIT
// Copyright (c) 2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/host/host_tensor.hpp"

#include <thread>
#include <numeric>
#include <functional>
#include <utility>
#include <algorithm>

namespace ck_tile {

/*
similar to torch.topk()
    x (Tensor) – the input tensor.
    k (int) – the k in “top-k”
    dim (int, optional) – the dimension to sort along
    largest (bool, optional) – largest or smallest elements
    sorted (bool, optional) – elements in sorted order or not

output:
    y_values
    y_indices

https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/TopKImpl.h
*/
template <typename DataType, typename IndexType = index_t>
CK_TILE_HOST void reference_topk(const HostTensor<DataType>& x,
                                 HostTensor<DataType>& y_values,
                                 HostTensor<IndexType>& y_indices,
                                 index_t k,
                                 index_t dim  = -1,
                                 bool largest = true,
                                 bool sorted  = true)
{
    // rank must be the same
    index_t rank = x.get_num_of_dimension();
    assert(rank == y_values.get_num_of_dimension());
    assert(rank == y_indices.get_num_of_dimension());
    assert(dim == -1 || dim < rank);
    index_t topk_dim     = dim == -1 ? (rank - 1) : dim;
    index_t topk_src_len = x.get_length(topk_dim);
    auto x_len           = x.get_lengths();
    assert(k <= topk_src_len);
    assert(k == y_values.get_length(topk_dim) && k == y_indices.get_length(topk_dim));
    index_t n_parallel = x.get_element_size() / topk_src_len;

    // clang-format off
    auto f = [&](auto i_element) {
        std::vector<size_t> topk_coord = [&](){
            std::vector<size_t> t_(rank, 0);
            size_t r = i_element;
            for(index_t i = rank - 1; i >= 0; i--)
            {
                if(i == topk_dim) continue; // topk dim should be zero
                t_[i] = r % x_len[i];
                r     = r / x_len[i];
            }
            return t_;
        }();

        using elem_t = std::pair<DataType, IndexType>;
        std::vector<elem_t> q = [&](){
            std::vector<elem_t> t_(topk_src_len);
            for(index_t i = 0; i < topk_src_len; i++)
            {
                auto c_      = topk_coord;
                c_[topk_dim] = i;
                t_[i].first  = x(c_);
                t_[i].second = i;
            }
            return t_;
        }();

        // run topk
        if(largest)
        {
            std::nth_element(q.begin(), q.begin() + k - 1, q.end(),
                [](const elem_t& lhs, const elem_t& rhs) -> bool { return lhs.first > rhs.first; });
            if(sorted)
            {
                std::sort(q.begin(), q.begin() + k - 1,
                    [](const elem_t& lhs, const elem_t& rhs) -> bool { return lhs.first > rhs.first; });
            }
        }
        else
        {
            std::nth_element(q.begin(), q.begin() + k - 1, q.end(),
                [](const elem_t& lhs, const elem_t& rhs) -> bool { return lhs.first < rhs.first; });
            if(sorted)
            {
                std::sort(q.begin(), q.begin() + k - 1,
                    [](const elem_t& lhs, const elem_t& rhs) -> bool { return lhs.first < rhs.first; });
            }
        }

        // write out
        for(index_t i = 0; i < k; i++)
        {
            auto c_       = topk_coord;
            c_[topk_dim]  = i;
            y_values(c_)  = q[i].first;
            y_indices(c_) = q[i].second;
        }
    };
    // clang-format on

    make_ParallelTensorFunctor(f, n_parallel)(std::thread::hardware_concurrency());
}

// TODO: if using this method, the return tensor would be dense(no stride)
template <typename DataType, typename IndexType = index_t>
CK_TILE_HOST auto reference_topk(const HostTensor<DataType>& x,
                                 index_t k,
                                 index_t dim  = -1,
                                 bool largest = true,
                                 bool sorted  = true)
{
    auto lens          = x.get_lengths();
    index_t target_dim = (dim == -1) ? (lens.size() - 1) : dim;
    assert(target_dim < lens.size());
    assert(k <= lens[target_dim]);
    lens[target_dim] = k;

    HostTensor<DataType> y_values(lens);
    HostTensor<IndexType> y_indices(lens);

    reference_topk<DataType, IndexType>(x, y_values, y_indices, k, dim, largest, sorted);
    return ck_tile::make_tuple(y_values, y_indices);
}
} // namespace ck_tile
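A usage sketch for the in/out overload, which avoids any assumption about how the tuple returned by the convenience overload is unpacked. The float data type and the length-list HostTensor constructor are illustrative assumptions:

#include "ck_tile/host/reference/reference_topk.hpp"

// Sketch: top-2 along the last dimension of an [M, N] tensor, analogous to torch.topk(x, 2).
void sketch_topk()
{
    std::size_t M      = 4, N = 8;
    ck_tile::index_t K = 2;
    ck_tile::HostTensor<float> x({M, N});
    ck_tile::HostTensor<float> y_values({M, static_cast<std::size_t>(K)});
    ck_tile::HostTensor<ck_tile::index_t> y_indices({M, static_cast<std::size_t>(K)});
    // ... fill x ...
    ck_tile::reference_topk<float>(
        x, y_values, y_indices, K, /*dim=*/-1, /*largest=*/true, /*sorted=*/true);
}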
include/ck_tile/ops/add_rmsnorm2d_rdquant.hpp
0 → 100644
View file @
cd4d4629
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.
#pragma once
#include "ck_tile/ops/add_rmsnorm2d_rdquant/kernel/add_rmsnorm2d_rdquant_fwd_kernel.hpp"
#include "ck_tile/ops/add_rmsnorm2d_rdquant/pipeline/add_rmsnorm2d_rdquant_fwd_pipeline_default_policy.hpp"
#include "ck_tile/ops/add_rmsnorm2d_rdquant/pipeline/add_rmsnorm2d_rdquant_fwd_pipeline_one_pass.hpp"
#include "ck_tile/ops/add_rmsnorm2d_rdquant/pipeline/add_rmsnorm2d_rdquant_fwd_pipeline_problem.hpp"
#include "ck_tile/ops/add_rmsnorm2d_rdquant/pipeline/add_rmsnorm2d_rdquant_fwd_pipeline_three_pass.hpp"
#include "ck_tile/ops/common/generic_2d_block_shape.hpp"
#include "ck_tile/ops/common/tensor_layout.hpp"
include/ck_tile/ops/add_rmsnorm2d_rdquant/kernel/add_rmsnorm2d_rdquant_fwd_kernel.hpp
0 → 100644
View file @
cd4d4629
// SPDX-License-Identifier: MIT
// Copyright (c) 2018-2024, Advanced Micro Devices, Inc. All rights reserved.

#pragma once

#include "ck_tile/core.hpp"
#include "ck_tile/ops/common.hpp"

namespace ck_tile {

// host side args
// X = A + B, Y = Rmsnorm2d(X), QY = RowwiseDynamicQuant(Y) = SaturateCast(Y / YScale)
struct AddRmsnorm2dRdquantFwdHostArgs
{
    const void* p_a;     // [m, n], input, fp16/bf16
    const void* p_b;     // [m, n], input, fp16/bf16
    const void* p_gamma; // [1, n], gamma, prec same as input

    void* p_x;      // [m, n], output, p_a + p_b, fp16/bf16
    void* p_yscale; // [m, 1], output, rowwise quant scale (amax / 127) of the result of rmsnorm2d(x)
    void* p_qy;     // [m, n], output, quantized int8 tensor of rmsnorm2d(x)

    float epsilon;

    index_t m;
    index_t n;
    index_t stride; // row_stride
};

// TODO: Extract some type to wrapper class
template <typename Pipeline_>
struct AddRmsnorm2dRdquantFwd
{
    using Pipeline = remove_cvref_t<Pipeline_>;
    using Problem  = typename Pipeline::Problem;

    using ADataType       = remove_cvref_t<typename Problem::ADataType>;
    using BDataType       = remove_cvref_t<typename Problem::BDataType>;
    using GammaDataType   = remove_cvref_t<typename Problem::GammaDataType>;
    using ComputeDataType = remove_cvref_t<typename Problem::ComputeDataType>;
    using XDataType       = remove_cvref_t<typename Problem::XDataType>;
    using YScaleDataType  = remove_cvref_t<typename Problem::YScaleDataType>;
    using QYDataType      = remove_cvref_t<typename Problem::QYDataType>;

    static constexpr bool kSaveX = Problem::kSaveX;

    static constexpr index_t Block_M = Problem::BlockShape::Block_M;
    static constexpr index_t Block_N = Problem::BlockShape::Block_N;
    static constexpr bool kPadM      = false; // always no need to pad along M
    static constexpr bool kPadN      = Problem::kPadN;
    static constexpr bool kThreePass = Problem::kThreePass;

    static constexpr index_t ThreadPerWarp_N = Problem::BlockShape::ThreadPerWarp_N;
    static constexpr index_t Vector_N        = Problem::BlockShape::Vector_N;
    static constexpr index_t Repeat_N        = Problem::BlockShape::Repeat_N;

    static constexpr auto I0 = number<0>{};
    static constexpr auto I1 = number<1>{};

    struct Kargs
    {
        const void* p_a;
        const void* p_b;
        const void* p_gamma;
        void* p_x;
        void* p_yscale;
        void* p_qy;
        float epsilon;
        index_t m;
        index_t n;
        index_t stride; // row_stride
    };
    using Hargs = AddRmsnorm2dRdquantFwdHostArgs;

    CK_TILE_HOST static constexpr Kargs MakeKargs(const Hargs& hargs)
    {
        return Kargs{hargs.p_a,
                     hargs.p_b,
                     hargs.p_gamma,
                     hargs.p_x,
                     hargs.p_yscale,
                     hargs.p_qy,
                     hargs.epsilon,
                     hargs.m,
                     hargs.n,
                     hargs.stride};
    }

    CK_TILE_HOST static constexpr auto GridSize(const Hargs& hargs)
    {
        return dim3(integer_divide_ceil(hargs.m, Block_M));
    }

    CK_TILE_HOST static constexpr auto BlockSize() { return Problem::BlockShape::BlockSize; }

    // clang-format off
    template <typename T> struct t2s;
    template <> struct t2s<float>           { static constexpr const char * name = "fp32"; };
    template <> struct t2s<ck_tile::fp16_t> { static constexpr const char * name = "fp16"; };
    template <> struct t2s<ck_tile::bf16_t> { static constexpr const char * name = "bf16"; };
    template <> struct t2s<ck_tile::fp8_t>  { static constexpr const char * name = "fp8"; };
    template <> struct t2s<ck_tile::bf8_t>  { static constexpr const char * name = "bf8"; };
    // clang-format on

    // in byte
    CK_TILE_HOST_DEVICE static constexpr index_t GetSmemSize() { return Pipeline::GetSmemSize(); }

    CK_TILE_HOST static std::string GetName()
    {
        // clang-format off
        using S_ = typename Problem::BlockShape;
        auto surfix = [&] () {
            std::string n;
            if (kPadN) n += "_pn";
            if (kSaveX) n += "_x";
            if (kThreePass) n += "_2p";
            return n; }();

        #define _SS_  std::string
        #define _TS_  std::to_string
        return _SS_("add_rmsnorm2d_rdquant_fwd_") + _SS_(t2s<XDataType>::name) + "_" +
            _TS_(S_::Block_M) + "x" + _TS_(S_::Block_N) + "_" +
            _TS_(S_::WarpPerBlock_M) + "x" + _TS_(S_::WarpPerBlock_N) + "_" +
            _TS_(S_::Warp_M) + "x" + _TS_(S_::Warp_N) + "_" +
            _TS_(S_::Vector_M) + "x" + _TS_(S_::Vector_N) + "_" +
            _SS_(Pipeline::name) + surfix;
        #undef _SS_
        #undef _TS_
        // clang-format on
    }

    CK_TILE_DEVICE void operator()(Kargs kargs) const
    {
        const auto iM = get_block_id() * Block_M;

        const auto a_window = [&]() {
            const auto tmp_ = make_naive_tensor_view<address_space_enum::global>(
                static_cast<const ADataType*>(kargs.p_a),
                make_tuple(kargs.m, kargs.n),
                make_tuple(kargs.stride, 1),
                number<Vector_N>{},
                number<1>{});

            const auto tmp2_ = pad_tensor_view(
                tmp_, make_tuple(number<Block_M>{}, number<Block_N>{}), sequence<kPadM, kPadN>{});
            return make_tile_window(
                tmp2_, make_tuple(number<Block_M>{}, number<Block_N>{}), {iM, 0});
        }();

        const auto b_window = [&]() {
            const auto tmp_ = make_naive_tensor_view<address_space_enum::global>(
                static_cast<const BDataType*>(kargs.p_b),
                make_tuple(kargs.m, kargs.n),
                make_tuple(kargs.stride, 1),
                number<Vector_N>{},
                number<1>{});

            const auto tmp2_ = pad_tensor_view(
                tmp_, make_tuple(number<Block_M>{}, number<Block_N>{}), sequence<kPadM, kPadN>{});
            return make_tile_window(
                tmp2_, make_tuple(number<Block_M>{}, number<Block_N>{}), {iM, 0});
        }();

        const auto gamma_window = [&]() {
            const auto tmp_ = make_naive_tensor_view<address_space_enum::global>(
                static_cast<const GammaDataType*>(kargs.p_gamma),
                make_tuple(kargs.n),
                make_tuple(1),
                number<Vector_N>{},
                number<1>{});

            const auto tmp2_ =
                pad_tensor_view(tmp_, make_tuple(number<Block_N>{}), sequence<kPadN>{});

            return make_tile_window(tmp2_, make_tuple(number<Block_N>{}), {0});
        }();

        auto x_window = [&]() {
            if constexpr(kSaveX)
            {
                const auto tmp2_ = [&]() {
                    const auto tmp_ = make_naive_tensor_view<address_space_enum::global>(
                        static_cast<XDataType*>(kargs.p_x),
                        make_tuple(kargs.m, kargs.n),
                        make_tuple(kargs.stride, 1),
                        number<Vector_N>{},
                        number<1>{});

                    return pad_tensor_view(tmp_,
                                           make_tuple(number<Block_M>{}, number<Block_N>{}),
                                           sequence<kPadM, kPadN>{});
                }();
                return make_tile_window(
                    tmp2_, make_tuple(number<Block_M>{}, number<Block_N>{}), {iM, 0});
            }
            else
                return make_null_tile_window(make_tuple(number<Block_M>{}, number<Block_N>{}));
        }();

        auto yscale_window = [&]() {
            auto tmp_ = make_naive_tensor_view<address_space_enum::global>(
                static_cast<YScaleDataType*>(kargs.p_yscale),
                make_tuple(kargs.m),
                make_tuple(1),
                number<1>{});

            auto tmp2_ = pad_tensor_view(tmp_, make_tuple(number<Block_M>{}), sequence<kPadM>{});

            return make_tile_window(tmp2_, make_tuple(number<Block_M>{}), {iM});
        }();

        auto qy_window = [&]() {
            auto tmp_ = make_naive_tensor_view<address_space_enum::global>(
                static_cast<QYDataType*>(kargs.p_qy),
                make_tuple(kargs.m, kargs.n),
                make_tuple(kargs.stride, 1),
                number<Vector_N>{},
                number<1>{});

            auto tmp2_ = pad_tensor_view(
                tmp_, make_tuple(number<Block_M>{}, number<Block_N>{}), sequence<kPadM, kPadN>{});

            return make_tile_window(
                tmp2_, make_tuple(number<Block_M>{}, number<Block_N>{}), {iM, 0});
        }();

        __shared__ char smem[GetSmemSize()];

        Pipeline{}(a_window,
                   b_window,
                   gamma_window,
                   x_window,
                   yscale_window,
                   qy_window,
                   static_cast<const ComputeDataType>(kargs.epsilon),
                   kargs.n,
                   smem);
    }
};
} // namespace ck_tile
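The kernel functor itself only consumes Kargs; the host builds them from the plain-pointer Hargs struct above. A sketch of that host-side preparation, stopping short of the actual launch (ck_tile's kernel-launch wrappers are not part of this diff, so they are not shown); the Kernel alias, device pointers, and problem sizes are hypothetical inputs:

#include "ck_tile/ops/add_rmsnorm2d_rdquant.hpp"

// Sketch: host-side argument preparation for an (assumed) concrete instantiation,
//   using Kernel = ck_tile::AddRmsnorm2dRdquantFwd<SomePipeline>;
// a_dev/b_dev/gamma_dev/x_dev/yscale_dev/qy_dev are hypothetical device buffers.
template <typename Kernel>
void sketch_prepare_launch(const void* a_dev, const void* b_dev, const void* gamma_dev,
                           void* x_dev, void* yscale_dev, void* qy_dev,
                           ck_tile::index_t m, ck_tile::index_t n)
{
    ck_tile::AddRmsnorm2dRdquantFwdHostArgs hargs{
        a_dev, b_dev, gamma_dev,   // inputs: A, B, gamma
        x_dev, yscale_dev, qy_dev, // outputs: X = A + B, per-row scale, quantized Y
        1e-6f,                     // epsilon
        m, n, /*row stride*/ n};

    auto kargs = Kernel::MakeKargs(hargs); // device-side argument struct
    auto grids = Kernel::GridSize(hargs);  // one block per Block_M rows
    auto block = Kernel::BlockSize();      // Problem::BlockShape::BlockSize
    (void)kargs; (void)grids; (void)block; // hand these to the launch wrapper
}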