Commit 63ea1d70 authored by letaoqin

update include file name

parent cef44211
@@ -9,7 +9,7 @@
 #include "ck/utility/common_header.hpp"
 #include "ck/tensor_description/tensor_descriptor.hpp"
 #include "ck/tensor_description/tensor_descriptor_helper.hpp"
-#include "ck/tensor_operation/gpu/device/device_batched_multiple_head_flash_attention_fwd.hpp"
+#include "ck/tensor_operation/gpu/device/device_batched_mha_infer.hpp"
 #include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
 #include "ck/tensor_operation/gpu/device/matrix_padder.hpp"
 #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
@@ -10,7 +10,7 @@
 #include "ck/tensor_description/tensor_descriptor.hpp"
 #include "ck/tensor_description/tensor_descriptor_helper.hpp"
 #include "ck/tensor_operation/gpu/device/tensor_layout.hpp"
-#include "ck/tensor_operation/gpu/device/device_grouped_multiple_head_flash_attention_fwd.hpp"
+#include "ck/tensor_operation/gpu/device/device_grouped_mha_infer.hpp"
 #include "ck/tensor_operation/gpu/device/gemm_specialization.hpp"
 #include "ck/tensor_operation/gpu/device/matrix_padder.hpp"
 #include "ck/tensor_operation/gpu/grid/gridwise_batched_mha_fwd_xdl_cshuffle.hpp"
@@ -231,7 +231,7 @@ struct DeviceGroupedMultiheadAttentionForward_Xdl
 #endif
 using DeviceOp = DeviceGroupedMultiheadAttentionForward_Xdl;
-using ProblemDesc = typename DeviceGroupedMultiheadAttentionForward<NumDimG,
+using ProblemDesc = typename DeviceGroupedMultiheadAttentionInfer<NumDimG,
 NumDimM,
 NumDimN,
 NumDimK,
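For reference, the rename maps the old forward headers to the new inference headers: device_batched_multiple_head_flash_attention_fwd.hpp becomes device_batched_mha_infer.hpp, device_grouped_multiple_head_flash_attention_fwd.hpp becomes device_grouped_mha_infer.hpp, and the grouped device op now takes its ProblemDesc from the DeviceGroupedMultiheadAttentionInfer interface. A minimal include-check sketch follows; the empty main and the assumption that the renamed headers are self-contained are illustrative and not part of this commit.

// Sketch only: a consumer translation unit after the rename. It is expected
// to compile against a CK tree that contains the renamed headers; nothing
// here exercises the attention kernels themselves.
#include "ck/tensor_operation/gpu/device/device_batched_mha_infer.hpp"
#include "ck/tensor_operation/gpu/device/device_grouped_mha_infer.hpp"

int main() { return 0; }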