Commit 7303b516 authored by Bartlomiej Kocot
Browse files

Minor fixes

parent 95a1112f
...@@ -367,8 +367,7 @@ struct DeviceGroupedConvBwdWeight_Wmma_CShuffle ...@@ -367,8 +367,7 @@ struct DeviceGroupedConvBwdWeight_Wmma_CShuffle
template <index_t NDim, typename ck::enable_if<NDim == 3, bool>::type = false> template <index_t NDim, typename ck::enable_if<NDim == 3, bool>::type = false>
static auto GetABCGridDesc() static auto GetABCGridDesc()
{ {
const index_t dim = 1; const index_t dim = 1;
const index_t batch = 1;
const std::array<index_t, NDimSpatial> lengths{1, 1, 1}; const std::array<index_t, NDimSpatial> lengths{1, 1, 1};
const std::array<index_t, NDimSpatial + 3> strides{1, 1, 1, 1, 1, 1}; const std::array<index_t, NDimSpatial + 3> strides{1, 1, 1, 1, 1, 1};
const std::array<index_t, NDimSpatial> params{1, 1, 1}; const std::array<index_t, NDimSpatial> params{1, 1, 1};
......
...@@ -14,13 +14,10 @@ namespace tensor_operation { ...@@ -14,13 +14,10 @@ namespace tensor_operation {
namespace device { namespace device {
namespace instance { namespace instance {
using BF16 = ck::bhalf_t; using F16 = ck::half_t;
using F16 = ck::half_t; using F32 = float;
using F32 = float; using I8 = int8_t;
using I8 = int8_t; using I32 = int32_t;
using I32 = int32_t;
using Empty_Tuple = ck::Tuple<>;
template <ck::index_t... Is> template <ck::index_t... Is>
using S = ck::Sequence<Is...>; using S = ck::Sequence<Is...>;
......
...@@ -7,14 +7,14 @@ foreach(gpu IN LISTS GPU_TARGETS) ...@@ -7,14 +7,14 @@ foreach(gpu IN LISTS GPU_TARGETS)
add_gtest_executable(test_grouped_convnd_bwd_weight test_grouped_convnd_bwd_weight.cpp) add_gtest_executable(test_grouped_convnd_bwd_weight test_grouped_convnd_bwd_weight.cpp)
target_link_libraries(test_grouped_convnd_bwd_weight PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance) target_link_libraries(test_grouped_convnd_bwd_weight PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance)
add_gtest_executable(test_grouped_convnd_bwd_weight_interface test_grouped_convnd_bwd_weight_interface_xdl.cpp) add_gtest_executable(test_grouped_convnd_bwd_weight_interface test_grouped_convnd_bwd_weight_interface_xdl.cpp)
target_link_libraries(test_grouped_convnd_bwd_weight_interface PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance) target_link_libraries(test_grouped_convnd_bwd_weight_interface PRIVATE utility)
set(target 1) set(target 1)
endif() endif()
if(gpu IN_LIST gpu_list_wmma AND target EQUAL 0) if(gpu IN_LIST gpu_list_wmma AND target EQUAL 0)
add_gtest_executable(test_grouped_convnd_bwd_weight test_grouped_convnd_bwd_weight.cpp) add_gtest_executable(test_grouped_convnd_bwd_weight test_grouped_convnd_bwd_weight.cpp)
target_link_libraries(test_grouped_convnd_bwd_weight PRIVATE utility device_grouped_conv3d_bwd_weight_instance) target_link_libraries(test_grouped_convnd_bwd_weight PRIVATE utility device_grouped_conv3d_bwd_weight_instance)
add_gtest_executable(test_grouped_convnd_bwd_weight_interface test_grouped_convnd_bwd_weight_interface_wmma.cpp) add_gtest_executable(test_grouped_convnd_bwd_weight_interface test_grouped_convnd_bwd_weight_interface_wmma.cpp)
target_link_libraries(test_grouped_convnd_bwd_weight_interface PRIVATE utility device_grouped_conv1d_bwd_weight_instance device_grouped_conv2d_bwd_weight_instance device_grouped_conv3d_bwd_weight_instance) target_link_libraries(test_grouped_convnd_bwd_weight_interface PRIVATE utility)
set(target 1) set(target 1)
endif() endif()
endforeach() endforeach()
\ No newline at end of file
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment