Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
gaoqiong
composable_kernel
Commits
dd6a8de4
Commit
dd6a8de4
authored
Apr 06, 2022
by
Jehandad Khan
Browse files
Merge branch 'develop' into jd/dev_pkg
parents
0aa899aa
abf4bdb9
Changes
470
Hide whitespace changes
Inline
Side-by-side
Showing
10 changed files
with
1730 additions
and
177 deletions
+1730
-177
test/grouped_gemm/CMakeLists.txt
test/grouped_gemm/CMakeLists.txt
+3
-0
test/grouped_gemm/grouped_gemm_fp16.cpp
test/grouped_gemm/grouped_gemm_fp16.cpp
+203
-0
test/include/test_util.hpp
test/include/test_util.hpp
+0
-84
test/magic_number_division/magic_number_division.cpp
test/magic_number_division/magic_number_division.cpp
+7
-28
test/reduce/CMakeLists.txt
test/reduce/CMakeLists.txt
+7
-0
test/reduce/reduce_no_index.cpp
test/reduce/reduce_no_index.cpp
+667
-0
test/reduce/reduce_util.hpp
test/reduce/reduce_util.hpp
+19
-0
test/reduce/reduce_with_index.cpp
test/reduce/reduce_with_index.cpp
+669
-0
test/reference_conv_fwd/reference_conv_fwd.cpp
test/reference_conv_fwd/reference_conv_fwd.cpp
+147
-53
test/space_filling_curve/space_filling_curve.cpp
test/space_filling_curve/space_filling_curve.cpp
+8
-12
No files found.
test/grouped_gemm/CMakeLists.txt
0 → 100644
View file @
dd6a8de4
# Build the fp16 grouped-GEMM test binary from its single source file.
add_test_executable(test_grouped_gemm_fp16 grouped_gemm_fp16.cpp)
# Host-side tensor utilities used for reference results and verification.
target_link_libraries(test_grouped_gemm_fp16 PRIVATE host_tensor)
# Library that registers the device grouped-GEMM instances under test.
target_link_libraries(test_grouped_gemm_fp16 PRIVATE device_grouped_gemm_instance)
test/grouped_gemm/grouped_gemm_fp16.cpp
0 → 100644
View file @
dd6a8de4
#include <iostream>
#include <numeric>
#include <initializer_list>
#include <cstdlib>
#include <stdlib.h>
#include <half.hpp>
#include "check_err.hpp"
#include "config.hpp"
#include "print.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_gemm.hpp"
#include "device_tensor.hpp"
#include "device_grouped_gemm_xdl.hpp"
#include "element_wise_operation.hpp"
#include "reference_gemm.hpp"
#include "gemm_specialization.hpp"
// Identity elementwise operation applied to the A, B and C operands.
using PassThrough = ck::tensor_operation::element_wise::PassThrough;

// Owning pointer type for grouped-GEMM device instances whose A/B/C
// elementwise ops are all pass-through.
using DeviceGroupedGemmPtr_ =
    ck::tensor_operation::device::DeviceGroupedGemmPtr<
        ck::tensor_operation::element_wise::PassThrough,
        ck::tensor_operation::element_wise::PassThrough,
        ck::tensor_operation::element_wise::PassThrough>;

namespace ck {
namespace tensor_operation {
namespace device {
namespace device_grouped_gemm_instance {

// Defined in the device_grouped_gemm_instance library: appends every
// f16 (row-major A, column-major B -> row-major C) instance to the vector.
void add_device_grouped_gemm_xdl_f16_f16_f16_mk_nk_mn_instances(
    std::vector<DeviceGroupedGemmPtr_>&);

} // namespace device_grouped_gemm_instance
} // namespace device
} // namespace tensor_operation
} // namespace ck
namespace
{
// Problem types for this test: half-precision operands.
// AccDataType is declared as float here but is not referenced in the
// visible code of this file.
using ADataType   = ck::half_t;
using BDataType   = ck::half_t;
using CDataType   = ck::half_t;
using AccDataType = float;

// Layouts: A[M,K] row-major, B[K,N] column-major, C[M,N] row-major
// (the "mk_nk_mn" configuration named by the instance factory above).
using ALayout = ck::tensor_layout::gemm::RowMajor;
using BLayout = ck::tensor_layout::gemm::ColumnMajor;
using CLayout = ck::tensor_layout::gemm::RowMajor;
// Runs one grouped-GEMM device instance on a random batch of problems and
// verifies every group against the host reference GEMM.
// Returns true when the instance supports the argument and all groups match;
// false on the first mismatch or when the argument is unsupported.
bool TestGroupedGemm(DeviceGroupedGemmPtr_& groupedGemmPtr)
{
    // 1..10 random GEMM problems per call.
    // NOTE(review): rand() is never seeded, so every run sees the same
    // pseudo-random sequence (deterministic test) — confirm intended.
    int group_count = rand() % 10 + 1;

    // GEMM shape
    std::vector<ck::tensor_operation::device::GemmShape> gemm_shapes;
    std::vector<const void*> p_a, p_b;
    std::vector<void*> p_c;

    gemm_shapes.reserve(group_count);

    for(int i = 0; i < group_count; i++)
    {
        // M/N are multiples of 256, K multiples of 128.
        int M = 256 + 256 * (rand() % 10);
        int N = 256 + 256 * (rand() % 10);
        int K = 128 + 128 * (rand() % 10);

        // Leading dimension = contiguous extent for each layout.
        int AStride = std::is_same<ck::tensor_layout::gemm::RowMajor, ALayout>::value ? K : M;
        int BStride = std::is_same<ck::tensor_layout::gemm::RowMajor, BLayout>::value ? N : K;
        int CStride = std::is_same<ck::tensor_layout::gemm::RowMajor, CLayout>::value ? N : M;

        gemm_shapes.push_back({M, N, K, AStride, BStride, CStride});
    }

    // Build a 2-d host descriptor for either row- or column-major data.
    auto f_host_tensor_descriptor =
        [](std::size_t row, std::size_t col, std::size_t stride, auto layout) {
            if(std::is_same<decltype(layout), ck::tensor_layout::gemm::RowMajor>::value)
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({stride, 1}));
            }
            else
            {
                return HostTensorDescriptor(std::vector<std::size_t>({row, col}),
                                            std::vector<std::size_t>({1, stride}));
            }
        };

    std::vector<Tensor<ADataType>> a_tensors;
    std::vector<Tensor<BDataType>> b_tensors;
    std::vector<Tensor<CDataType>> c_host_tensors;   // reference results
    std::vector<Tensor<CDataType>> c_device_tensors; // results read back from device

    a_tensors.reserve(group_count);
    b_tensors.reserve(group_count);
    c_host_tensors.reserve(group_count);
    c_device_tensors.reserve(group_count);

    using DeviceMemPtr = std::unique_ptr<DeviceMem>;

    std::vector<DeviceMemPtr> a_tensors_device, b_tensors_device, c_tensors_device;

    a_tensors_device.reserve(group_count);
    b_tensors_device.reserve(group_count);
    c_tensors_device.reserve(group_count);

    // size_t index: avoids the signed/unsigned comparison of the original.
    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
    {
        a_tensors.emplace_back(Tensor<ADataType>(f_host_tensor_descriptor(
            gemm_shapes[i].M, gemm_shapes[i].K, gemm_shapes[i].StrideA, ALayout{})));
        b_tensors.emplace_back(Tensor<BDataType>(f_host_tensor_descriptor(
            gemm_shapes[i].K, gemm_shapes[i].N, gemm_shapes[i].StrideB, BLayout{})));
        c_host_tensors.emplace_back(Tensor<CDataType>(f_host_tensor_descriptor(
            gemm_shapes[i].M, gemm_shapes[i].N, gemm_shapes[i].StrideC, CLayout{})));
        c_device_tensors.emplace_back(Tensor<CDataType>(f_host_tensor_descriptor(
            gemm_shapes[i].M, gemm_shapes[i].N, gemm_shapes[i].StrideC, CLayout{})));

        a_tensors[i].GenerateTensorValue(GeneratorTensor_2<ADataType>{-5, 5});
        b_tensors[i].GenerateTensorValue(GeneratorTensor_2<BDataType>{-5, 5});
    }

    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
    {
        a_tensors_device.emplace_back(
            std::make_unique<DeviceMem>(sizeof(ADataType) * a_tensors[i].mDesc.GetElementSize()));
        b_tensors_device.emplace_back(
            std::make_unique<DeviceMem>(sizeof(BDataType) * b_tensors[i].mDesc.GetElementSize()));
        c_tensors_device.emplace_back(std::make_unique<DeviceMem>(
            sizeof(CDataType) * c_device_tensors[i].mDesc.GetElementSize()));

        a_tensors_device[i]->ToDevice(a_tensors[i].mData.data());
        b_tensors_device[i]->ToDevice(b_tensors[i].mData.data());

        p_a.push_back(a_tensors_device[i]->GetDeviceBuffer());
        p_b.push_back(b_tensors_device[i]->GetDeviceBuffer());
        p_c.push_back(c_tensors_device[i]->GetDeviceBuffer());
    }

    auto a_element_op = PassThrough{};
    auto b_element_op = PassThrough{};
    auto c_element_op = PassThrough{};

    // do GEMM
    auto invoker_ptr  = groupedGemmPtr->MakeInvokerPointer();
    auto argument_ptr = groupedGemmPtr->MakeArgumentPointer(
        p_a, p_b, p_c, gemm_shapes, a_element_op, b_element_op, c_element_op);

    // BUGFIX: the support check must happen BEFORE launching the kernel.
    // The original called Run() first and only tested IsSupportedArgument
    // inside the verification loop (redundantly, once per group), i.e. an
    // unsupported kernel was already executed by the time it was rejected.
    if(!groupedGemmPtr->IsSupportedArgument(argument_ptr.get()))
    {
        return false;
    }

    invoker_ptr->Run(argument_ptr.get());

    for(std::size_t i = 0; i < gemm_shapes.size(); i++)
    {
        c_tensors_device[i]->FromDevice(c_device_tensors[i].mData.data());

        using ReferenceGemmInstance = ck::tensor_operation::host::
            ReferenceGemm<ADataType, BDataType, CDataType, PassThrough, PassThrough, PassThrough>;

        auto ref_gemm     = ReferenceGemmInstance{};
        auto ref_invoker  = ref_gemm.MakeInvoker();
        auto ref_argument = ref_gemm.MakeArgument(
            a_tensors[i], b_tensors[i], c_host_tensors[i], a_element_op, b_element_op, c_element_op);

        ref_invoker.Run(ref_argument);

        bool res = ck::utils::check_err(c_host_tensors[i].mData, c_device_tensors[i].mData);

        std::cout << "group_id: " << i << (res ? " SUCCESS" : " FAILURE") << std::endl;

        if(!res)
            return false;
    }

    return true;
}
}
// anonymous namespace
// Entry point: runs TestGroupedGemm over every registered fp16 grouped-GEMM
// instance and reports an aggregate pass/fail.
int main()
{
    // Collect all library-provided fp16 grouped-GEMM instances.
    std::vector<DeviceGroupedGemmPtr_> groupedGemmPtrs;

    ck::tensor_operation::device::device_grouped_gemm_instance::
        add_device_grouped_gemm_xdl_f16_f16_f16_mk_nk_mn_instances(groupedGemmPtrs);

    // Every instance is exercised even after a failure, so the log shows
    // the status of each one.
    bool all_passed = true;

    for(auto& gemmPtr : groupedGemmPtrs)
    {
        all_passed = TestGroupedGemm(gemmPtr) && all_passed;
    }

    std::cout << "TestGroupedGemm ..... " << (all_passed ? "SUCCESS" : "FAILURE") << std::endl;

    return all_passed ? 0 : 1;
}
test/include/test_util.hpp
deleted
100644 → 0
View file @
0aa899aa
#ifndef TEST_UTIL_HPP
#define TEST_UTIL_HPP
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <limits>
#include <type_traits>
#include <vector>
namespace
test_util
{
// Element-wise comparison of a floating-point result against a reference.
// An element fails when |out - ref| exceeds atol + rtol*|ref| or when either
// value is non-finite. The first four failures and the maximum error are
// printed together with `msg`; returns true iff all elements pass and the
// sizes match.
template <typename T>
typename std::enable_if<std::is_floating_point<T>::value, bool>::type
check_err(const std::vector<T>& out,
          const std::vector<T>& ref,
          const std::string& msg,
          T rtol = static_cast<T>(1e-5),
          T atol = static_cast<T>(1e-8))
{
    // A size mismatch is reported once and counts as failure.
    if(out.size() != ref.size())
    {
        std::cout << "out.size() != ref.size(), :" << out.size() << " != " << ref.size()
                  << std::endl
                  << msg << std::endl;
        return false;
    }

    bool all_close  = true;
    int  err_count  = 0;
    T    max_err    = std::numeric_limits<T>::min();

    for(std::size_t idx = 0; idx < ref.size(); ++idx)
    {
        const T diff = std::abs(out[idx] - ref[idx]);

        const bool mismatch = diff > atol + rtol * std::abs(ref[idx]) ||
                              !std::isfinite(out[idx]) || !std::isfinite(ref[idx]);

        if(!mismatch)
            continue;

        // NaN never compares greater, so a NaN diff leaves max_err as-is.
        if(diff > max_err)
            max_err = diff;

        // Only the first four offending elements are printed in detail.
        if(++err_count < 5)
        {
            std::cout << std::setw(12) << std::setprecision(7) << "out[" << idx << "] != ref["
                      << idx << "]: " << out[idx] << "!=" << ref[idx] << std::endl
                      << msg << std::endl;
        }

        all_close = false;
    }

    if(!all_close)
    {
        std::cout << std::setw(12) << std::setprecision(7) << "max err: " << max_err << std::endl;
    }

    return all_close;
}
// Element-wise comparison for integral data: values must match exactly.
// The first mismatch (or a size mismatch) is printed with `msg` and ends the
// check immediately. The two trailing unnamed parameters exist only to keep
// the signature parallel with the floating-point overload's rtol/atol.
template <typename T>
typename std::enable_if<std::is_integral<T>::value, bool>::type
check_err(const std::vector<T>& out,
          const std::vector<T>& ref,
          const std::string& msg,
          T = 0,
          T = 0)
{
    if(out.size() != ref.size())
    {
        std::cout << "out.size() != ref.size(), :" << out.size() << " != " << ref.size()
                  << std::endl
                  << msg << std::endl;
        return false;
    }

    for(std::size_t idx = 0; idx < ref.size(); ++idx)
    {
        if(out[idx] == ref[idx])
            continue;

        std::cout << "out[" << idx << "] != ref[" << idx << "]: " << out[idx] << "!=" << ref[idx]
                  << std::endl
                  << msg << std::endl;
        return false;
    }

    return true;
}
}
// namespace test_util
#endif
test/magic_number_division/magic_number_division.cpp
View file @
dd6a8de4
...
@@ -4,8 +4,10 @@
...
@@ -4,8 +4,10 @@
#include <cstdlib>
#include <cstdlib>
#include <stdlib.h>
#include <stdlib.h>
#include <half.hpp>
#include <half.hpp>
#include "check_err.hpp"
#include "config.hpp"
#include "config.hpp"
#include "
print
.hpp"
#include "
magic_division
.hpp"
#include "device.hpp"
#include "device.hpp"
#include "host_tensor.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_tensor_generator.hpp"
...
@@ -54,29 +56,6 @@ __host__ void cpu_magic_number_division(uint32_t magic_multiplier,
...
@@ -54,29 +56,6 @@ __host__ void cpu_magic_number_division(uint32_t magic_multiplier,
}
}
}
}
// Returns the largest absolute element-wise difference between ref and
// result. Precondition: result has at least ref.size() elements (unchecked,
// as in the original).
//
// CLEANUP: the original also accumulated a running `error` sum and recorded
// the ref/result values at the maximum — all three were dead locals (only
// max_diff was ever returned), so they are removed here.
template <typename T>
T check_error(const std::vector<T>& ref, const std::vector<T>& result)
{
    T max_diff = 0;

    for(std::size_t i = 0; i < ref.size(); ++i)
    {
        const T diff = std::abs(ref[i] - result[i]);

        if(max_diff < diff)
            max_diff = diff;
    }

    return max_diff;
}
int
main
(
int
,
char
*
[])
int
main
(
int
,
char
*
[])
{
{
uint64_t
num_divisor
=
4096
;
uint64_t
num_divisor
=
4096
;
...
@@ -135,9 +114,9 @@ int main(int, char*[])
...
@@ -135,9 +114,9 @@ int main(int, char*[])
naive_result_dev_buf
.
FromDevice
(
naive_result_host
.
data
());
naive_result_dev_buf
.
FromDevice
(
naive_result_host
.
data
());
magic_result_dev_buf
.
FromDevice
(
magic_result_host
.
data
());
magic_result_dev_buf
.
FromDevice
(
magic_result_host
.
data
());
int32_t
max_diff
=
check_error
(
naive
_result_host
,
magic
_result_host
);
bool
res
=
ck
::
utils
::
check_err
(
magic
_result_host
,
naive
_result_host
);
if
(
max_diff
!=
0
)
if
(
!
res
)
{
{
pass
=
false
;
pass
=
false
;
continue
;
continue
;
...
@@ -149,9 +128,9 @@ int main(int, char*[])
...
@@ -149,9 +128,9 @@ int main(int, char*[])
magic_result_host2
.
data
(),
magic_result_host2
.
data
(),
num_dividend
);
num_dividend
);
max_diff
=
check_error
(
naive
_result_host
,
magic
_result_host
2
);
res
=
ck
::
utils
::
check_err
(
magic
_result_host
2
,
naive
_result_host
);
if
(
max_diff
!=
0
)
if
(
!
res
)
{
{
pass
=
false
;
pass
=
false
;
continue
;
continue
;
...
...
test/reduce/CMakeLists.txt
0 → 100644
View file @
dd6a8de4
# Two reduction test binaries: one for ops without index output (AVG etc.),
# one for ops that also produce indices (AMAX etc.).
add_test_executable(test_reduce_no_index reduce_no_index.cpp)
add_test_executable(test_reduce_with_index reduce_with_index.cpp)
# Host tensor utilities for reference computation / verification.
target_link_libraries(test_reduce_no_index PRIVATE host_tensor)
# Library registering the device reduction instances under test.
target_link_libraries(test_reduce_no_index PRIVATE device_reduce_instance)
target_link_libraries(test_reduce_with_index PRIVATE host_tensor)
target_link_libraries(test_reduce_with_index PRIVATE device_reduce_instance)
test/reduce/reduce_no_index.cpp
0 → 100644
View file @
dd6a8de4
#include "getopt.h"
#include "check_err.hpp"
#include "device_reduce_instance.hpp"
#include "reduction_enums.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduction.hpp"
#include "reduce_util.hpp"
using
namespace
ck
;
namespace
{
// Returns, in increasing order, the dimension indexes in [0, Rank) that are
// NOT listed in reduceDims (i.e. the dimensions kept by the reduction).
// reduceDims must contain exactly NumReduceDim entries, each < Rank.
template <index_t Rank, index_t NumReduceDim>
static inline std::vector<int> get_invariant_dims(const std::vector<int>& reduceDims)
{
    assert(NumReduceDim == reduceDims.size());

    // Set one bit per reduced dimension.
    int reduced_mask = 0;
    for(int dim : reduceDims)
        reduced_mask |= 1 << dim;

    // Every dimension whose bit is clear is invariant.
    std::vector<int> invariantDims;
    for(int dim = 0; dim < Rank; ++dim)
        if((reduced_mask & (1 << dim)) == 0)
            invariantDims.push_back(dim);

    return invariantDims;
}
// map the data type used by the GPU kernels to the corresponding type used by the host codes
// (identity by default; ck::half_t is handled on the host as half_float::half)
template <typename InType>
struct type_mapping
{
    using OutType = InType;
};

template <>
struct type_mapping<ck::half_t>
{
    using OutType = half_float::half;
};
// Compile-time configuration for this test: a rank-4 AVG reduction with no
// index output.
constexpr int Rank = 4;

constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::AVG;

// NOTE(review): NanOpt requests PROPAGATE_NAN while PropagateNan is false —
// confirm which of the two flags the instances/host reference actually honor.
constexpr NanPropagation NanOpt = NanPropagation::PROPAGATE_NAN;
constexpr bool PropagateNan     = false;

constexpr ReduceTensorIndices IndicesOpt = ReduceTensorIndices::NO_INDICES;
constexpr bool NeedIndices               = false;
// Runs one reduction configuration on every matching device instance and
// checks each result against the host reference implementation.
//
// init_method: 0 = leave buffers uninitialized, 1 = all ones,
//              2 = random integers in [-5, 5], otherwise random decimals.
// alpha/beta : output blend factors; beta != 0 preloads the output buffer.
// Returns true when every supported instance produced the expected data
// (unsupported instances are skipped, not counted as failures).
template <typename InDataType,
          typename AccDataType,
          typename OutDataType,
          int Rank,
          int NumReduceDim>
bool test_reduce_no_index_impl(int init_method,
                               const std::vector<size_t>& inLengths,
                               const std::vector<int>& reduceDims,
                               float alpha,
                               float beta)
{
    using namespace ck::tensor_operation::device;
    using namespace ck::tensor_operation::device::device_reduce_instance;
    using namespace ck::host_reduce;

    // Atomic-add output path is only enabled when the output type is float.
    constexpr bool out_support_atomic_add = std::is_same<OutDataType, float>::value;
    constexpr bool op_support_atomic_add  = true;
    constexpr bool use_atomic_add = (out_support_atomic_add && op_support_atomic_add);

    Tensor<InDataType> in(inLengths);

    std::vector<size_t> outLengths;

    const auto invariantDims = get_invariant_dims<Rank, NumReduceDim>(reduceDims);

    // Reducing every dimension leaves a single scalar output.
    if(reduceDims.size() == Rank)
        outLengths.push_back(1);
    else
        for(auto dim : invariantDims)
            outLengths.push_back(inLengths[dim]);

    Tensor<OutDataType> out_ref(outLengths);
    Tensor<OutDataType> out(outLengths);

    // only used when the OutDataType is bhalf_t
    Tensor<float> out_ref_fp32(outLengths);
    Tensor<float> out_fp32(outLengths);

    auto inStrides  = in.mDesc.GetStrides();
    auto outStrides = out.mDesc.GetStrides();

    size_t invariant_total_length = out.mDesc.GetElementSize();
    size_t reduce_total_length    = in.mDesc.GetElementSize() / invariant_total_length;

    std::size_t num_thread = 1;

    switch(init_method)
    {
    case 0: break;
    case 1:
        in.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread);
        if(beta != 0.0f)
            // NOTE(review): the generator is templated on InDataType although
            // out_ref holds OutDataType — confirm this is intentional.
            out_ref.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread);
        break;
    case 2:
        in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
        if(beta != 0.0f)
            out_ref.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
        break;
    default:
        in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
        if(beta != 0.0f)
            out_ref.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
    }

    // With beta blending, the device output must start from the same
    // contents as the host reference output.
    if(beta != 0.0f)
        for(size_t i = 0; i < out_ref.mDesc.GetElementSpace(); i++)
            out.mData[i] = out_ref.mData[i];

    // these buffers are usually provided by the user application
    DeviceMem in_dev(sizeof(InDataType) * in.mDesc.GetElementSpace());
    DeviceMem out_dev(sizeof(OutDataType) * out.mDesc.GetElementSpace());

    in_dev.ToDevice(in.mData.data());

    if(beta != 0.0f)
        out_dev.ToDevice(out.mData.data());

    // Elementwise-op flavors: _0 = single-pass kernels, _1 = first pass of a
    // two-pass reduction, _2 = second pass of a two-pass reduction.
    using InElementwiseOperation_0 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
    using AccElementwiseOperation_0 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation;
    using InElementwiseOperation_1 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::InElementwiseOperation;
    using AccElementwiseOperation_1 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::AccElementwiseOperation;
    using InElementwiseOperation_2 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::InElementwiseOperation;
    using AccElementwiseOperation_2 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::AccElementwiseOperation;

    using DeviceReduceInstPtr0 =
        DeviceReducePtr<InElementwiseOperation_0, AccElementwiseOperation_0>;
    using DeviceReduceInstPtr1 =
        DeviceReducePtr<InElementwiseOperation_1, AccElementwiseOperation_1>;
    using DeviceReduceInstPtr2 =
        DeviceReducePtr<InElementwiseOperation_2, AccElementwiseOperation_2>;

    std::vector<DeviceReduceInstPtr0> reduce0_ptrs; // single-pass instances
    std::vector<DeviceReduceInstPtr1> reduce1_ptrs; // first-pass (partial) instances
    std::vector<DeviceReduceInstPtr2> reduce2_ptrs; // second-pass instances

    add_device_reduce_instance_threadwise<InDataType,
                                          AccDataType,
                                          OutDataType,
                                          Rank,
                                          NumReduceDim,
                                          ReduceOpId,
                                          NanOpt,
                                          IndicesOpt>(reduce0_ptrs);

    add_device_reduce_instance_blockwise<InDataType,
                                         AccDataType,
                                         OutDataType,
                                         Rank,
                                         NumReduceDim,
                                         ReduceOpId,
                                         NanOpt,
                                         IndicesOpt>(reduce0_ptrs);

    if constexpr(use_atomic_add)
    {
        add_device_reduce_instance_multiblock_atomic_add<InDataType,
                                                         AccDataType,
                                                         OutDataType,
                                                         Rank,
                                                         NumReduceDim,
                                                         ReduceOpId,
                                                         NanOpt,
                                                         IndicesOpt>(reduce0_ptrs);
    }
    else
    {
        add_device_reduce_instance_multiblock_partial_reduce<InDataType,
                                                             AccDataType,
                                                             OutDataType,
                                                             Rank,
                                                             NumReduceDim,
                                                             ReduceOpId,
                                                             NanOpt,
                                                             IndicesOpt>(reduce1_ptrs);
    };

    // used for secondary reduction
    if constexpr(!use_atomic_add)
    {
        add_device_reduce_instance_blockwise_second_call<AccDataType,
                                                         AccDataType,
                                                         OutDataType,
                                                         Rank,
                                                         NumReduceDim,
                                                         ReduceOpId,
                                                         NanOpt,
                                                         IndicesOpt>(reduce2_ptrs);
    };

    if(reduce0_ptrs.empty() && reduce1_ptrs.empty())
    {
        throw std::runtime_error("Wrong! No device REDUCE instance found");
    };

    bool result = true;

    // Host reference runs with the host-side equivalents of the GPU types.
    using HostInDataType  = typename type_mapping<InDataType>::OutType;
    using HostOutDataType = typename type_mapping<OutDataType>::OutType;
    using HostAccDataType = typename type_mapping<AccDataType>::OutType;

    ReductionHost<HostInDataType,
                  HostAccDataType,
                  HostOutDataType,
                  ReduceOpId,
                  Rank,
                  NumReduceDim,
                  PropagateNan,
                  NeedIndices>
        hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims);

    hostReduce.Run(alpha,
                   reinterpret_cast<const HostInDataType*>(in.mData.data()),
                   beta,
                   reinterpret_cast<HostOutDataType*>(out_ref.mData.data()),
                   nullptr);

    const auto i_inLengths  = to_int_vector(inLengths);
    const auto i_inStrides  = to_int_vector(inStrides);
    const auto i_outLengths = to_int_vector(outLengths);
    const auto i_outStrides = to_int_vector(outStrides);

    // Single-pass instances: launch, read back and compare directly.
    for(auto& reduce_ptr : reduce0_ptrs)
    {
        auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths, reduceDims);

        DeviceMem ws_dev(wsSizeInBytes);

        InElementwiseOperation_0 in_elementwise_op_0(static_cast<int32_t>(reduce_total_length));
        AccElementwiseOperation_0 acc_elementwise_op_0(static_cast<int32_t>(reduce_total_length));

        auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
                                                            i_inStrides,
                                                            i_outLengths,
                                                            i_outStrides,
                                                            reduceDims,
                                                            alpha,
                                                            beta,
                                                            in_dev.GetDeviceBuffer(),
                                                            out_dev.GetDeviceBuffer(),
                                                            nullptr,
                                                            ws_dev.GetDeviceBuffer(),
                                                            in_elementwise_op_0,
                                                            acc_elementwise_op_0);

        // Skip instances that cannot handle this shape/stride combination.
        if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
            continue;

        auto invoker_ptr = reduce_ptr->MakeInvokerPointer();

        (void)invoker_ptr->Run(argument_ptr.get());

        out_dev.FromDevice(out.mData.data());

        bool single_result = true;

        // half/bhalf results are widened to fp32 before comparison.
        if constexpr(std::is_same<OutDataType, ck::half_t>::value ||
                     std::is_same<OutDataType, ck::bhalf_t>::value)
        {
            reduce_util::to_f32_vector(out, out_fp32);
            reduce_util::to_f32_vector(out_ref, out_ref_fp32);
            single_result = ck::utils::check_err(
                out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
        }
        else
        {
            single_result =
                ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
        };

        if(!single_result)
        {
            std::cout << "Fail Info: " << reduce_ptr->GetTypeString() << std::endl;
            result = false;
        }
    };

    // Two-pass instances: the first pass writes partial results into the
    // workspace; each second-pass instance then folds the workspace into the
    // final output before verification.
    for(auto& reduce_ptr : reduce1_ptrs)
    {
        auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths, reduceDims);

        DeviceMem ws_dev(wsSizeInBytes);

        InElementwiseOperation_1 in_elementwise_op_1(static_cast<int32_t>(reduce_total_length));
        AccElementwiseOperation_1 acc_elementwise_op_1(static_cast<int32_t>(reduce_total_length));

        auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
                                                            i_inStrides,
                                                            i_outLengths,
                                                            i_outStrides,
                                                            reduceDims,
                                                            alpha,
                                                            beta,
                                                            in_dev.GetDeviceBuffer(),
                                                            out_dev.GetDeviceBuffer(),
                                                            nullptr,
                                                            ws_dev.GetDeviceBuffer(),
                                                            in_elementwise_op_1,
                                                            acc_elementwise_op_1);

        if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
            continue;

        auto invoker_ptr = reduce_ptr->MakeInvokerPointer();

        (void)invoker_ptr->Run(argument_ptr.get());

        // 2-d shape of the partial result produced by the first pass.
        std::vector<int> inLengths2 = reduce_ptr->GetWorkspace2dLengths(argument_ptr.get());
        std::vector<int> inStrides2{inLengths2[1], 1};

        for(auto& reduce2_ptr : reduce2_ptrs)
        {
            InElementwiseOperation_2 in_elementwise_op_2(static_cast<int32_t>(reduce_total_length));
            AccElementwiseOperation_2 acc_elementwise_op_2(
                static_cast<int32_t>(reduce_total_length));

            auto argument2_ptr = reduce2_ptr->MakeArgumentPointer(inLengths2,
                                                                  inStrides2,
                                                                  i_outLengths,
                                                                  i_outStrides,
                                                                  reduceDims,
                                                                  alpha,
                                                                  beta,
                                                                  ws_dev.GetDeviceBuffer(),
                                                                  out_dev.GetDeviceBuffer(),
                                                                  nullptr,
                                                                  ws_dev.GetDeviceBuffer(),
                                                                  in_elementwise_op_2,
                                                                  acc_elementwise_op_2);

            if(!reduce2_ptr->IsSupportedArgument(argument2_ptr.get()))
                continue;

            std::string reduce2_name = reduce2_ptr->GetTypeString();

            auto invoker2_ptr = reduce2_ptr->MakeInvokerPointer();

            (void)invoker2_ptr->Run(argument2_ptr.get());

            out_dev.FromDevice(out.mData.data());

            bool single_result = true;

            if constexpr(std::is_same<OutDataType, ck::half_t>::value ||
                         std::is_same<OutDataType, ck::bhalf_t>::value)
            {
                reduce_util::to_f32_vector(out, out_fp32);
                reduce_util::to_f32_vector(out_ref, out_ref_fp32);
                single_result = ck::utils::check_err(
                    out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
            }
            else
            {
                single_result =
                    ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
            };

            if(!single_result)
            {
                std::cout << "Fail Info: " << reduce_ptr->GetTypeString() << " => "
                          << reduce2_ptr->GetTypeString() << std::endl;
                result = false;
            }
        };
    };

    return (result);
};
}
// anonymous namespace
// Long-option table for getopt_long(); the all-zero entry terminates it.
// Each long option maps to the same letter as its short form.
static struct option long_options[] = {{"inLengths", required_argument, nullptr, 'D'},
                                       {"reduceDimensions", required_argument, nullptr, 'R'},
                                       {"scales", required_argument, nullptr, 'S'},
                                       {"help", no_argument, nullptr, '?'},
                                       {nullptr, 0, nullptr, 0}};
// Parses and validates the command line of the reduction test.
// Options: -D/--inLengths, -R/--reduceDimensions, -S/--scales, followed by
// two positional arguments (data type, init method) — see show_usage().
class SimpleAppArgs
{
    // Parse a single value of type T from its textual representation.
    template <typename T>
    static T getSingleValueFromString(const std::string& valueStr)
    {
        std::istringstream iss(valueStr);

        T ret;

        iss >> ret;

        return (ret);
    };

    // Split a comma-separated list and parse every slice as a T.
    template <typename T>
    static std::vector<T> getTypeValuesFromString(const char* cstr_values)
    {
        std::string valuesStr(cstr_values);

        std::vector<T> values;
        std::size_t pos = 0;
        std::size_t new_pos;

        new_pos = valuesStr.find(',', pos);
        while(new_pos != std::string::npos)
        {
            const std::string sliceStr = valuesStr.substr(pos, new_pos - pos);

            T val = getSingleValueFromString<T>(sliceStr);

            values.push_back(val);

            pos     = new_pos + 1;
            new_pos = valuesStr.find(',', pos);
        };

        std::string sliceStr = valuesStr.substr(pos);

        T val = getSingleValueFromString<T>(sliceStr);

        values.push_back(val);

        return (values);
    };

    private:
    int option_index = 0; // filled in by getopt_long() for long options

    public:
    std::vector<size_t> inLengths; // input tensor lengths (must be 4-d)
    std::vector<int> reduceDims;   // dimension indexes to reduce (1, 3 or 4 of them)
    std::vector<float> scales;     // [alpha, beta]; defaults to {1.0f, 0.0f}
    int data_type;                 // positional Arg1, see show_usage()
    int init_method = 1;           // positional Arg2, see show_usage()

    public:
    void show_usage(const char* cmd)
    {
        std::cout << "Usage of " << cmd << std::endl;
        std::cout << "--inLengths or -D, comma separated list of input tensor dimension lengths "
                     "(only 4-d tensor supported)"
                  << std::endl;
        // BUGFIX: typo "seperated" -> "separated" in the usage text.
        std::cout << "--reduceDimensions or -R comma separated list of dimension indexes to reduce "
                     "(only 1 or 3 or 4 dimensions supported)"
                  << std::endl;
        std::cout << "--scales or -S, comma separated two float values for alpha and beta"
                  << std::endl;
        std::cout << "Arg1 -- data type (0: fp16, 1: fp32, 3: int8, 5: bp16, 6: fp64)" << std::endl;
        std::cout << "Arg2 -- init method(0=no init, 1=single integer value, 2=scope integer "
                     "value, 3=decimal value)"
                  << std::endl;
    };

    // Returns 0 on success, -1 when parsing failed or a value is out of
    // range (the caller prints usage / aborts). Throws on malformed options.
    int processArgs(int argc, char* argv[])
    {
        // BUGFIX: getopt_long() returns int (-1 at the end of the options);
        // the original stored it in an unsigned int, making the -1
        // comparison depend on implicit conversion.
        int ch;

        while(1)
        {
            ch = getopt_long(argc, argv, "D:R:S:", long_options, &option_index);
            if(ch == -1)
                break;
            switch(ch)
            {
            case 'D':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");
                inLengths = getTypeValuesFromString<size_t>(optarg);
                break;
            case 'R':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");
                reduceDims = getTypeValuesFromString<int>(optarg);
                break;
            case 'S':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");
                scales = getTypeValuesFromString<float>(optarg);
                break;
            case '?':
                if(std::string(long_options[option_index].name) == "help")
                {
                    show_usage(argv[0]);
                    return (-1);
                };
                break;
            default:
                show_usage(argv[0]);
                return (-1);
            };
        };

        // Two positional arguments (data type, init method) must follow.
        if(optind + 2 > argc)
            // BUGFIX: typo "argumetns" -> "arguments" in the error message.
            throw std::runtime_error("Invalid cmd-line arguments, more arguments are needed!");

        data_type   = std::atoi(argv[optind++]);
        init_method = std::atoi(argv[optind]);

        if(scales.empty())
        {
            scales.push_back(1.0f);
            scales.push_back(0.0f);
        };

        if(inLengths.size() != 4 ||
           (reduceDims.size() != 1 && reduceDims.size() != 3 && reduceDims.size() != 4))
            return (-1);

        // NOTE(review): show_usage() also advertises 6 (fp64), which is
        // rejected here — confirm which side is authoritative.
        if(data_type != 0 && data_type != 1 && data_type != 3 && data_type != 5)
            return (-1);

        return (0);
    };
};
// Dispatches one reduction test to the proper data-type instantiation of
// test_reduce_no_index_impl, selected by `data_type` and the number of
// reduced dimensions (1, 3 or 4).
// NOTE(review): this mapping uses 0 -> fp32 and 1 -> fp16, while
// SimpleAppArgs::show_usage() documents "0: fp16, 1: fp32" — one of the two
// looks inverted; confirm which is authoritative.
// Returns true on success; an unhandled data_type/reduceDims combination
// falls through and leaves `result` at its initial true.
bool test_reduce_no_index(int data_type,
                          int init_method,
                          std::vector<int> reduceDims,
                          std::vector<size_t> inLengths,
                          float alpha,
                          float beta)
{
    bool result = true;

    if(data_type == 0)
    {
        // fp32 in / fp32 accumulate / fp32 out
        switch(reduceDims.size())
        {
        case 1:
            result = test_reduce_no_index_impl<float, float, float, Rank, 1>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        case 3:
            result = test_reduce_no_index_impl<float, float, float, Rank, 3>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        case 4:
            result = test_reduce_no_index_impl<float, float, float, Rank, 4>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        };
    }
    else if(data_type == 1)
    {
        // fp16 in / fp32 accumulate / fp16 out
        switch(reduceDims.size())
        {
        case 1:
            result = test_reduce_no_index_impl<ck::half_t, float, ck::half_t, Rank, 1>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        case 3:
            result = test_reduce_no_index_impl<ck::half_t, float, ck::half_t, Rank, 3>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        case 4:
            result = test_reduce_no_index_impl<ck::half_t, float, ck::half_t, Rank, 4>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        };
    }
    else if(data_type == 3)
    {
        // int8 in / int32 accumulate / int8 out
        switch(reduceDims.size())
        {
        case 1:
            result = test_reduce_no_index_impl<int8_t, int32_t, int8_t, Rank, 1>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        case 3:
            result = test_reduce_no_index_impl<int8_t, int32_t, int8_t, Rank, 3>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        case 4:
            result = test_reduce_no_index_impl<int8_t, int32_t, int8_t, Rank, 4>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        };
    }
    else if(data_type == 5)
    {
        // bf16 in / fp32 accumulate / bf16 out
        switch(reduceDims.size())
        {
        case 1:
            result = test_reduce_no_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 1>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        case 3:
            result = test_reduce_no_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 3>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        case 4:
            result = test_reduce_no_index_impl<ck::bhalf_t, float, ck::bhalf_t, Rank, 4>(
                init_method, inLengths, reduceDims, alpha, beta);
            break;
        };
    }

    return (result);
};
// Entry point: with no CLI arguments runs a fixed smoke-test matrix of
// reduce-dimension sets; otherwise parses the command line via SimpleAppArgs
// and runs the single requested configuration.
int main(int argc, char* argv[])
{
    SimpleAppArgs args;
    bool result = true;

    if(argc == 1)
    {
        // Default smoke-test configuration.
        const int data_type   = 1;
        const int init_method = 2;
        const std::vector<size_t> inLengths{64, 4, 280, 80};

        const std::vector<std::vector<int>> v_reduceDims{{0, 1, 2, 3},
                                                         {0, 1, 2},
                                                         {1, 2, 3},
                                                         {0, 1, 3},
                                                         {0, 2, 3},
                                                         {0},
                                                         {1},
                                                         {2},
                                                         {3}};

        // Stop launching further configurations after the first failure
        // (same short-circuit behavior as the original "result && ...").
        for(const auto& reduceDims : v_reduceDims)
        {
            if(result)
                result = test_reduce_no_index(
                    data_type, init_method, reduceDims, inLengths, 1.0f, 0.0f);
        }
    }
    else
    {
        if(args.processArgs(argc, argv) < 0)
        {
            throw std::runtime_error(
                "Invalid input arguments, test_reduce_no_index could not be executed!");
        }

        // processArgs() guarantees scales holds [alpha, beta].
        result = test_reduce_no_index(args.data_type,
                                      args.init_method,
                                      args.reduceDims,
                                      args.inLengths,
                                      args.scales[0],
                                      args.scales[1]);
    }

    std::cout << "test_reduce_no_index ..... " << (result ? "SUCCESS" : "FAILURE") << std::endl;

    return result ? 0 : -1;
}
test/reduce/reduce_util.hpp
0 → 100644
View file @
dd6a8de4
#ifndef REDUCE_UTILS_HPP
#define REDUCE_UTILS_HPP
#include "data_type.hpp"
namespace
ck
{
namespace
reduce_util
{
// Widen every element of src into the fp32 tensor dst using the library's
// type_convert (covers half_t/bhalf_t).
// Precondition: dst.mData.size() >= src.mData.size() — not checked here,
// matching the original contract.
template <typename T>
void to_f32_vector(const Tensor<T>& src, Tensor<float>& dst)
{
    // BUGFIX: index is std::size_t; the old `int` counter was compared
    // against mData.size() with a signed/unsigned mismatch and could
    // overflow for very large tensors.
    for(std::size_t i = 0; i < src.mData.size(); ++i)
        dst.mData[i] = type_convert<float>(src.mData[i]);
}
}
// namespace reduce_util
}
// namespace ck
#endif
test/reduce/reduce_with_index.cpp
0 → 100644
View file @
dd6a8de4
#include "getopt.h"
#include "device_reduce_instance.hpp"
#include "reduction_enums.hpp"
#include "host_tensor.hpp"
#include "host_tensor_generator.hpp"
#include "host_reduction.hpp"
#include "check_err.hpp"
#include "reduce_util.hpp"
using
namespace
ck
;
namespace
{
// Returns the dimensions of a Rank-d tensor that are NOT reduced, in
// ascending order. `reduceDims` must contain exactly NumReduceDim entries.
template <index_t Rank, index_t NumReduceDim>
static inline std::vector<int> get_invariant_dims(const std::vector<int>& reduceDims)
{
    assert(NumReduceDim == reduceDims.size());

    // Mark each reduced dimension with one bit in a mask.
    int reduced_mask = 0;

    for(int n = 0; n < NumReduceDim; ++n)
        reduced_mask |= 1 << reduceDims[n];

    // Every dimension whose bit stayed clear is kept (invariant).
    std::vector<int> kept_dims;

    for(int dim = 0; dim < Rank; ++dim)
    {
        if((reduced_mask & (1 << dim)) == 0)
            kept_dims.push_back(dim);
    }

    return kept_dims;
}
// map the data type used by the GPU kernels to the corresponding type used by the host codes
// Primary template: by default the host-side type is identical to the kernel type.
template <typename InType>
struct type_mapping
{
    using OutType = InType;
};

// ck::half_t is a device-oriented type; on the host it is replaced by the
// half_float library's 16-bit half (the impl reinterpret_casts the buffers,
// so the two types are assumed layout-compatible — TODO confirm).
template <>
struct type_mapping<ck::half_t>
{
    using OutType = half_float::half;
};
// Compile-time configuration shared by every instantiation in this test file.
constexpr int Rank = 4; // input tensors are always 4-d (SimpleAppArgs enforces this)

constexpr ReduceTensorOp ReduceOpId = ReduceTensorOp::AMAX; // reduction operation under test

constexpr NanPropagation NanOpt = NanPropagation::PROPAGATE_NAN; // NaN option selecting the device instances

// NOTE(review): PropagateNan (used by the host reference) is false while NanOpt
// requests propagation on the device side — confirm the two are meant to differ.
constexpr bool PropagateNan = false;

constexpr ReduceTensorIndices IndicesOpt = ReduceTensorIndices::FLATTENED_INDICES;

constexpr bool NeedIndices = true; // AMAX produces indices; they are verified against the host
// Runs the index-producing reduction (AMAX + flattened indices) for one
// concrete type/rank configuration and checks every available device instance
// against the host reference implementation.
//
// Template parameters:
//   InDataType/AccDataType/OutDataType - kernel-side element types.
//   Rank         - input tensor rank (shadows the file-scope constexpr Rank).
//   NumReduceDim - number of reduced dimensions; must equal reduceDims.size().
//
// Parameters:
//   init_method - 0: leave uninitialized, 1: constant, 2: integer range,
//                 otherwise: decimal range (mirrors SimpleAppArgs usage text).
//   inLengths   - input tensor dimension lengths.
//   reduceDims  - dimension indexes to reduce.
//   alpha, beta - output scaling: out = alpha * reduce(in) + beta * out.
//
// Returns true when every supported device instance matched the host result
// (data, and indices since NeedIndices is true).
template <typename InDataType,
          typename AccDataType,
          typename OutDataType,
          int Rank,
          int NumReduceDim>
bool test_reduce_with_index_impl(int init_method,
                                 const std::vector<size_t>& inLengths,
                                 const std::vector<int>& reduceDims,
                                 float alpha,
                                 float beta)
{
    using namespace ck::tensor_operation::device;
    using namespace ck::tensor_operation::device::device_reduce_instance;
    using namespace ck::host_reduce;

    Tensor<InDataType> in(inLengths);

    std::vector<size_t> outLengths;

    const auto invariantDims = get_invariant_dims<Rank, NumReduceDim>(reduceDims);

    // A full reduction collapses the output to a single element; otherwise the
    // output keeps the lengths of the non-reduced dimensions.
    if(reduceDims.size() == Rank)
        outLengths.push_back(1);
    else
        for(auto dim : invariantDims)
            outLengths.push_back(inLengths[dim]);

    Tensor<OutDataType> out_ref(outLengths); // host reference result
    Tensor<OutDataType> out(outLengths);     // device result copied back
    Tensor<int32_t> out_indices_ref(outLengths);
    Tensor<int32_t> out_indices(outLengths);

    // only used when the OutDataType is bhalf_t
    Tensor<float> out_ref_fp32(outLengths);
    Tensor<float> out_fp32(outLengths);

    auto inStrides  = in.mDesc.GetStrides();
    auto outStrides = out.mDesc.GetStrides();

    // Number of output elements, and how many input elements fold into each.
    size_t invariant_total_length = out.mDesc.GetElementSize();
    size_t reduce_total_length    = in.mDesc.GetElementSize() / invariant_total_length;

    std::size_t num_thread = 1;

    // Initialize input (and the prior output when beta contributes to it).
    switch(init_method)
    {
    case 0: break;
    case 1:
        in.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread);
        if(beta != 0.0f)
            out_ref.GenerateTensorValue(GeneratorTensor_1<InDataType>{1}, num_thread);
        break;
    case 2:
        in.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
        if(beta != 0.0f)
            out_ref.GenerateTensorValue(GeneratorTensor_2<InDataType>{-5, 5}, num_thread);
        break;
    default:
        in.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
        if(beta != 0.0f)
            out_ref.GenerateTensorValue(GeneratorTensor_3<InDataType>{-5.0, 5.0}, num_thread);
    }

    // Both host and device must start from the same prior output when beta != 0.
    if(beta != 0.0f)
        for(size_t i = 0; i < out_ref.mDesc.GetElementSpace(); i++)
            out.mData[i] = out_ref.mData[i];

    // these buffers are usually provided by the user application
    DeviceMem in_dev(sizeof(InDataType) * in.mDesc.GetElementSpace());
    DeviceMem out_dev(sizeof(OutDataType) * out.mDesc.GetElementSpace());

    in_dev.ToDevice(in.mData.data());

    if(beta != 0.0f)
        out_dev.ToDevice(out.mData.data());

    size_t indicesSizeInBytes = NeedIndices ? out.mDesc.GetElementSize() * sizeof(int) : 0;

    DeviceMem out_indices_dev(indicesSizeInBytes);

    // Elementwise-op flavors: _0 = single-kernel path, _1 = first stage of the
    // two-stage path, _2 = second stage consuming the partial-reduce workspace.
    using InElementwiseOperation_0 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::InElementwiseOperation;
    using AccElementwiseOperation_0 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, true>::AccElementwiseOperation;
    using InElementwiseOperation_1 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::InElementwiseOperation;
    using AccElementwiseOperation_1 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, true, false>::AccElementwiseOperation;
    using InElementwiseOperation_2 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::InElementwiseOperation;
    using AccElementwiseOperation_2 =
        typename reduce_unary_operator<AccDataType, ReduceOpId, false, true>::AccElementwiseOperation;

    using DeviceReduceInstPtr0 =
        DeviceReducePtr<InElementwiseOperation_0, AccElementwiseOperation_0>;
    using DeviceReduceInstPtr1 =
        DeviceReducePtr<InElementwiseOperation_1, AccElementwiseOperation_1>;
    using DeviceReduceInstPtr2 =
        DeviceReducePtr<InElementwiseOperation_2, AccElementwiseOperation_2>;

    std::vector<DeviceReduceInstPtr0> reduce0_ptrs; // single-kernel instances
    std::vector<DeviceReduceInstPtr1> reduce1_ptrs; // multiblock partial-reduce (stage 1)
    std::vector<DeviceReduceInstPtr2> reduce2_ptrs; // blockwise second call (stage 2)

    add_device_reduce_instance_threadwise<InDataType,
                                          AccDataType,
                                          OutDataType,
                                          Rank,
                                          NumReduceDim,
                                          ReduceOpId,
                                          NanOpt,
                                          IndicesOpt>(reduce0_ptrs);

    add_device_reduce_instance_blockwise<InDataType,
                                         AccDataType,
                                         OutDataType,
                                         Rank,
                                         NumReduceDim,
                                         ReduceOpId,
                                         NanOpt,
                                         IndicesOpt>(reduce0_ptrs);

    add_device_reduce_instance_multiblock_partial_reduce<InDataType,
                                                         AccDataType,
                                                         OutDataType,
                                                         Rank,
                                                         NumReduceDim,
                                                         ReduceOpId,
                                                         NanOpt,
                                                         IndicesOpt>(reduce1_ptrs);

    // Stage 2 reads the AccDataType workspace produced by stage 1.
    add_device_reduce_instance_blockwise_second_call<AccDataType,
                                                     AccDataType,
                                                     OutDataType,
                                                     Rank,
                                                     NumReduceDim,
                                                     ReduceOpId,
                                                     NanOpt,
                                                     IndicesOpt>(reduce2_ptrs);

    if(reduce0_ptrs.empty() && reduce1_ptrs.empty())
    {
        throw std::runtime_error("Wrong! No device REDUCE instance found");
    };

    bool result = true;

    // Host-side mirror types (ck::half_t -> half_float::half, see type_mapping).
    using HostInDataType  = typename type_mapping<InDataType>::OutType;
    using HostOutDataType = typename type_mapping<OutDataType>::OutType;
    using HostAccDataType = typename type_mapping<AccDataType>::OutType;

    // Compute the reference result (and reference indices) once on the host.
    ReductionHost<HostInDataType,
                  HostAccDataType,
                  HostOutDataType,
                  ReduceOpId,
                  Rank,
                  NumReduceDim,
                  PropagateNan,
                  NeedIndices>
        hostReduce(in.mDesc, out_ref.mDesc, invariantDims, reduceDims);

    hostReduce.Run(alpha,
                   reinterpret_cast<const HostInDataType*>(in.mData.data()),
                   beta,
                   reinterpret_cast<HostOutDataType*>(out_ref.mData.data()),
                   out_indices_ref.mData.data());

    const auto i_inLengths  = to_int_vector(inLengths);
    const auto i_inStrides  = to_int_vector(inStrides);
    const auto i_outLengths = to_int_vector(outLengths);
    const auto i_outStrides = to_int_vector(outStrides);

    // --- Single-kernel instances -------------------------------------------
    for(auto& reduce_ptr : reduce0_ptrs)
    {
        auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths, reduceDims);

        DeviceMem ws_dev(wsSizeInBytes);

        InElementwiseOperation_0 in_elementwise_op_0(static_cast<int32_t>(reduce_total_length));
        AccElementwiseOperation_0 acc_elementwise_op_0(static_cast<int32_t>(reduce_total_length));

        auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
                                                            i_inStrides,
                                                            i_outLengths,
                                                            i_outStrides,
                                                            reduceDims,
                                                            alpha,
                                                            beta,
                                                            in_dev.GetDeviceBuffer(),
                                                            out_dev.GetDeviceBuffer(),
                                                            out_indices_dev.GetDeviceBuffer(),
                                                            ws_dev.GetDeviceBuffer(),
                                                            in_elementwise_op_0,
                                                            acc_elementwise_op_0);

        // Skip instances that do not support this configuration.
        if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
            continue;

        auto invoker_ptr = reduce_ptr->MakeInvokerPointer();

        (void)invoker_ptr->Run(argument_ptr.get());

        out_dev.FromDevice(out.mData.data());

        bool single_result = true;

        // half/bhalf results are compared in fp32 to use the common check_err path.
        if constexpr(std::is_same<OutDataType, ck::half_t>::value ||
                     std::is_same<OutDataType, ck::bhalf_t>::value)
        {
            reduce_util::to_f32_vector(out, out_fp32);
            reduce_util::to_f32_vector(out_ref, out_ref_fp32);
            single_result =
                ck::utils::check_err(out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
        }
        else
        {
            single_result =
                ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
        };

        if(NeedIndices)
        {
            out_indices_dev.FromDevice(out_indices.mData.data());
            single_result =
                single_result &&
                ck::utils::check_err(out_indices_ref.mData, out_indices.mData, "Error: incorrect index result!");
        };

        if(!single_result)
        {
            std::cout << "Fail Info: " << reduce_ptr->GetTypeString() << std::endl;
            result = false;
        }
    };

    // --- Two-stage instances (partial reduce + second call) ----------------
    for(auto& reduce_ptr : reduce1_ptrs)
    {
        auto wsSizeInBytes = reduce_ptr->GetWorkspaceSizeInBytes(i_inLengths, reduceDims);

        DeviceMem ws_dev(wsSizeInBytes);

        InElementwiseOperation_1 in_elementwise_op_1(static_cast<int32_t>(reduce_total_length));
        AccElementwiseOperation_1 acc_elementwise_op_1(static_cast<int32_t>(reduce_total_length));

        auto argument_ptr = reduce_ptr->MakeArgumentPointer(i_inLengths,
                                                            i_inStrides,
                                                            i_outLengths,
                                                            i_outStrides,
                                                            reduceDims,
                                                            alpha,
                                                            beta,
                                                            in_dev.GetDeviceBuffer(),
                                                            out_dev.GetDeviceBuffer(),
                                                            out_indices_dev.GetDeviceBuffer(),
                                                            ws_dev.GetDeviceBuffer(),
                                                            in_elementwise_op_1,
                                                            acc_elementwise_op_1);

        if(!reduce_ptr->IsSupportedArgument(argument_ptr.get()))
            continue;

        std::string reduce_name = reduce_ptr->GetTypeString();

        auto invoker_ptr = reduce_ptr->MakeInvokerPointer();

        // Stage 1 writes partial results into ws_dev.
        (void)invoker_ptr->Run(argument_ptr.get());

        // The workspace is a 2-d [invariant, partial] buffer consumed by stage 2.
        std::vector<int> inLengths2 = reduce_ptr->GetWorkspace2dLengths(argument_ptr.get());
        std::vector<int> inStrides2{inLengths2[1], 1};

        for(auto& reduce2_ptr : reduce2_ptrs)
        {
            InElementwiseOperation_2 in_elementwise_op_2(static_cast<int32_t>(reduce_total_length));
            AccElementwiseOperation_2 acc_elementwise_op_2(static_cast<int32_t>(reduce_total_length));

            // Stage 2 reads ws_dev as its input and finalizes into out_dev.
            auto argument2_ptr = reduce2_ptr->MakeArgumentPointer(inLengths2,
                                                                  inStrides2,
                                                                  i_outLengths,
                                                                  i_outStrides,
                                                                  reduceDims,
                                                                  alpha,
                                                                  beta,
                                                                  ws_dev.GetDeviceBuffer(),
                                                                  out_dev.GetDeviceBuffer(),
                                                                  out_indices_dev.GetDeviceBuffer(),
                                                                  ws_dev.GetDeviceBuffer(),
                                                                  in_elementwise_op_2,
                                                                  acc_elementwise_op_2);

            if(!reduce2_ptr->IsSupportedArgument(argument2_ptr.get()))
                continue;

            std::string reduce2_name = reduce2_ptr->GetTypeString();

            auto invoker2_ptr = reduce2_ptr->MakeInvokerPointer();

            (void)invoker2_ptr->Run(argument2_ptr.get());

            out_dev.FromDevice(out.mData.data());

            bool single_result = true;

            if constexpr(std::is_same<OutDataType, ck::half_t>::value ||
                         std::is_same<OutDataType, ck::bhalf_t>::value)
            {
                reduce_util::to_f32_vector(out, out_fp32);
                reduce_util::to_f32_vector(out_ref, out_ref_fp32);
                single_result =
                    ck::utils::check_err(out_fp32.mData, out_ref_fp32.mData, "Error: incorrect data result!");
            }
            else
            {
                single_result =
                    ck::utils::check_err(out.mData, out_ref.mData, "Error: incorrect data result!");
            };

            if(NeedIndices)
            {
                out_indices_dev.FromDevice(out_indices.mData.data());
                single_result =
                    single_result &&
                    ck::utils::check_err(out_indices_ref.mData, out_indices.mData, "Error: incorrect index result!");
            };

            if(!single_result)
            {
                // Report which stage-1 => stage-2 pairing failed.
                std::cout << "Fail Info: " << reduce_ptr->GetTypeString() << " => "
                          << reduce2_ptr->GetTypeString() << std::endl;
                result = false;
            }
        };
    };

    return (result);
};
}
// anonymous namespace
// Long-form command-line options recognized by SimpleAppArgs::processArgs
// (short forms -D/-R/-S via the "D:R:S:" optstring); the all-null entry is
// the terminator required by getopt_long.
static struct option long_options[] = {{"inLengths", required_argument, nullptr, 'D'},
                                       {"reduceDimensions", required_argument, nullptr, 'R'},
                                       {"scales", required_argument, nullptr, 'S'},
                                       {"help", no_argument, nullptr, '?'},
                                       {nullptr, 0, nullptr, 0}};
// Command-line argument parser for the reduce-with-index test.
//
// Options: --inLengths/-D (tensor lengths), --reduceDimensions/-R (dims to
// reduce), --scales/-S (alpha, beta), --help. Two positional arguments follow:
// data type and init method (see show_usage). processArgs() fills the public
// members and returns 0 on success, -1 on invalid input.
class SimpleAppArgs
{
    // Parse one value of type T from a string via stream extraction.
    template <typename T>
    static T getSingleValueFromString(const std::string& valueStr)
    {
        std::istringstream iss(valueStr);

        T ret;

        iss >> ret;

        return (ret);
    };

    // Split a comma-separated list and parse each slice as a T.
    // Always yields at least one element (the trailing slice), even for "".
    template <typename T>
    static std::vector<T> getTypeValuesFromString(const char* cstr_values)
    {
        std::string valuesStr(cstr_values);

        std::vector<T> values;
        std::size_t pos = 0;
        std::size_t new_pos;

        new_pos = valuesStr.find(',', pos);
        while(new_pos != std::string::npos)
        {
            const std::string sliceStr = valuesStr.substr(pos, new_pos - pos);

            T val = getSingleValueFromString<T>(sliceStr);

            values.push_back(val);

            pos     = new_pos + 1;
            new_pos = valuesStr.find(',', pos);
        };

        std::string sliceStr = valuesStr.substr(pos);

        T val = getSingleValueFromString<T>(sliceStr);

        values.push_back(val);

        return (values);
    };

    private:
    int option_index = 0; // index into long_options, updated by getopt_long

    public:
    std::vector<size_t> inLengths; // input tensor lengths (must be 4 entries)
    std::vector<int> reduceDims;   // dims to reduce (1, 3 or 4 entries)
    std::vector<float> scales;     // alpha, beta (defaults to {1.0, 0.0})
    int data_type;                 // 0/1/3/5 selector, see test_reduce_with_index
    int init_method = 1;           // tensor initialization mode

    public:
    void show_usage(const char* cmd)
    {
        std::cout << "Usage of " << cmd << std::endl;
        std::cout << "--inLengths or -D, comma separated list of input tensor dimension lengths "
                     "(only 4-d tensor supported)"
                  << std::endl;
        std::cout << "--reduceDimensions or -R comma seperated list of dimension indexes to reduce "
                     "(only 1 or 3 or 4 dimensions supported)"
                  << std::endl;
        std::cout << "--scales or -S, comma separated two float values for alpha and beta"
                  << std::endl;
        // NOTE(review): this text does not match the accepted codes below
        // (processArgs accepts 0/1/3/5 and the dispatcher maps 0->fp32,
        //  1->fp16, 3->int8, 5->bf16; 6/fp64 is rejected) — confirm and fix text.
        std::cout << "Arg1 -- data type (1: fp32, 3: int8, 5: bp16, 6: fp64)" << std::endl;
        std::cout << "Arg2 -- init method(0=no init, 1=single integer value, 2=scope integer "
                     "value, 3=decimal value)"
                  << std::endl;
    };

    int processArgs(int argc, char* argv[])
    {
        // NOTE(review): getopt_long returns int; storing in unsigned still makes
        // the == -1 end-of-options check work via integral conversion, but an
        // int would be clearer.
        unsigned int ch;

        while(1)
        {
            ch = getopt_long(argc, argv, "D:R:S:", long_options, &option_index);
            if(ch == -1)
                break;
            switch(ch)
            {
            case 'D':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                inLengths = getTypeValuesFromString<size_t>(optarg);
                break;
            case 'R':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                reduceDims = getTypeValuesFromString<int>(optarg);
                break;
            case 'S':
                if(!optarg)
                    throw std::runtime_error("Invalid option format!");

                scales = getTypeValuesFromString<float>(optarg);
                break;
            case '?':
                if(std::string(long_options[option_index].name) == "help")
                {
                    show_usage(argv[0]);
                    return (-1);
                };
                break;
            default: show_usage(argv[0]); return (-1);
            };
        };

        // Two positional arguments (data type, init method) must remain.
        // (typo "argumetns" is in the shipped message; kept verbatim)
        if(optind + 2 > argc)
            throw std::runtime_error("Invalid cmd-line arguments, more argumetns are needed!");

        data_type   = std::atoi(argv[optind++]);
        init_method = std::atoi(argv[optind]);

        // Default scaling: alpha = 1, beta = 0 (plain reduction, no blend).
        if(scales.empty())
        {
            scales.push_back(1.0f);
            scales.push_back(0.0f);
        };

        // Only rank-4 inputs with 1/3/4 reduced dims are supported.
        if(inLengths.size() != 4 ||
           (reduceDims.size() != 1 && reduceDims.size() != 3 && reduceDims.size() != 4))
            return (-1);

        if(data_type != 0 && data_type != 1 && data_type != 3 && data_type != 5)
            return (-1);

        return (0);
    };
};
bool
test_reduce_with_index
(
int
data_type
,
int
init_method
,
std
::
vector
<
int
>
reduceDims
,
std
::
vector
<
size_t
>
inLengths
,
float
alpha
,
float
beta
)
{
bool
result
=
true
;
if
(
data_type
==
0
)
{
switch
(
reduceDims
.
size
())
{
case
1
:
result
=
test_reduce_with_index_impl
<
float
,
float
,
float
,
Rank
,
1
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
case
3
:
result
=
test_reduce_with_index_impl
<
float
,
float
,
float
,
Rank
,
3
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
case
4
:
result
=
test_reduce_with_index_impl
<
float
,
float
,
float
,
Rank
,
4
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
};
}
else
if
(
data_type
==
1
)
{
switch
(
reduceDims
.
size
())
{
case
1
:
result
=
test_reduce_with_index_impl
<
ck
::
half_t
,
ck
::
half_t
,
ck
::
half_t
,
Rank
,
1
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
case
3
:
result
=
test_reduce_with_index_impl
<
ck
::
half_t
,
ck
::
half_t
,
ck
::
half_t
,
Rank
,
3
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
case
4
:
result
=
test_reduce_with_index_impl
<
ck
::
half_t
,
ck
::
half_t
,
ck
::
half_t
,
Rank
,
4
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
};
}
else
if
(
data_type
==
3
)
{
switch
(
reduceDims
.
size
())
{
case
1
:
result
=
test_reduce_with_index_impl
<
int8_t
,
int8_t
,
int8_t
,
Rank
,
1
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
case
3
:
result
=
test_reduce_with_index_impl
<
int8_t
,
int8_t
,
int8_t
,
Rank
,
3
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
case
4
:
result
=
test_reduce_with_index_impl
<
int8_t
,
int8_t
,
int8_t
,
Rank
,
4
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
};
}
else
if
(
data_type
==
5
)
{
switch
(
reduceDims
.
size
())
{
case
1
:
result
=
test_reduce_with_index_impl
<
ck
::
bhalf_t
,
float
,
ck
::
bhalf_t
,
Rank
,
1
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
case
3
:
result
=
test_reduce_with_index_impl
<
ck
::
bhalf_t
,
float
,
ck
::
bhalf_t
,
Rank
,
3
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
case
4
:
result
=
test_reduce_with_index_impl
<
ck
::
bhalf_t
,
float
,
ck
::
bhalf_t
,
Rank
,
4
>
(
init_method
,
inLengths
,
reduceDims
,
alpha
,
beta
);
break
;
};
}
return
(
result
);
};
// Entry point for the reduce-with-index test.
//
// With no command-line arguments a fixed sweep of reduce-dimension sets is run
// against a default 4-d tensor; otherwise SimpleAppArgs parses the
// configuration from the command line. Exit code 0 on success, -1 on failure.
int main(int argc, char* argv[])
{
    SimpleAppArgs args;

    bool all_ok = true;

    if(argc == 1)
    {
        // Built-in sweep: half precision, range-integer init, fixed shape.
        const int data_type   = 1;
        const int init_method = 2;

        const std::vector<size_t> default_lengths{64, 4, 280, 80};

        // Every supported combination of reduced dimensions for a rank-4 input.
        const std::vector<std::vector<int>> reduce_dim_sets{{0, 1, 2, 3},
                                                            {0, 1, 2},
                                                            {1, 2, 3},
                                                            {0, 1, 3},
                                                            {0, 2, 3},
                                                            {0},
                                                            {1},
                                                            {2},
                                                            {3}};

        // Short-circuits after the first failing configuration, as before.
        for(const auto& dims : reduce_dim_sets)
            all_ok = all_ok &&
                     test_reduce_with_index(
                         data_type, init_method, dims, default_lengths, 1.0f, 0.0f);
    }
    else
    {
        if(args.processArgs(argc, argv) < 0)
        {
            throw std::runtime_error(
                "Invalid input arguments, test_reduce_with_index could not be executed!");
        };

        all_ok = test_reduce_with_index(args.data_type,
                                        args.init_method,
                                        args.reduceDims,
                                        args.inLengths,
                                        args.scales[0],
                                        args.scales[1]);
    }

    std::cout << "test_reduce_with_index ..... " << (all_ok ? "SUCCESS" : "FAILURE") << std::endl;

    return (all_ok ? 0 : -1);
}
test/reference_conv_fwd/reference_conv_fwd.cpp
View file @
dd6a8de4
...
@@ -6,13 +6,13 @@
...
@@ -6,13 +6,13 @@
#include <type_traits>
#include <type_traits>
#include <vector>
#include <vector>
#include "check_err.hpp"
#include "config.hpp"
#include "config.hpp"
#include "conv_util
s
.hpp"
#include "conv_
fwd_
util.hpp"
#include "element_wise_operation.hpp"
#include "element_wise_operation.hpp"
#include "host_tensor.hpp"
#include "host_tensor.hpp"
#include "reference_conv_fwd.hpp"
#include "reference_conv_fwd.hpp"
#include "tensor_layout.hpp"
#include "tensor_layout.hpp"
#include "test_util.hpp"
namespace
{
namespace
{
using
InElementOp
=
ck
::
tensor_operation
::
element_wise
::
PassThrough
;
using
InElementOp
=
ck
::
tensor_operation
::
element_wise
::
PassThrough
;
...
@@ -23,11 +23,16 @@ template <typename T>
...
@@ -23,11 +23,16 @@ template <typename T>
struct
FillMonotonicSeq
struct
FillMonotonicSeq
{
{
T
m_init_value
{
0
};
T
m_init_value
{
0
};
T
m_step
{
1
};
template
<
typename
ForwardIter
>
template
<
typename
ForwardIter
>
void
operator
()(
ForwardIter
first
,
ForwardIter
last
)
const
void
operator
()(
ForwardIter
first
,
ForwardIter
last
)
const
{
{
std
::
iota
(
first
,
last
,
m_init_value
);
std
::
generate
(
first
,
last
,
[
=
,
n
=
m_init_value
]()
mutable
{
auto
tmp
=
n
;
n
+=
m_step
;
return
tmp
;
});
}
}
};
};
...
@@ -52,9 +57,10 @@ template <ck::index_t NDim,
...
@@ -52,9 +57,10 @@ template <ck::index_t NDim,
typename
OutLayout
=
ck
::
tensor_layout
::
convolution
::
NHWK
,
typename
OutLayout
=
ck
::
tensor_layout
::
convolution
::
NHWK
,
typename
FillInputOp
=
FillMonotonicSeq
<
InDataType
>,
typename
FillInputOp
=
FillMonotonicSeq
<
InDataType
>,
typename
FillWeightsOp
=
FillConstant
<
WeiDataType
>>
typename
FillWeightsOp
=
FillConstant
<
WeiDataType
>>
Tensor
<
OutDataType
>
RunReferenceConv
(
const
ck
::
conv_util
::
ConvParams
&
params
,
Tensor
<
OutDataType
>
const
FillInputOp
&
fill_input_op
=
FillInputOp
{
0
},
run_reference_convolution_forward
(
const
ck
::
utils
::
conv
::
ConvParams
&
params
,
const
FillWeightsOp
&
fill_weights_op
=
FillWeightsOp
{
0.5
f
})
const
FillInputOp
&
fill_input_op
=
FillInputOp
{},
const
FillWeightsOp
&
fill_weights_op
=
FillWeightsOp
{
0.5
f
})
{
{
std
::
vector
<
std
::
size_t
>
input_dims
{
static_cast
<
std
::
size_t
>
(
params
.
N
),
std
::
vector
<
std
::
size_t
>
input_dims
{
static_cast
<
std
::
size_t
>
(
params
.
N
),
static_cast
<
std
::
size_t
>
(
params
.
C
)};
static_cast
<
std
::
size_t
>
(
params
.
C
)};
...
@@ -75,10 +81,11 @@ Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
...
@@ -75,10 +81,11 @@ Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
std
::
begin
(
output_spatial_lengths
),
std
::
begin
(
output_spatial_lengths
),
std
::
end
(
output_spatial_lengths
));
std
::
end
(
output_spatial_lengths
));
Tensor
<
InDataType
>
input
(
ck
::
conv_util
::
GetHostTensorDescriptor
(
input_dims
,
InLayout
{}));
Tensor
<
InDataType
>
input
(
ck
::
utils
::
conv
::
get_host_tensor_descriptor
(
input_dims
,
InLayout
{}));
Tensor
<
WeiDataType
>
weights
(
ck
::
conv_util
::
GetHostTensorDescriptor
(
filter_dims
,
WeiLayout
{}));
Tensor
<
WeiDataType
>
weights
(
ck
::
utils
::
conv
::
get_host_tensor_descriptor
(
filter_dims
,
WeiLayout
{}));
Tensor
<
OutDataType
>
host_output
(
Tensor
<
OutDataType
>
host_output
(
ck
::
conv_util
::
G
et
H
ost
T
ensor
D
escriptor
(
output_dims
,
OutLayout
{}));
ck
::
utils
::
conv
::
g
et
_h
ost
_t
ensor
_d
escriptor
(
output_dims
,
OutLayout
{}));
fill_input_op
(
input
.
begin
(),
input
.
end
());
fill_input_op
(
input
.
begin
(),
input
.
end
());
fill_weights_op
(
weights
.
begin
(),
weights
.
end
());
fill_weights_op
(
weights
.
begin
(),
weights
.
end
());
...
@@ -104,13 +111,14 @@ Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
...
@@ -104,13 +111,14 @@ Tensor<OutDataType> RunReferenceConv(const ck::conv_util::ConvParams& params,
OutElementOp
{});
OutElementOp
{});
ref_invoker
.
Run
(
ref_argument
);
ref_invoker
.
Run
(
ref_argument
);
// std::cout <<"output: " << host_output.mDesc << std::endl << host_output.mData << std::endl;
return
host_output
;
return
host_output
;
}
}
bool
T
est
C
onv2
DNHWC
()
bool
t
est
_c
onv2
d_nhwc
()
{
{
bool
res
{
true
};
bool
res
{
true
};
ck
::
conv_util
::
ConvParams
params
;
ck
::
utils
::
conv
::
ConvParams
params
;
params
.
N
=
1
;
params
.
N
=
1
;
params
.
K
=
1
;
params
.
K
=
1
;
params
.
C
=
2
;
params
.
C
=
2
;
...
@@ -121,7 +129,7 @@ bool TestConv2DNHWC()
...
@@ -121,7 +129,7 @@ bool TestConv2DNHWC()
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
,
0
};
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
,
0
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
,
0
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
,
0
};
auto
out_tensor
=
R
un
R
eference
C
onv
<
2
>
(
params
);
auto
out_tensor
=
r
un
_r
eference
_c
onv
olution_forward
<
2
>
(
params
);
std
::
vector
<
std
::
size_t
>
ref_dims
{
1
,
1
,
4
,
4
};
std
::
vector
<
std
::
size_t
>
ref_dims
{
1
,
1
,
4
,
4
};
std
::
vector
<
float
>
ref_data
{
130.5
,
std
::
vector
<
float
>
ref_data
{
130.5
,
148.5
,
148.5
,
...
@@ -139,10 +147,10 @@ bool TestConv2DNHWC()
...
@@ -139,10 +147,10 @@ bool TestConv2DNHWC()
472.5
,
472.5
,
490.5
,
490.5
,
508.5
};
508.5
};
res
=
res
&&
test_
util
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
ref_dims
,
ref_dims
,
"Error: wrong output tensor dimensions!"
);
"Error: wrong output tensor dimensions!"
);
res
=
res
&&
test_
util
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error: incorrect results!"
);
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error: incorrect results!"
);
params
.
N
=
1
;
params
.
N
=
1
;
params
.
K
=
2
;
params
.
K
=
2
;
...
@@ -154,7 +162,7 @@ bool TestConv2DNHWC()
...
@@ -154,7 +162,7 @@ bool TestConv2DNHWC()
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
,
1
};
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
,
1
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
,
1
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
,
1
};
out_tensor
=
R
un
R
eference
C
onv
<
2
>
(
params
);
out_tensor
=
r
un
_r
eference
_c
onv
olution_forward
<
2
>
(
params
);
ref_dims
=
std
::
vector
<
std
::
size_t
>
{
1
,
2
,
5
,
5
};
ref_dims
=
std
::
vector
<
std
::
size_t
>
{
1
,
2
,
5
,
5
};
ref_data
=
std
::
vector
<
float
>
{
ref_data
=
std
::
vector
<
float
>
{
210.
,
210.
,
327.
,
327.
,
351.
,
351.
,
375.
,
375.
,
399.
,
399.
,
210.
,
210.
,
327.
,
327.
,
351.
,
351.
,
375.
,
375.
,
399.
,
399.
,
...
@@ -162,18 +170,18 @@ bool TestConv2DNHWC()
...
@@ -162,18 +170,18 @@ bool TestConv2DNHWC()
747.
,
747.
,
1138.5
,
1138.5
,
1174.5
,
1174.5
,
1210.5
,
1210.5
,
1246.5
,
1246.5
,
747.
,
747.
,
1138.5
,
1138.5
,
1174.5
,
1174.5
,
1210.5
,
1210.5
,
1246.5
,
1246.5
,
1035.
,
1035.
,
1570.5
,
1570.5
,
1606.5
,
1606.5
,
1642.5
,
1642.5
,
1678.5
,
1678.5
,
1035.
,
1035.
,
1570.5
,
1570.5
,
1606.5
,
1606.5
,
1642.5
,
1642.5
,
1678.5
,
1678.5
,
1323.
,
1323.
,
2002.5
,
2002.5
,
2038.5
,
2038.5
,
2074.5
,
2074.5
,
2110.5
,
2110.5
};
1323.
,
1323.
,
2002.5
,
2002.5
,
2038.5
,
2038.5
,
2074.5
,
2074.5
,
2110.5
,
2110.5
};
res
=
res
&&
test_
util
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
ref_dims
,
ref_dims
,
"Error: wrong output tensor dimensions!"
);
"Error: wrong output tensor dimensions!"
);
res
=
res
&&
test_
util
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error: incorrect results!"
);
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error: incorrect results!"
);
return
res
;
return
res
;
}
}
bool
T
est
C
onv1
DNWC
()
bool
t
est
_c
onv1
d_nwc
()
{
{
bool
res
{
true
};
bool
res
{
true
};
ck
::
conv_util
::
ConvParams
params
;
ck
::
utils
::
conv
::
ConvParams
params
;
params
.
num_dim_spatial
=
1
;
params
.
num_dim_spatial
=
1
;
params
.
N
=
1
;
params
.
N
=
1
;
params
.
K
=
1
;
params
.
K
=
1
;
...
@@ -185,19 +193,20 @@ bool TestConv1DNWC()
...
@@ -185,19 +193,20 @@ bool TestConv1DNWC()
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
};
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
};
auto
out_tensor
=
RunReferenceConv
<
1
,
auto
out_tensor
=
float
,
run_reference_convolution_forward
<
1
,
float
,
float
,
float
,
float
,
ck
::
tensor_layout
::
convolution
::
NWC
,
float
,
ck
::
tensor_layout
::
convolution
::
KXC
,
ck
::
tensor_layout
::
convolution
::
NWC
,
ck
::
tensor_layout
::
convolution
::
NWK
>
(
params
);
ck
::
tensor_layout
::
convolution
::
KXC
,
ck
::
tensor_layout
::
convolution
::
NWK
>
(
params
);
std
::
vector
<
std
::
size_t
>
ref_dims
{
1
,
1
,
4
};
std
::
vector
<
std
::
size_t
>
ref_dims
{
1
,
1
,
4
};
std
::
vector
<
float
>
ref_data
{
7.5
,
13.5
,
19.5
,
25.5
};
std
::
vector
<
float
>
ref_data
{
7.5
,
13.5
,
19.5
,
25.5
};
res
=
res
&&
test_
util
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
ref_dims
,
ref_dims
,
"Error: wrong output tensor dimensions!"
);
"Error: wrong output tensor dimensions!"
);
res
=
res
&&
test_
util
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error: incorrect results!"
);
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error: incorrect results!"
);
params
.
num_dim_spatial
=
1
;
params
.
num_dim_spatial
=
1
;
params
.
N
=
1
;
params
.
N
=
1
;
...
@@ -210,19 +219,19 @@ bool TestConv1DNWC()
...
@@ -210,19 +219,19 @@ bool TestConv1DNWC()
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
};
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
};
out_tensor
=
R
un
R
eference
C
onv
<
1
,
out_tensor
=
r
un
_r
eference
_c
onv
olution_forward
<
1
,
float
,
float
,
float
,
float
,
float
,
float
,
ck
::
tensor_layout
::
convolution
::
NWC
,
ck
::
tensor_layout
::
convolution
::
NWC
,
ck
::
tensor_layout
::
convolution
::
KXC
,
ck
::
tensor_layout
::
convolution
::
KXC
,
ck
::
tensor_layout
::
convolution
::
NWK
>
(
params
);
ck
::
tensor_layout
::
convolution
::
NWK
>
(
params
);
ref_dims
=
std
::
vector
<
std
::
size_t
>
{
1
,
2
,
5
};
ref_dims
=
std
::
vector
<
std
::
size_t
>
{
1
,
2
,
5
};
ref_data
=
std
::
vector
<
float
>
{
9.
,
9.
,
19.5
,
19.5
,
31.5
,
31.5
,
43.5
,
43.5
,
55.5
,
55.5
};
ref_data
=
std
::
vector
<
float
>
{
9.
,
9.
,
19.5
,
19.5
,
31.5
,
31.5
,
43.5
,
43.5
,
55.5
,
55.5
};
res
=
res
&&
test_
util
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
ref_dims
,
ref_dims
,
"Error: wrong output tensor dimensions!"
);
"Error: wrong output tensor dimensions!"
);
res
=
res
&&
test_
util
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error: incorrect results!"
);
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error: incorrect results!"
);
params
.
num_dim_spatial
=
1
;
params
.
num_dim_spatial
=
1
;
params
.
N
=
2
;
params
.
N
=
2
;
...
@@ -235,16 +244,14 @@ bool TestConv1DNWC()
...
@@ -235,16 +244,14 @@ bool TestConv1DNWC()
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
};
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
1
};
auto
out_tensor2
=
auto
out_tensor2
=
run_reference_convolution_forward
<
1
,
RunReferenceConv
<
1
,
float
,
float
,
float
,
float
,
float
,
float
,
ck
::
tensor_layout
::
convolution
::
NWC
,
ck
::
tensor_layout
::
convolution
::
NWC
,
ck
::
tensor_layout
::
convolution
::
KXC
,
ck
::
tensor_layout
::
convolution
::
KXC
,
ck
::
tensor_layout
::
convolution
::
NWK
>
(
ck
::
tensor_layout
::
convolution
::
NWK
>
(
params
,
[](
auto
first
,
auto
last
)
{
params
,
FillMonotonicSeq
<
float
>
{
0.
f
,
0.1
f
});
std
::
generate
(
first
,
last
,
[
n
=
0
]()
mutable
{
return
float
(
n
++
)
*
float
(
0.1
f
);
});
});
ref_dims
=
std
::
vector
<
std
::
size_t
>
{
2
,
16
,
16
};
ref_dims
=
std
::
vector
<
std
::
size_t
>
{
2
,
16
,
16
};
ref_data
=
std
::
vector
<
float
>
{
ref_data
=
std
::
vector
<
float
>
{
...
@@ -312,10 +319,95 @@ bool TestConv1DNWC()
...
@@ -312,10 +319,95 @@ bool TestConv1DNWC()
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
72.9
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
};
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
,
49.4
};
res
=
res
&&
test_
util
::
check_err
(
out_tensor2
.
mDesc
.
GetLengths
(),
res
=
res
&&
ck
::
util
s
::
check_err
(
out_tensor2
.
mDesc
.
GetLengths
(),
ref_dims
,
ref_dims
,
"Error: wrong output tensor dimensions!"
);
"Error: wrong output tensor dimensions!"
);
res
=
res
&&
test_util
::
check_err
(
out_tensor2
.
mData
,
ref_data
,
"Error: incorrect results!"
);
res
=
res
&&
ck
::
utils
::
check_err
(
out_tensor2
.
mData
,
ref_data
,
"Error: incorrect results!"
);
return
res
;
}
bool
test_conv3d_ncdhw
()
{
bool
res
{
true
};
ck
::
utils
::
conv
::
ConvParams
params
;
params
.
num_dim_spatial
=
3
;
params
.
N
=
1
;
params
.
K
=
1
;
params
.
C
=
2
;
params
.
filter_spatial_lengths
=
std
::
vector
<
ck
::
index_t
>
{
3
,
3
,
3
};
params
.
input_spatial_lengths
=
std
::
vector
<
ck
::
index_t
>
{
6
,
6
,
6
};
params
.
conv_filter_strides
=
std
::
vector
<
ck
::
index_t
>
{
1
,
1
,
1
};
params
.
conv_filter_dilations
=
std
::
vector
<
ck
::
index_t
>
{
1
,
1
,
1
};
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
,
0
,
0
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
,
0
,
0
};
auto
out_tensor
=
run_reference_convolution_forward
<
3
,
float
,
float
,
float
,
ck
::
tensor_layout
::
convolution
::
NCDHW
,
ck
::
tensor_layout
::
convolution
::
KCZYX
,
ck
::
tensor_layout
::
convolution
::
NKDHW
>
(
params
,
FillMonotonicSeq
<
float
>
{
0.
f
,
0.1
f
});
std
::
vector
<
std
::
size_t
>
ref_dims
{
1
,
1
,
4
,
4
,
4
};
std
::
vector
<
float
>
ref_data
{
407.7
,
410.40002
,
413.09998
,
415.80002
,
423.90002
,
426.6
,
429.30002
,
432.
,
440.1
,
442.80002
,
445.5
,
448.2
,
456.30002
,
459.
,
461.7
,
464.40002
,
504.90002
,
507.6
,
510.30002
,
513.
,
521.1
,
523.8
,
526.5
,
529.2001
,
537.3
,
540.
,
542.7001
,
545.4
,
553.5
,
556.2001
,
558.9
,
561.6
,
602.10004
,
604.8
,
607.5
,
610.2
,
618.3
,
621.
,
623.7
,
626.4
,
634.5
,
637.2
,
639.9
,
642.60004
,
650.7
,
653.4
,
656.10004
,
658.8
,
699.3
,
702.
,
704.7
,
707.4
,
715.5
,
718.2
,
720.9
,
723.60004
,
731.7
,
734.4001
,
737.10004
,
739.8
,
747.9001
,
750.60004
,
753.3
,
756.
};
res
=
res
&&
ck
::
utils
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
ref_dims
,
"Error [case 1]: wrong output tensor dimensions!"
);
res
=
res
&&
ck
::
utils
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error [case 1]: incorrect results!"
);
params
.
N
=
1
;
params
.
K
=
2
;
params
.
C
=
2
;
params
.
filter_spatial_lengths
=
std
::
vector
<
ck
::
index_t
>
{
3
,
3
,
3
};
params
.
input_spatial_lengths
=
std
::
vector
<
ck
::
index_t
>
{
12
,
12
,
12
};
params
.
conv_filter_strides
=
std
::
vector
<
ck
::
index_t
>
{
3
,
3
,
3
};
params
.
conv_filter_dilations
=
std
::
vector
<
ck
::
index_t
>
{
1
,
1
,
1
};
params
.
input_left_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
,
0
,
0
};
params
.
input_right_pads
=
std
::
vector
<
ck
::
index_t
>
{
0
,
0
,
0
};
out_tensor
=
run_reference_convolution_forward
<
3
,
float
,
float
,
float
,
ck
::
tensor_layout
::
convolution
::
NCDHW
,
ck
::
tensor_layout
::
convolution
::
KCZYX
,
ck
::
tensor_layout
::
convolution
::
NKDHW
>
(
params
,
FillMonotonicSeq
<
float
>
{
0.
f
,
0.1
f
});
ref_dims
=
std
::
vector
<
std
::
size_t
>
{
1
,
2
,
4
,
4
,
4
};
ref_data
=
std
::
vector
<
float
>
{
2756.7002
,
2764.7998
,
2772.9001
,
2781.
,
2853.9001
,
2862.
,
2870.1
,
2878.2002
,
2951.1
,
2959.2002
,
2967.2998
,
2975.4001
,
3048.2998
,
3056.4001
,
3064.5
,
3072.6
,
3923.1
,
3931.2
,
3939.2998
,
3947.4
,
4020.2998
,
4028.4001
,
4036.5002
,
4044.5999
,
4117.5
,
4125.6
,
4133.7
,
4141.8
,
4214.7
,
4222.8
,
4230.9004
,
4239.
,
5089.5
,
5097.5996
,
5105.7
,
5113.8
,
5186.7
,
5194.8
,
5202.9
,
5211.
,
5283.9004
,
5292.
,
5300.0996
,
5308.2
,
5381.0996
,
5389.2
,
5397.3
,
5405.4004
,
6255.9004
,
6264.0005
,
6272.1
,
6280.2
,
6353.1
,
6361.2
,
6369.301
,
6377.4
,
6450.301
,
6458.4
,
6466.5
,
6474.6
,
6547.5
,
6555.6
,
6563.699
,
6571.801
,
2756.7002
,
2764.7998
,
2772.9001
,
2781.
,
2853.9001
,
2862.
,
2870.1
,
2878.2002
,
2951.1
,
2959.2002
,
2967.2998
,
2975.4001
,
3048.2998
,
3056.4001
,
3064.5
,
3072.6
,
3923.1
,
3931.2
,
3939.2998
,
3947.4
,
4020.2998
,
4028.4001
,
4036.5002
,
4044.5999
,
4117.5
,
4125.6
,
4133.7
,
4141.8
,
4214.7
,
4222.8
,
4230.9004
,
4239.
,
5089.5
,
5097.5996
,
5105.7
,
5113.8
,
5186.7
,
5194.8
,
5202.9
,
5211.
,
5283.9004
,
5292.
,
5300.0996
,
5308.2
,
5381.0996
,
5389.2
,
5397.3
,
5405.4004
,
6255.9004
,
6264.0005
,
6272.1
,
6280.2
,
6353.1
,
6361.2
,
6369.301
,
6377.4
,
6450.301
,
6458.4
,
6466.5
,
6474.6
,
6547.5
,
6555.6
,
6563.699
,
6571.801
};
res
=
res
&&
ck
::
utils
::
check_err
(
out_tensor
.
mDesc
.
GetLengths
(),
ref_dims
,
"Error [case 2]: wrong output tensor dimensions!"
);
res
=
res
&&
ck
::
utils
::
check_err
(
out_tensor
.
mData
,
ref_data
,
"Error [case 2]: incorrect results!"
,
1e-4
f
,
1e-6
f
);
return
res
;
return
res
;
}
}
...
@@ -325,9 +417,11 @@ bool TestConv1DNWC()
...
@@ -325,9 +417,11 @@ bool TestConv1DNWC()
int
main
(
void
)
int
main
(
void
)
{
{
bool
res
{
true
};
bool
res
{
true
};
res
=
T
est
C
onv2
DNHWC
();
res
=
t
est
_c
onv2
d_nhwc
();
std
::
cout
<<
"
T
est
C
onv2
DNHWC
..... "
<<
(
res
?
"SUCCESS"
:
"FAILURE"
)
<<
std
::
endl
;
std
::
cout
<<
"
t
est
_c
onv2
d_nhwc
..... "
<<
(
res
?
"SUCCESS"
:
"FAILURE"
)
<<
std
::
endl
;
res
=
T
est
C
onv1
DNWC
();
res
=
t
est
_c
onv1
d_nwc
();
std
::
cout
<<
"TestConv1DNHWC ..... "
<<
(
res
?
"SUCCESS"
:
"FAILURE"
)
<<
std
::
endl
;
std
::
cout
<<
"TestConv1DNHWC ..... "
<<
(
res
?
"SUCCESS"
:
"FAILURE"
)
<<
std
::
endl
;
return
0
;
res
=
test_conv3d_ncdhw
();
std
::
cout
<<
"test_conv3d_ncdhw ..... "
<<
(
res
?
"SUCCESS"
:
"FAILURE"
)
<<
std
::
endl
;
return
res
?
0
:
1
;
}
}
test/space_filling_curve/space_filling_curve.cpp
View file @
dd6a8de4
...
@@ -14,12 +14,8 @@ int main(int argc, char** argv)
...
@@ -14,12 +14,8 @@ int main(int argc, char** argv)
(
void
)
argc
;
(
void
)
argc
;
(
void
)
argv
;
(
void
)
argv
;
{
traverse_using_space_filling_curve
();
traverse_using_space_filling_curve
();
auto
err
=
hipDeviceSynchronize
();
(
void
)
err
;
assert
(
err
==
hipSuccess
);
}
return
0
;
return
0
;
}
}
...
@@ -95,13 +91,13 @@ void traverse_using_space_filling_curve()
...
@@ -95,13 +91,13 @@ void traverse_using_space_filling_curve()
make_tuple
(
12
,
2
,
6
),
make_tuple
(
12
,
2
,
6
),
make_tuple
(
12
,
0
,
6
));
make_tuple
(
12
,
0
,
6
));
constexpr
index_t
num_access
es
=
SpaceFillingCurve
::
GetNumOfAccess
();
constexpr
index_t
num_access
=
SpaceFillingCurve
::
GetNumOfAccess
();
static_assert
(
num_access
es
==
reduce_on_sequence
(
TensorLengths
{}
/
ScalarsPerAccess
{},
static_assert
(
num_access
==
reduce_on_sequence
(
TensorLengths
{}
/
ScalarsPerAccess
{},
math
::
multiplies
{},
math
::
multiplies
{},
Number
<
1
>
{}));
Number
<
1
>
{}));
static_for
<
1
,
num_access
es
,
1
>
{}([
&
](
auto
i
)
{
static_for
<
1
,
num_access
,
1
>
{}([
&
](
auto
i
)
{
constexpr
auto
idx_curr
=
SpaceFillingCurve
::
GetIndex
(
i
);
constexpr
auto
idx_curr
=
SpaceFillingCurve
::
GetIndex
(
i
);
static_assert
(
idx_curr
[
I0
]
==
expected
[
i
][
I0
]);
static_assert
(
idx_curr
[
I0
]
==
expected
[
i
][
I0
]);
...
@@ -115,7 +111,7 @@ void traverse_using_space_filling_curve()
...
@@ -115,7 +111,7 @@ void traverse_using_space_filling_curve()
static_assert
(
backward_step
[
I2
]
==
expected_step
[
I2
]);
static_assert
(
backward_step
[
I2
]
==
expected_step
[
I2
]);
});
});
static_for
<
0
,
num_access
es
-
1
,
1
>
{}([
&
](
auto
i
)
{
static_for
<
0
,
num_access
-
1
,
1
>
{}([
&
](
auto
i
)
{
constexpr
auto
idx_curr
=
SpaceFillingCurve
::
GetIndex
(
i
);
constexpr
auto
idx_curr
=
SpaceFillingCurve
::
GetIndex
(
i
);
static_assert
(
idx_curr
[
I0
]
==
expected
[
i
][
I0
]);
static_assert
(
idx_curr
[
I0
]
==
expected
[
i
][
I0
]);
...
...
Prev
1
…
20
21
22
23
24
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment