Commit 25c6d97b authored by Andriy Roshchenko's avatar Andriy Roshchenko
Browse files

Make fail/pass logic consistent within 01_gemm folder

Removed multiple negations in fail/pass logic to propagate `true` as the success indicator.
parent 728032d7
...@@ -45,4 +45,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host:: ...@@ -45,4 +45,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::
#include "run_gemm_example_v2.inc" #include "run_gemm_example_v2.inc"
int main(int argc, char* argv[]) { return (run_gemm_splitk_example(argc, argv)) ? -1 : 0; } int main(int argc, char* argv[]) { return (run_gemm_splitk_example(argc, argv) ? 0 : -1); }
...@@ -50,4 +50,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataTyp ...@@ -50,4 +50,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::ReferenceGemm<ADataTyp
#include "run_gemm_example_v2.inc" #include "run_gemm_example_v2.inc"
int main(int argc, char* argv[]) { return (run_gemm_splitk_example(argc, argv)) ? -1 : 0; } int main(int argc, char* argv[]) { return (run_gemm_splitk_example(argc, argv) ? 0 : -1); }
...@@ -47,5 +47,5 @@ using ReferenceGemmInstance = ck::tensor_operation::host:: ...@@ -47,5 +47,5 @@ using ReferenceGemmInstance = ck::tensor_operation::host::
int main(int argc, char* argv[]) int main(int argc, char* argv[])
{ {
return (run_gemm_universal_streamk_example(argc, argv)) ? -1 : 0; return (run_gemm_universal_streamk_example(argc, argv) ? 0 : -1);
} }
...@@ -45,4 +45,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host:: ...@@ -45,4 +45,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::
#include "run_gemm_example_v2.inc" #include "run_gemm_example_v2.inc"
int main(int argc, char* argv[]) { return (run_gemm_splitk_example(argc, argv)) ? -1 : 0; } int main(int argc, char* argv[]) { return (run_gemm_splitk_example(argc, argv) ? 0 : -1); }
...@@ -53,4 +53,4 @@ using ReferenceGemmInstanceGPU = ck::tensor_operation::device::ReferenceGemm<ALa ...@@ -53,4 +53,4 @@ using ReferenceGemmInstanceGPU = ck::tensor_operation::device::ReferenceGemm<ALa
#include "run_gemm_example.inc" #include "run_gemm_example.inc"
int main(int argc, char* argv[]) { return run_gemm_example(argc, argv) ? 0 : -1; } int main(int argc, char* argv[]) { return (run_gemm_example(argc, argv) ? 0 : -1); }
...@@ -45,4 +45,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host:: ...@@ -45,4 +45,4 @@ using ReferenceGemmInstance = ck::tensor_operation::host::
#include "run_gemm_example_v2.inc" #include "run_gemm_example_v2.inc"
int main(int argc, char* argv[]) { return (run_gemm_splitk_example(argc, argv)) ? -1 : 0; } int main(int argc, char* argv[]) { return (run_gemm_splitk_example(argc, argv) ? 0 : -1); }
...@@ -237,7 +237,7 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config) ...@@ -237,7 +237,7 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
{ {
std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl; std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;
return true; return false;
} }
bool pass = true; bool pass = true;
...@@ -293,7 +293,7 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config) ...@@ -293,7 +293,7 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
<< " GB/s, " << gemm.GetTypeString() << std::endl; << " GB/s, " << gemm.GetTypeString() << std::endl;
} }
return !pass; return pass;
} }
bool run_gemm_universal_streamk_example(int argc, char* argv[]) bool run_gemm_universal_streamk_example(int argc, char* argv[])
...@@ -301,5 +301,5 @@ bool run_gemm_universal_streamk_example(int argc, char* argv[]) ...@@ -301,5 +301,5 @@ bool run_gemm_universal_streamk_example(int argc, char* argv[])
ProblemSizeStreamK_universal problem_size; ProblemSizeStreamK_universal problem_size;
ExecutionConfig config; ExecutionConfig config;
return !parse_cmd_args(argc, argv, problem_size, config) || run_gemm(problem_size, config); return parse_cmd_args(argc, argv, problem_size, config) && run_gemm(problem_size, config);
} }
...@@ -216,7 +216,7 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config) ...@@ -216,7 +216,7 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
{ {
std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl; std::cerr << gemm.GetTypeString() << " does not support this problem" << std::endl;
return true; return false;
} }
bool pass = true; bool pass = true;
...@@ -273,7 +273,7 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config) ...@@ -273,7 +273,7 @@ bool run_gemm(const ProblemType& problem_size, const ExecutionConfig& config)
std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec std::cout << "Perf: " << ave_time << " ms, " << tflops << " TFlops, " << gb_per_sec
<< " GB/s, " << gemm.GetTypeString() << std::endl; << " GB/s, " << gemm.GetTypeString() << std::endl;
} }
return !pass; return pass;
} }
bool run_gemm_splitk_example(int argc, char* argv[]) bool run_gemm_splitk_example(int argc, char* argv[])
...@@ -281,5 +281,5 @@ bool run_gemm_splitk_example(int argc, char* argv[]) ...@@ -281,5 +281,5 @@ bool run_gemm_splitk_example(int argc, char* argv[])
ProblemSizeSplitK problem_size; ProblemSizeSplitK problem_size;
ExecutionConfig config; ExecutionConfig config;
return !parse_cmd_args(argc, argv, problem_size, config) || run_gemm(problem_size, config); return parse_cmd_args(argc, argv, problem_size, config) && run_gemm(problem_size, config);
} }
...@@ -382,6 +382,8 @@ template <index_t N> ...@@ -382,6 +382,8 @@ template <index_t N>
struct non_native_vector_base<f8_ocp_t, N> struct non_native_vector_base<f8_ocp_t, N>
{ {
using data_t = f8_ocp_t::data_type; using data_t = f8_ocp_t::data_type;
static_assert(sizeof(f8_ocp_t) == sizeof(data_t),
"non_native_vector_base storage size mismatch");
using data_v = data_t __attribute__((ext_vector_type(sizeof(data_t) * N))); using data_v = data_t __attribute__((ext_vector_type(sizeof(data_t) * N)));
using type = non_native_vector_base<f8_ocp_t, N>; using type = non_native_vector_base<f8_ocp_t, N>;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment