"vscode:/vscode.git/clone" did not exist on "ab59246203d27e955ecd65a7681b624dc5d7a72a"
Unverified commit a6bde7c0 authored by Manupa Karunaratne, committed by GitHub

Merge branch 'develop' into mlir-attention

parents fe36d210 35e5298e
@@ -24,6 +24,7 @@
#include <migraphx/simplify_dyn_ops.hpp>
#include <migraphx/matcher.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/literal.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {

@@ -131,10 +132,53 @@ struct find_const_4in_slice
    }
};
/**
 * Simplify dimensions_of to a literal when the input argument has a static shape
 * or the dynamic dimensions from `start` to `end` are fixed.
 */
struct find_static_dimensions_of
{
    auto matcher() const { return match::name("dimensions_of")(); }

    void apply(module& m, const match::matcher_result& mr) const
    {
        auto ins                 = mr.result;
        auto input               = ins->inputs().at(0);
        auto dimensions_of_value = ins->get_operator().to_value();
        auto start               = dimensions_of_value.at("start").to<std::size_t>();
        auto end                 = dimensions_of_value.at("end").to<std::size_t>();
        if(input->get_shape().dynamic())
        {
            // check if dynamic dimensions from start to end are fixed
            auto dds = input->get_shape().dyn_dims();
            if(std::any_of(dds.begin() + start, dds.begin() + end, [](auto dd) {
                   return not dd.is_fixed();
               }))
            {
                return;
            }
        }
        std::size_t output_ndim = end - start;
        std::vector<int64_t> vec_shape(output_ndim);
        std::vector<std::size_t> input_lens = input->get_shape().to_static(1).lens();
        std::transform(input_lens.begin() + start,
                       input_lens.begin() + end,
                       vec_shape.begin(),
                       [](auto i) { return int64_t(i); });
        migraphx::shape output_shape{migraphx::shape::int64_type, {output_ndim}};
        auto lit_ins = m.add_literal(migraphx::literal{output_shape, vec_shape});
        m.replace_instruction(ins, lit_ins);
    }
};
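A minimal sketch of the rewrite this matcher performs, using the module/make_op API (the shapes and names below are illustrative, not taken from the diff):

// x has the static shape {2, 3, 4, 4}
migraphx::module m;
auto x = m.add_parameter(
    "x", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 4}});
auto d = m.add_instruction(
    migraphx::make_op("dimensions_of", {{"start", 1}, {"end", 3}}), x);
// After simplify_dyn_ops runs, `d` is replaced by a literal of shape
// {int64_type, {2}} holding the values {3, 4}, so nothing remains to be
// computed at runtime.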
void simplify_dyn_ops::apply(module& m) const
{
-    match::find_matches(
-        m, find_static_2in_broadcasts{}, find_const_3in_slice{}, find_const_4in_slice{});
+    match::find_matches(m,
+                        find_static_2in_broadcasts{},
+                        find_static_dimensions_of{},
+                        find_const_3in_slice{},
+                        find_const_4in_slice{});
}

} // namespace MIGRAPHX_INLINE_NS
......
@@ -647,8 +647,8 @@ struct find_broadcast_transpose
    {
        auto transpose      = r.result;
        auto transpose_lens = transpose->get_shape().lens();
        auto bcast_ins      = r.instructions["bcast_ins"];
        auto input          = bcast_ins->inputs().front();
        // scalar transformation does not need extra transpose
        if(not input->get_shape().scalar())
        {
......
# ####################################################################################
# The MIT License (MIT)
#
-# Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
@@ -231,24 +231,28 @@ else()
        string(REGEX REPLACE " /[^ ]+\\.(a|so) " " " HIP_COMPILER_FLAGS "${HIP_COMPILER_FLAGS}")
    endforeach()

-    message(STATUS "Hip compiler flags: ${HIP_COMPILER_FLAGS}")
+    message(STATUS "Hip compiler flags: \"${HIP_COMPILER_FLAGS}\"")

    target_compile_definitions(migraphx_gpu PRIVATE
-        "-DMIGRAPHX_HIP_COMPILER=${CMAKE_CXX_COMPILER}"
-        "-DMIGRAPHX_HIP_COMPILER_FLAGS=${HIP_COMPILER_FLAGS}"
+        -DMIGRAPHX_HIP_COMPILER="${CMAKE_CXX_COMPILER}"
+        -DMIGRAPHX_HIP_COMPILER_FLAGS="${HIP_COMPILER_FLAGS}"
    )

    if(DEFINED CMAKE_CXX_COMPILER_LAUNCHER)
        execute_process(COMMAND which ${CMAKE_CXX_COMPILER_LAUNCHER} OUTPUT_VARIABLE MIGRAPHX_HIP_COMPILER_LAUNCHER)
        string(STRIP "${MIGRAPHX_HIP_COMPILER_LAUNCHER}" MIGRAPHX_HIP_COMPILER_LAUNCHER)
-        target_compile_definitions(migraphx_gpu PRIVATE "-DMIGRAPHX_HIP_COMPILER_LAUNCHER=${MIGRAPHX_HIP_COMPILER_LAUNCHER}")
+        target_compile_definitions(migraphx_gpu PRIVATE -DMIGRAPHX_HIP_COMPILER_LAUNCHER="${MIGRAPHX_HIP_COMPILER_LAUNCHER}")
    endif()
endif()
# Check miopen find mode api
include(CheckLibraryExists)
get_target_property(MIOPEN_LOCATION MIOpen LOCATION)
get_target_property(ROCBLAS_LOCATION roc::rocblas LOCATION)
check_library_exists(MIOpen "miopenHiddenSetConvolutionFindMode" "${MIOPEN_LOCATION}" HAS_FIND_MODE_API)
check_library_exists(MIOpen "miopenFindSolutions" "${MIOPEN_LOCATION}" HAS_FIND_2_API)
# Beta API for automated GEMM tuning
check_library_exists(roc::rocblas "rocblas_gemm_ex_get_solutions" "${ROCBLAS_LOCATION}" HAS_ROCBLAS_TUNING_BETA_FEATURE_API)

set(MIGRAPHX_USE_FIND_2_API "${HAS_FIND_2_API}" CACHE BOOL "")
@@ -271,6 +275,13 @@ else()
    message(STATUS "MIOpen does not have find mode api")
endif()

if(HAS_ROCBLAS_TUNING_BETA_FEATURE_API)
    target_compile_definitions(migraphx_gpu PUBLIC -DMIGRAPHX_USE_ROCBLAS_TUNING_API -DROCBLAS_BETA_FEATURES_API -DROCBLAS_NO_DEPRECATED_WARNINGS)
    message(STATUS "MIGraphX is using the rocBLAS tuning beta API")
else()
    message(STATUS "rocBLAS does not have the user tuning beta API")
endif()
target_link_libraries(migraphx_gpu PUBLIC migraphx MIOpen roc::rocblas)
target_link_libraries(migraphx_gpu PRIVATE migraphx_device migraphx_kernels)
if(MIGRAPHX_USE_COMPOSABLEKERNEL)
......
@@ -284,16 +284,20 @@ std::vector<std::vector<char>> compile_hip_src_with_hiprtc(std::vector<hiprtc_sr
bool is_hip_clang_compiler()
{
-    static const auto result = ends_with(MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER), "clang++");
+    static const auto result = fs::path{MIGRAPHX_HIP_COMPILER}.stem() == "clang++";
    return result;
}
#ifdef MIGRAPHX_HIP_COMPILER_LAUNCHER
bool has_compiler_launcher()
{
-    static const auto result = fs::exists(MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER_LAUNCHER));
+    static const auto result = fs::exists(MIGRAPHX_HIP_COMPILER_LAUNCHER);
    return result;
}
#endif
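// Note on the STRINGIZE removals above and below: the CMake definitions now embed
// the quotes (e.g. -DMIGRAPHX_HIP_COMPILER="/usr/bin/clang++", path hypothetical),
// so each macro expands directly to a C string literal; previously the unquoted
// value had to be wrapped in MIGRAPHX_STRINGIZE before it could be used as a string.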
src_compiler assemble(src_compiler compiler)
{
    compiler.out_ext = ".S";
@@ -306,8 +310,7 @@ compile_hip_src(const std::vector<src_file>& srcs, std::string params, const std
{
    assert(not srcs.empty());
    if(not is_hip_clang_compiler())
-        MIGRAPHX_THROW("Unknown hip compiler: " +
-                       std::string(MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER)));
+        MIGRAPHX_THROW("Unknown hip compiler: " MIGRAPHX_HIP_COMPILER);

    if(params.find("-std=") == std::string::npos)
        params += " --std=c++17";
@@ -323,14 +326,14 @@ compile_hip_src(const std::vector<src_file>& srcs, std::string params, const std
        params += " -DMIGRAPHX_DEBUG";

    params += " -Wno-unused-command-line-argument -Wno-cuda-compat ";
-    params += MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER_FLAGS);
+    params += MIGRAPHX_HIP_COMPILER_FLAGS;

    src_compiler compiler;
    compiler.flags    = params;
-    compiler.compiler = MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER);
+    compiler.compiler = MIGRAPHX_HIP_COMPILER;
#ifdef MIGRAPHX_HIP_COMPILER_LAUNCHER
    if(has_compiler_launcher())
-        compiler.launcher = MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER_LAUNCHER);
+        compiler.launcher = MIGRAPHX_HIP_COMPILER_LAUNCHER;
#endif

    if(enabled(MIGRAPHX_GPU_DUMP_SRC{}))
    {
@@ -354,7 +357,7 @@ compile_hip_src(const std::vector<src_file>& srcs, std::string params, const std
bool hip_has_flags(const std::vector<std::string>& flags)
{
    src_compiler compiler;
-    compiler.compiler = MIGRAPHX_STRINGIZE(MIGRAPHX_HIP_COMPILER);
+    compiler.compiler = MIGRAPHX_HIP_COMPILER;
    compiler.flags =
        join_strings(flags, " ") + " -x hip -c --offload-arch=gfx900 --cuda-device-only";
......
@@ -168,6 +168,7 @@ struct compile_plan
    }
    const compiled_result& benchmark(problem_cache& pc) const
    {
        const auto trace_level = value_of(MIGRAPHX_TRACE_BENCHMARKING{});
        if(results.empty())
            MIGRAPHX_THROW("No configs to tune");
        if(results.size() == 1)
@@ -178,9 +179,10 @@ struct compile_plan
        }
        if(not config)
            MIGRAPHX_THROW("Multiple kernels without config");
-        std::cout << "Benchmarking " << preop.name() << ": " << results.size() << " configs"
-                  << std::endl;
-        if(enabled(MIGRAPHX_TRACE_BENCHMARKING{}))
+        if(trace_level > 0)
+            std::cout << "Benchmarking " << preop.name() << ": " << results.size()
+                      << " configs" << std::endl;
+        if(trace_level > 1)
            std::cout << "Problem: " << config->problem << std::endl;
        std::vector<double> times;
        times.reserve(results.size());
@@ -189,22 +191,23 @@ struct compile_plan
            config->solutions.begin(),
            std::back_inserter(times),
            [&](const auto& cr, const auto& solution) {
-                if(enabled(MIGRAPHX_TRACE_BENCHMARKING{}))
+                if(trace_level > 1)
                    std::cout << "Benchmarking solution: " << solution << std::endl;
                if(not cr.has_value())
                {
-                    if(enabled(MIGRAPHX_TRACE_BENCHMARKING{}))
+                    if(trace_level > 1)
                        std::cout << "No binary" << std::endl;
                    return std::numeric_limits<double>::max();
                }
                auto t = time_op(
                    *ctx, cr->replace.code_object, to_shapes(cr->ins->inputs()), 20);
-                if(enabled(MIGRAPHX_TRACE_BENCHMARKING{}))
+                if(trace_level > 1)
                    std::cout << t << "ms" << std::endl;
                return t;
            });
        auto i = std::distance(times.begin(), std::min_element(times.begin(), times.end()));
-        std::cout << "Fastest solution: " << config->solutions.at(i) << std::endl;
+        if(trace_level > 0)
+            std::cout << "Fastest solution: " << config->solutions.at(i) << std::endl;
        pc.insert(preop.name(), config->problem, config->solutions.at(i));
        if(not results[i].has_value())
            MIGRAPHX_THROW("No valid tuned compilation.");
......
This diff is collapsed.
/*
 * The MIT License (MIT)
 *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
@@ -40,9 +40,8 @@ inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

struct context;

-void blas_shape(const shape& s);
shape transpose_batch(const shape& s, unsigned trans_batch);
+void blas_shape(const shape& s);
template <class Op>
struct rocblas_gemm

@@ -52,6 +51,7 @@ struct rocblas_gemm
    float beta           = 0;
    bool compute_fp32    = false;
    unsigned trans_batch = 0;
    int32_t solution_idx = 0;
    template <class Self, class F>
    static auto reflect(Self& self, F f)

@@ -60,7 +60,8 @@ struct rocblas_gemm
        pack(f(self.alpha, "alpha"),
             f(self.beta, "beta"),
             f(self.compute_fp32, "compute_fp32"),
-             f(self.trans_batch, "trans_batch")));
+             f(self.trans_batch, "trans_batch"),
+             f(self.solution_idx, "solution_idx")));
    }

    std::string name() const
@@ -76,6 +77,8 @@ struct rocblas_gemm
    {
        std::vector<shape> in_shapes(inputs);
        in_shapes.pop_back();
        // When the input shapes are A, B, and C, the GEMM equation is
        // C = α·A·B + β·C, where α and β are scalars
        check_shapes{in_shapes, *this}.has(2, 3);
        blas_shape(inputs[0]);
        blas_shape(inputs[1]);
@@ -111,11 +114,12 @@ struct rocblas_gemm
    {
        if(this->name() == "gpu::gemm")
        {
-            gemm(ctx, output_shape, args, alpha, beta, compute_fp32);
+            gemm_compute(ctx, output_shape, args, alpha, beta, compute_fp32, solution_idx);
        }
        else
        {
-            gemm(ctx, output_shape, args, int32_t(alpha), int32_t(beta), compute_fp32);
+            gemm_compute(
+                ctx, output_shape, args, int32_t(alpha), int32_t(beta), compute_fp32, solution_idx);
        }
        return args.back();
    }
@@ -124,6 +128,33 @@ struct rocblas_gemm
    {
        return shapes.size() - 1;
    }
    void finalize(context& ctx, const shape& output_shape, const std::vector<shape>& input_shapes)
    {
#ifdef MIGRAPHX_USE_ROCBLAS_TUNING_API
        if(enabled(MIGRAPHX_ENABLE_GEMM_TUNING{}) or ctx.get_exhaustive_tune_flag())
        {
            if(this->name() == "gpu::gemm")
            {
                solution_idx = gemm_finalize(
                    ctx, output_shape, input_shapes, alpha, beta, compute_fp32, solution_idx);
            }
            else
            {
                solution_idx = gemm_finalize(ctx,
                                             output_shape,
                                             input_shapes,
                                             int32_t(alpha),
                                             int32_t(beta),
                                             compute_fp32,
                                             solution_idx);
            }
        }
#else
        // suppress compiler warnings for the unused parameters
        (void)ctx, (void)output_shape, (void)input_shapes;
#endif
    }
};

} // namespace gpu
......
/*
 * The MIT License (MIT)
 *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
@@ -24,26 +24,64 @@
#ifndef MIGRAPHX_GUARD_RTGLIB_GEMM_IMPL_HPP
#define MIGRAPHX_GUARD_RTGLIB_GEMM_IMPL_HPP

#include <chrono> // assumed include for the std::chrono usings below
#include <iterator>
#include <migraphx/shape.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/context.hpp>
// Set this environment variable to "true" to perform GEMM tuning even when the
// --exhaustive-tune option isn't set. This makes it possible to tune GEMMs while
// skipping the slower convolution tuning.
MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_ENABLE_GEMM_TUNING);
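// (The variable is read via enabled(MIGRAPHX_ENABLE_GEMM_TUNING{}) in
// rocblas_gemm::finalize, alongside the context's exhaustive-tune flag.)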
using milliseconds = std::chrono::duration<double, std::milli>;
using microseconds = std::chrono::duration<double, std::micro>;
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
-void gemm(context& ctx,
-          const shape& output_shape,
-          const std::vector<argument>& args,
-          float alpha,
-          float beta,
-          bool compute_fp32);
-void gemm(context& ctx,
-          const shape& output_shape,
-          const std::vector<argument>& args,
-          int32_t alpha,
-          int32_t beta,
-          bool compute_fp32);
+/**
+ * @brief Templated implementations of the compute() and finalize() methods of the Gemm
+ * operator. Each function has overloads taking either float or int32_t for the
+ * alpha and beta arguments.
+ *
+ * @param ctx           GPU context to run on
+ * @param output_shape  shape of the GEMM output
+ * @param args          input and output arguments
+ * @param alpha         scalar multiplier for the A*B product
+ * @param beta          scalar multiplier for the C input
+ * @param compute_fp32  use fp32 computation for fp16 inputs
+ * @param solution_idx  rocBLAS solution index selected by tuning (0 = default)
+ */
+void gemm_compute(context& ctx,
+                  const shape& output_shape,
+                  const std::vector<argument>& args,
+                  float alpha,
+                  float beta,
+                  bool compute_fp32,
+                  int32_t solution_idx);
+void gemm_compute(context& ctx,
+                  const shape& output_shape,
+                  const std::vector<argument>& args,
+                  int32_t alpha,
+                  int32_t beta,
+                  bool compute_fp32,
+                  int32_t solution_idx);
+
+int32_t gemm_finalize(context& ctx,
+                      const shape& output_shape,
+                      const std::vector<shape>& input_shapes,
+                      float alpha,
+                      float beta,
+                      bool compute_fp32,
+                      int32_t solution_idx);
+int32_t gemm_finalize(context& ctx,
+                      const shape& output_shape,
+                      const std::vector<shape>& input_shapes,
+                      int32_t alpha,
+                      int32_t beta,
+                      bool compute_fp32,
+                      int32_t solution_idx);
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
......
/*
 * The MIT License (MIT)
 *
- * Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
......
@@ -101,7 +101,9 @@ MIGRAPHX_DEVICE_MATH(erf, ::erf)
MIGRAPHX_DEVICE_MATH(exp, ::exp)
MIGRAPHX_DEVICE_MATH(floor, ::floor)
MIGRAPHX_DEVICE_MATH(isnan, ::isnan)
MIGRAPHX_DEVICE_MATH(isinf, ::isinf)
MIGRAPHX_DEVICE_MATH(log, ::log)
MIGRAPHX_DEVICE_MATH(nearbyint, ::nearbyint)
MIGRAPHX_DEVICE_MATH(pow, ::pow)
MIGRAPHX_DEVICE_MATH(remainder, ::remainder)
MIGRAPHX_DEVICE_MATH(round, ::round)
@@ -135,6 +137,7 @@ MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, ceil, ::hceil)
MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, cos, ::hcos)
MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, exp, ::hexp)
MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, floor, ::hfloor)
MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, isinf, ::__hisinf)
MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, isnan, ::__hisnan)
MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, log, ::hlog)
MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, rsqrt, ::hrsqrt)
@@ -150,6 +153,7 @@ MIGRAPHX_DEVICE_MATH_HALF(atan, ::atan)
MIGRAPHX_DEVICE_MATH_HALF(atanh, ::atanh)
MIGRAPHX_DEVICE_MATH_HALF(cosh, ::cosh)
MIGRAPHX_DEVICE_MATH_HALF(erf, ::erf)
MIGRAPHX_DEVICE_MATH_HALF(nearbyint, ::nearbyint)
MIGRAPHX_DEVICE_MATH_HALF(pow, ::pow)
MIGRAPHX_DEVICE_MATH_HALF(remainder, ::remainder)
MIGRAPHX_DEVICE_MATH_HALF(round, ::round)
@@ -229,10 +233,12 @@ MIGRAPHX_DEVICE_MATH_VEC(erf)
MIGRAPHX_DEVICE_MATH_VEC(exp)
MIGRAPHX_DEVICE_MATH_VEC(floor)
MIGRAPHX_DEVICE_MATH_VEC(fmod)
MIGRAPHX_DEVICE_MATH_VEC(isinf)
MIGRAPHX_DEVICE_MATH_VEC(isnan)
MIGRAPHX_DEVICE_MATH_VEC(log)
MIGRAPHX_DEVICE_MATH_VEC(max)
MIGRAPHX_DEVICE_MATH_VEC(min)
MIGRAPHX_DEVICE_MATH_VEC(nearbyint)
MIGRAPHX_DEVICE_MATH_VEC(pow)
MIGRAPHX_DEVICE_MATH_VEC(remainder)
MIGRAPHX_DEVICE_MATH_VEC(round)
......
@@ -25,5 +25,5 @@
#define MIGRAPHX_VERSION_MAJOR @PROJECT_VERSION_MAJOR@
#define MIGRAPHX_VERSION_MINOR @PROJECT_VERSION_MINOR@
#define MIGRAPHX_VERSION_PATCH @PROJECT_VERSION_PATCH@
-#define MIGRAPHX_VERSION_TWEAK @PROJECT_VERSION_TWEAK@
+#define MIGRAPHX_VERSION_TWEAK "@PROJECT_VERSION_TWEAK@"
// clang-format on
@@ -30,6 +30,9 @@ function(add_api_test TEST_NAME TEST_SRC TEST_DIR)
    add_test(NAME ${NAME} COMMAND $<TARGET_FILE:${NAME}> WORKING_DIRECTORY ${TEST_DIR})
    add_dependencies(tests ${NAME})
    add_dependencies(check ${NAME})
    if(WIN32)
        target_compile_definitions(${NAME} PRIVATE _CRT_SECURE_NO_WARNINGS)
    endif()
endfunction()
# Workaround: C files don't work with clang-tidy right now; needs a fix in rocm-cmake
@@ -41,6 +44,9 @@ function(add_c_api_test TEST_NAME TEST_SRC TEST_DIR)
    add_test(NAME ${NAME} COMMAND $<TARGET_FILE:${NAME}> WORKING_DIRECTORY ${TEST_DIR})
    add_dependencies(tests ${NAME})
    add_dependencies(check ${NAME})
    if(WIN32)
        target_compile_definitions(${NAME} PRIVATE _CRT_SECURE_NO_WARNINGS)
    endif()
endfunction()
add_api_test(array_base test_array_base.cpp ${TEST_ONNX_DIR})
@@ -57,10 +63,6 @@ add_api_test(custom_op test_custom_op.cpp ${TEST_ONNX_DIR})
add_api_test(tf_parser test_tf_parser.cpp ${TEST_TF_DIR})

# GPU-based tests
if(MIGRAPHX_ENABLE_GPU)
-    list(APPEND CMAKE_PREFIX_PATH /opt/rocm)
-    find_package(hip)
    add_api_test(gpu test_gpu.cpp ${TEST_ONNX_DIR})
-    target_link_libraries(test_api_gpu)
    add_api_test(custom_op_gpu test_custom_op_gpu.cpp ${TEST_ONNX_DIR})
-    target_link_libraries(test_api_custom_op_gpu)
endif()
@@ -198,4 +198,29 @@ TEST_CASE(set_loop_default_iter_num)
    EXPECT(out_shapes[1].lengths() == out_lens1);
}
TEST_CASE(set_loop_limit_iterations)
{
    migraphx::onnx_options option;
    option.set_default_loop_iterations(15);
    option.set_limit_loop_iterations(10);

    auto p          = migraphx::parse_onnx("loop_default_test.onnx", option);
    auto out_shapes = p.get_output_shapes();
    std::vector<std::size_t> out_lens0 = {1};
    EXPECT(out_shapes[0].lengths() == out_lens0);
    std::vector<std::size_t> out_lens1 = {10, 1};
    EXPECT(out_shapes[1].lengths() == out_lens1);
}

TEST_CASE(set_loop_limit_iterations2)
{
    migraphx::onnx_options option;
    option.set_limit_loop_iterations(10);

    auto p          = migraphx::parse_onnx("loop_test_implicit_tripcnt.onnx", option);
    auto out_shapes = p.get_output_shapes();
    std::vector<std::size_t> out_lens0 = {1};
    EXPECT(out_shapes[0].lengths() == out_lens0);
    std::vector<std::size_t> out_lens1 = {10, 1};
    EXPECT(out_shapes[1].lengths() == out_lens1);
}
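// In both tests the second output's first dimension comes from the iteration limit
// (10) rather than the model's default or implicit trip count (15).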
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -317,4 +317,59 @@ TEST_CASE(loop_test)
    }
}
TEST_CASE(loop_test_limit_max_iter)
{
    auto run_prog = [&](int64_t limit_max_iterations) {
        migraphx::onnx_options parse_options;
        parse_options.set_limit_loop_iterations(limit_max_iterations);
        auto p = migraphx::parse_onnx("loop_test_implicit_tripcnt.onnx", parse_options);
        auto shapes_before = p.get_output_shapes();
        migraphx::compile_options options;
        options.set_offload_copy();
        p.compile(migraphx::target("gpu"), options);
        auto shapes_after = p.get_output_shapes();
        CHECK(shapes_before.size() == 2);
        CHECK(bool{shapes_before.front() == shapes_after.front()});

        migraphx::program_parameters pp;
        auto param_shapes = p.get_parameter_shapes();
        auto aas          = param_shapes["a"];
        std::vector<float> xd = {1.0f};
        pp.add("a", migraphx::argument(aas, xd.data()));
        auto bbs = param_shapes["b"];
        std::vector<float> yd = {2.0f};
        pp.add("b", migraphx::argument(bbs, yd.data()));
        auto cs   = param_shapes["keep_going_cond"];
        bool cond = true;
        pp.add("keep_going_cond", migraphx::argument(cs, &cond));

        auto outputs = p.eval(pp);
        auto output  = outputs[0];
        std::vector<std::vector<float>> ret;
        ret.push_back(output.as_vector<float>());
        output = outputs[1];
        ret.push_back(output.as_vector<float>());
        return ret;
    };
    {
        auto result_vector       = run_prog(5);
        std::vector<float> gold0 = {2.0f};
        EXPECT(result_vector.at(0) == gold0);
        std::vector<float> gold1 = {-2, 4, 0, 0, 0};
        EXPECT(result_vector.at(1) == gold1);
    }
    {
        auto result_vector       = run_prog(20);
        std::vector<float> gold0 = {2.0f};
        EXPECT(result_vector.at(0) == gold0);
        std::vector<float> gold1 = {-2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
        EXPECT(result_vector.at(1) == gold1);
    }
}
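// With a limit of 5, the scan output is padded out to 5 entries; with a limit of 20,
// the model's implicit trip count (15) still bounds the output, so 15 entries come back.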
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -64,7 +64,7 @@ TEST_CASE(mul_literal_round_test)
    auto l1  = mm->add_literal(1 / 0.00787402f);
    auto mul = mm->add_instruction(migraphx::make_op("mul"), l0, l1);
-    auto round = mm->add_instruction(migraphx::make_op("round"), mul);
+    auto round = mm->add_instruction(migraphx::make_op("nearbyint"), mul);
    mm->add_return({round});
......
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <iostream>
#include <vector>
#include <migraphx/gpu/gemm.hpp>
#include <hip/hip_runtime_api.h>
#include <migraphx/gpu/target.hpp>
#include <migraphx/verify.hpp>
#include <test.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/iterator_for.hpp>
// includes needed for run_lowering
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/auto_contiguous.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
// Abbreviated lowering; we don't need the usual cleanup passes for this test
void run_lowering(migraphx::program& p, bool offload_copy = false)
{
    auto ctx = migraphx::gpu::context{};
    migraphx::run_passes(
        *p.get_main_module(),
        {migraphx::auto_contiguous{}, migraphx::gpu::lowering{&ctx, offload_copy}});
}
/**
 * Tests the automatic GEMM tuning feature. In the finalize() method of the gemm op,
 * rocBLAS API functions are called to quickly benchmark all the GEMM solutions
 * available in the currently installed rocBLAS library and choose the index of the
 * fastest one.
 */
TEST_CASE(gemm_tune_with_rocblas)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape sa{migraphx::shape::float_type, {4, 2}};
    migraphx::shape sb{migraphx::shape::float_type, {2, 3}};
    auto a = mm->add_parameter("a", sa);
    auto b = mm->add_parameter("b", sb);
    migraphx::operation dot_op = migraphx::make_op("dot");
    mm->add_instruction(dot_op, a, b);
    // lowering adds the gemm implementation for the dot operator
    run_lowering(p);

    migraphx::target gpu_t = migraphx::gpu::target{};
    migraphx::compile_options options;
    options.exhaustive_tune = true;
    p.compile(gpu_t, options);

    migraphx::value solution_idx(0);
    for(auto ins : iterator_for(*p.get_main_module()))
    {
        if(ins->name() == "gpu::gemm")
        {
            auto gemm_op = migraphx::get_operation(ins);
            // the tuned solution index is not deterministic, but anything other than 0
            // (default, invalid, or not available) is good
            // gemm_op.to_value().debug_print();
            solution_idx = gemm_op.to_value()["solution_idx"];
            break;
        }
    }
#ifdef MIGRAPHX_USE_ROCBLAS_TUNING_API
    EXPECT(0 != solution_idx.to<std::size_t>());
#else
    EXPECT(0 == solution_idx.to<std::size_t>());
#endif
}
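// The same tuning path can also be enabled without exhaustive_tune by setting the
// MIGRAPHX_ENABLE_GEMM_TUNING environment variable (see rocblas_gemm::finalize).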
// GEMM tuning of a strided-batch matrix; invokes rocblas_gemm_strided_batched_ex
TEST_CASE(gemm_tune_strided)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape sa{migraphx::shape::float_type, {4, 2, 2}};
    migraphx::shape sb{migraphx::shape::float_type, {4, 2, 2}};
    migraphx::shape s_output{migraphx::shape::float_type, {4, 2, 2}};
    auto a         = mm->add_parameter("a", sa);
    auto b         = mm->add_parameter("b", sb);
    auto output    = mm->add_parameter("out", s_output);
    auto gemm_oper = migraphx::make_op("gpu::gemm", {{"beta", 2}});
    mm->add_instruction(gemm_oper, a, b, output);

    migraphx::target gpu_t = migraphx::gpu::target{};
    migraphx::compile_options options;
    options.exhaustive_tune = true;
    p.compile(gpu_t, options);

    migraphx::value solution_idx(0);
    for(auto ins : iterator_for(*p.get_main_module()))
    {
        if(ins->name() == "gpu::gemm")
        {
            auto gemm_op = migraphx::get_operation(ins);
            auto gemmv   = gemm_op.to_value();
            // the tuned solution index is not deterministic, but anything other than 0
            // (default, invalid, or not available) is good
            solution_idx = gemmv["solution_idx"];
            break;
        }
    }
#ifdef MIGRAPHX_USE_ROCBLAS_TUNING_API
    EXPECT(0 != solution_idx.to<std::size_t>());
#else
    EXPECT(0 == solution_idx.to<std::size_t>());
#endif
}
// GEMM tuning of a strided-batch matrix created by lowering
TEST_CASE(gemm_tune_strided_lowered)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    // At the time of writing this test, gemm_impl considers a shape to be strided-batch
    // if it has at least three dimensions and the third-to-last is nonzero, invoking
    // rocblas_gemm_strided_batched_ex. Also, the dot operator requires all dimensions
    // except the last two to be equal.
    migraphx::shape sa{migraphx::shape::float_type, {4, 2, 5}};
    migraphx::shape sb{migraphx::shape::float_type, {4, 5, 3}};
    auto a = mm->add_parameter("a", sa);
    auto b = mm->add_parameter("b", sb);
    migraphx::operation dot_op = migraphx::make_op("dot");
    mm->add_instruction(dot_op, a, b);
    // lowering adds the gemm implementation for the dot operator
    run_lowering(p);

    migraphx::target gpu_t = migraphx::gpu::target{};
    migraphx::compile_options options;
    options.exhaustive_tune = true;
    p.compile(gpu_t, options);

    migraphx::value solution_idx(0);
    for(auto ins : iterator_for(*p.get_main_module()))
    {
        if(ins->name() == "gpu::gemm")
        {
            auto gemm_op = migraphx::get_operation(ins);
            // the tuned solution index is not deterministic, but anything other than 0
            // (default, invalid, or not available) is good
            solution_idx = gemm_op.to_value()["solution_idx"];
            break;
        }
    }
#ifdef MIGRAPHX_USE_ROCBLAS_TUNING_API
    EXPECT(0 != solution_idx.to<std::size_t>());
#else
    EXPECT(0 == solution_idx.to<std::size_t>());
#endif
}
TEST_CASE(gemm_tune_invalid_sol_index)
{
    migraphx::program p;
    auto* mm = p.get_main_module();
    migraphx::shape sa{migraphx::shape::float_type, {4, 2}};
    migraphx::shape sb{migraphx::shape::float_type, {2, 3}};
    migraphx::shape s_output{migraphx::shape::float_type, {4, 3}};
    auto a         = mm->add_parameter("a", sa);
    auto b         = mm->add_parameter("b", sb);
    auto output    = mm->add_parameter("out", s_output);
    auto gemm_oper = migraphx::make_op("gpu::gemm", {{"solution_idx", 987654321}});
    mm->add_instruction(gemm_oper, a, b, output);

    migraphx::target gpu_t = migraphx::gpu::target{};
    migraphx::compile_options options;
    options.exhaustive_tune = true;
    p.compile(gpu_t, options);

    migraphx::value solution_idx(0);
    for(auto ins : iterator_for(*p.get_main_module()))
    {
        if(ins->name() == "gpu::gemm")
        {
            auto gemm_op = migraphx::get_operation(ins);
            auto gemmv   = gemm_op.to_value();
            // given an invalid starting index, tuning should fall back to the default 0
            solution_idx = gemmv["solution_idx"];
            break;
        }
    }
#ifdef MIGRAPHX_USE_ROCBLAS_TUNING_API
    EXPECT(0 == solution_idx.to<std::size_t>());
#else
    EXPECT(0 != solution_idx.to<std::size_t>());
#endif
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
Binary test file changed (blob 2eeafc37bca21dc8bf337dda7020b486543162d7 → b7b8b5b2ce80edb33990c7ae0fedac6ae3c623f4)
@@ -3858,6 +3858,64 @@ def instance_norm_val_3d_test():
    return ([node], [], [y], [x_tensor, scale_tensor, bias_tensor])
@onnx_test()
def isinf_half_test():
    t1 = helper.make_tensor_value_info('t1', TensorProto.FLOAT16, [2, 3])
    t2 = helper.make_tensor_value_info('t2', TensorProto.BOOL, [2, 3])

    node = onnx.helper.make_node(
        'IsInf',
        inputs=['t1'],
        outputs=['t2'],
    )

    return ([node], [t1], [t2])


@onnx_test()
def isinf_neg_test():
    t1 = helper.make_tensor_value_info('t1', TensorProto.FLOAT, [2, 3])
    t2 = helper.make_tensor_value_info('t2', TensorProto.BOOL, [2, 3])

    node = onnx.helper.make_node(
        'IsInf',
        detect_negative=[1],
        detect_positive=[0],
        inputs=['t1'],
        outputs=['t2'],
    )

    return ([node], [t1], [t2])


@onnx_test()
def isinf_double_pos_test():
    t1 = helper.make_tensor_value_info('t1', TensorProto.DOUBLE, [2, 3])
    t2 = helper.make_tensor_value_info('t2', TensorProto.BOOL, [2, 3])

    node = onnx.helper.make_node(
        'IsInf',
        detect_negative=[0],
        detect_positive=[1],
        inputs=['t1'],
        outputs=['t2'],
    )

    return ([node], [t1], [t2])


@onnx_test()
def isinf_no_detect_test():
    t1 = helper.make_tensor_value_info('t1', TensorProto.FLOAT, [2, 3])
    t2 = helper.make_tensor_value_info('t2', TensorProto.BOOL, [2, 3])

    node = onnx.helper.make_node(
        'IsInf',
        detect_negative=[0],
        detect_positive=[0],
        inputs=['t1'],
        outputs=['t2'],
    )

    return ([node], [t1], [t2])
@onnx_test()
def isnan_float_test():
    t1 = helper.make_tensor_value_info('t1', TensorProto.FLOAT, [2, 3])
@@ -4276,6 +4334,50 @@ def loop_test():
    return ([node], [iter, cond, a, b], [b_loop, uout])
@onnx_test()
def loop_test_implicit_tripcnt():
    body = helper.make_graph([
        helper.make_node("Add", ["a", "b_in"], ["my_local"]),
        helper.make_node("Sub", ["a", "b_in"], ["a_sub_b_in"]),
        helper.make_node("Greater", ["my_local", "a_sub_b_in"],
                         ["keep_going"]),
        helper.make_node("Add", ["a_sub_b_in", "a_sub_b_in"],
                         ["user_defined_vals"]),
    ], "body", [
        helper.make_tensor_value_info('iteration_num', TensorProto.INT64, [1]),
        helper.make_tensor_value_info('keep_going_inp', TensorProto.BOOL, [1]),
        helper.make_tensor_value_info('b_in', TensorProto.FLOAT, [1])
    ], [
        helper.make_tensor_value_info('keep_going', TensorProto.BOOL, [1]),
        helper.make_tensor_value_info('a_sub_b_in', TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info('my_local', TensorProto.FLOAT, [1]),
        helper.make_tensor_value_info('user_defined_vals', TensorProto.FLOAT,
                                      [1]),
    ])

    iter = helper.make_tensor(name='max_trip_count',
                              data_type=TensorProto.INT64,
                              dims=[1],
                              vals=[15])

    node = helper.make_node(
        "Loop",
        inputs=["max_trip_count", "keep_going_cond", "b"],
        outputs=["b_loop", "my_local_loop", "user_defined_vals_loop"],
        body=body)

    a = helper.make_tensor_value_info('a', TensorProto.FLOAT, [1])
    b = helper.make_tensor_value_info('b', TensorProto.FLOAT, [1])
    cond = helper.make_tensor_value_info('keep_going_cond', TensorProto.BOOL,
                                         [1])
    b_loop = helper.make_tensor_value_info('b_loop', TensorProto.FLOAT, [1])
    uout = helper.make_tensor_value_info('user_defined_vals_loop',
                                         TensorProto.FLOAT, [2, 1])

    return ([node], [cond, a, b], [b_loop, uout], [iter])
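# The trip count (15) is supplied as an initializer rather than a runtime input;
# this is the implicit trip count that set_limit_loop_iterations(10) caps in the
# API tests above.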
@onnx_test()
def lpnormalization_axis_error_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3])
@@ -4883,9 +4985,9 @@ def mod_test_fmod_different_dtypes():

@onnx_test()
def multinomial_test():
-    sample_size = 10
-    seed = 0.0
-    input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [1, 10])
+    sample_size = 13
+    seed = 0.
+    input = helper.make_tensor_value_info("input", TensorProto.FLOAT, [3, 10])
    output = helper.make_tensor_value_info("output", TensorProto.INT32,
                                           [1, 10])

@@ -4898,6 +5000,44 @@ def multinomial_test():
    return ([node], [input], [output])
@onnx_test()
def multinomial_dyn_test():
    sample_size = 100000
    seed = 1.3
    categories = 5
    input = helper.make_tensor_value_info("input", TensorProto.FLOAT,
                                          [None, categories])
    output = helper.make_tensor_value_info("output", TensorProto.FLOAT,
                                           [None, categories])

    node = onnx.helper.make_node(
        'Multinomial',
        inputs=['input'],
        sample_size=sample_size,
        dtype=1,  # shape::float_type
        seed=seed,
        outputs=['output'])

    return ([node], [input], [output])


@onnx_test()
def multinomial_autoseed_dyn_test():
    # If the seed attribute is not given, the device should auto-generate one
    # at runtime
    sample_size = 12
    input = helper.make_tensor_value_info("input", TensorProto.FLOAT,
                                          [None, 10])
    output = helper.make_tensor_value_info("output", TensorProto.INT32,
                                           [None, 10])

    node = onnx.helper.make_node('Multinomial',
                                 inputs=['input'],
                                 sample_size=sample_size,
                                 outputs=['output'])

    return ([node], [input], [output])
@onnx_test()
def multinomial_generated_seed_test():
    sample_size = 10

@@ -6947,6 +7087,16 @@ def roialign_test():
    return ([node], [x, roi, bi], [y])
@onnx_test()
def round_half_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [4, 4])
    y = helper.make_tensor_value_info('y', TensorProto.FLOAT16, [4, 4])

    node = onnx.helper.make_node('Round', inputs=['x'], outputs=['y'])

    return ([node], [x], [y])
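# Round is parsed as nearbyint (see the mul_literal_round_test change above), giving
# round-half-to-even behavior for these float16 values.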
@onnx_test()
def scatter_add_test():
    x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
@@ -7866,6 +8016,32 @@ def slice_var_input_dyn1():
    return ([node], [data, starts, ends, axes], [output])
@onnx_test()
def slice_var_input_default_steps():
    step = np.array([1, 1])
    step_tensor = helper.make_tensor(name="step",
                                     data_type=TensorProto.INT64,
                                     dims=step.shape,
                                     vals=step.astype(int))
    arg_step = helper.make_node("Constant",
                                inputs=[],
                                outputs=['arg_step'],
                                value=step_tensor)

    data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [None, 2])
    starts = helper.make_tensor_value_info('starts', TensorProto.INT64, [2])
    ends = helper.make_tensor_value_info('ends', TensorProto.INT64, [2])
    axes = helper.make_tensor_value_info('axes', TensorProto.INT64, [2])
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])

    node = onnx.helper.make_node(
        'Slice',
        inputs=['data', 'starts', 'ends', 'axes', 'arg_step'],
        outputs=['output'])

    return ([arg_step, node], [data, starts, ends, axes], [output])
@onnx_test()
def slice_var_input_steps_error():
    step = np.array([2, 1])

@@ -7879,9 +8055,9 @@ def slice_var_input_steps_error():
        value=step_tensor)

    data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 2])
-    starts = helper.make_tensor_value_info('starts', TensorProto.FLOAT, [2])
-    ends = helper.make_tensor_value_info('ends', TensorProto.FLOAT, [2])
-    axes = helper.make_tensor_value_info('axes', TensorProto.FLOAT, [2])
+    starts = helper.make_tensor_value_info('starts', TensorProto.INT64, [2])
+    ends = helper.make_tensor_value_info('ends', TensorProto.INT64, [2])
+    axes = helper.make_tensor_value_info('axes', TensorProto.INT64, [2])
    output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])

    node = onnx.helper.make_node(
@@ -8042,6 +8218,42 @@ def split_test_no_attribute():
    return ([const_node, node], [x], [y1, y2, y3, y4])
@onnx_test()
def split_test_uneven():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [12, 15])
    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [3, 15])
    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [3, 15])
    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [3, 15])
    y4 = helper.make_tensor_value_info('y4', TensorProto.FLOAT, [3, 15])
    y5 = helper.make_tensor_value_info('y5', TensorProto.FLOAT, [0, 15])

    node = onnx.helper.make_node(
        'Split',
        inputs=['x'],
        outputs=['y1', 'y2', 'y3', 'y4', 'y5'],
    )

    return ([node], [x], [y1, y2, y3, y4, y5])
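# With five outputs and an axis length of 12, each chunk gets ceil(12/5) = 3 rows,
# so the fifth output is empty: 3 + 3 + 3 + 3 + 0 = 12.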
@onnx_test()
def split_test_uneven_num_outputs():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [11, 15])
    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [3, 15])
    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [3, 15])
    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [3, 15])
    y4 = helper.make_tensor_value_info('y4', TensorProto.FLOAT, [2, 15])

    node = onnx.helper.make_node(
        'Split',
        inputs=['x'],
        outputs=['y1', 'y2', 'y3', 'y4'],
        num_outputs=4,
    )

    return ([node], [x], [y1, y2, y3, y4])
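# With num_outputs=4 and an axis length of 11, chunks are ceil(11/4) = 3 rows except
# the last, which takes the remainder: 3 + 3 + 3 + 2 = 11.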
@onnx_test()
def split_test_no_attribute_invalid_split():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [300, 15])

@@ -8101,6 +8313,24 @@ def split_test_no_attribute_invalid_input_split():
    return ([node], [x], [y1, y2, y3])
@onnx_test()
def split_test_invalid_num_outputs():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [11, 15])
    y1 = helper.make_tensor_value_info('y1', TensorProto.FLOAT, [3, 15])
    y2 = helper.make_tensor_value_info('y2', TensorProto.FLOAT, [3, 15])
    y3 = helper.make_tensor_value_info('y3', TensorProto.FLOAT, [3, 15])
    y4 = helper.make_tensor_value_info('y4', TensorProto.FLOAT, [2, 15])

    node = onnx.helper.make_node(
        'Split',
        inputs=['x'],
        outputs=['y1', 'y2', 'y3', 'y4'],
        num_outputs=5,
    )

    return ([node], [x], [y1, y2, y3, y4])
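# num_outputs=5 contradicts the four declared outputs, so parsing this model
# should raise an error.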
@onnx_test()
def sqrt_test():
    x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
@@ -8837,6 +9067,20 @@ def upsample_test():
    return ([node], [X], [Y], [scale_tensor])
@onnx_test()
def upsample_ver7_test():
    X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 2, 2])
    Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [1, 1, 4, 6])

    node = onnx.helper.make_node('Upsample',
                                 inputs=['X'],
                                 outputs=['Y'],
                                 mode='nearest',
                                 scales=[1.0, 1.0, 2.0, 3.0])

    return ([node], [X], [Y])
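# Opset-7 Upsample takes `scales` as an attribute; later opsets move the scales to
# an input, which is what the upsample_test above exercises.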
@onnx_test()
def variable_batch_test():
    x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
......