"tests/trainer/test_data_collator.py" did not exist on "c108d0b5a43fee12e1ef578fe871f0f123b06018"
Commit a8eb886b authored by Khalique Ahmed's avatar Khalique Ahmed
Browse files

Merge branch 'develop' of https://github.com/ROCmSoftwarePlatform/AMDMIGraphX into develop

parents 7e604e9b 4f3cc417
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_TANH_HPP
#define MIGRAPHX_GUARD_RTGLIB_TANH_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/tanh.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_tanh : unary_device<hip_tanh, device::tanh>
{
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
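The struct body above is empty because unary_device supplies the operator boilerplate via CRTP. A minimal sketch of what that wrapper plausibly generates — modeled on the hip_softmax::compute definition later in this commit, not on the actual contents of oper.hpp, so treat it as an assumption:

// Hypothetical expansion of unary_device<hip_tanh, device::tanh>; invented
// for illustration, not the real oper.hpp code.
struct hip_tanh_sketch
{
    std::string name() const { return "gpu::tanh"; }

    // Launch the device function on the context's stream; by convention the
    // last argument is the preallocated output buffer.
    argument compute(context& ctx, const shape&, const std::vector<argument>& args) const
    {
        device::tanh(ctx.get_stream().get(), args.back(), args.front());
        return args.back();
    }
};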
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_UNARY_NOT_HPP
#define MIGRAPHX_GUARD_RTGLIB_UNARY_NOT_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/unary_not.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_unary_not : unary_device<hip_unary_not, device::unary_not>
{
    std::string name() const { return "gpu::not"; }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_WHERE_HPP
#define MIGRAPHX_GUARD_RTGLIB_WHERE_HPP
#include <migraphx/gpu/oper.hpp>
#include <migraphx/gpu/device/where.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct hip_where : ternary_device<hip_where, device::where>
{
    shape compute_shape(const std::vector<shape>& inputs) const
    {
        check_shapes{inputs, *this}.has(4).same_dims();
        auto s1 = inputs.at(1);
        auto s2 = inputs.at(2);
        if(s1 == s2 and s1.packed())
        {
            return s1;
        }
        else if(s1.packed() != s2.packed())
        {
            return s1.packed() ? s1 : s2;
        }
        else if(s1.broadcasted() != s2.broadcasted())
        {
            return s1.broadcasted() ? s2.with_lens(s1.lens()) : s1.with_lens(s1.lens());
        }
        else
        {
            return {s1.type(), s1.lens()};
        }
    }
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
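The compute_shape above prefers a packed output shape and avoids inheriting broadcast strides. A standalone sketch of the same selection order, with a stand-in struct whose fields are invented for illustration (the first branch is simplified: the real code also requires s1 == s2):

#include <cassert>

// Stand-in for migraphx::shape with only the two predicates the selection
// inspects; names and fields here are hypothetical.
struct mini_shape
{
    bool packed;
    bool broadcasted;
};

// Same preference order as hip_where::compute_shape: prefer a packed shape,
// then a non-broadcast shape, else fall back to a fresh standard shape
// (modeled here as packed and not broadcasted).
mini_shape pick_output(mini_shape s1, mini_shape s2)
{
    if(s1.packed and s2.packed)
        return s1;
    if(s1.packed != s2.packed)
        return s1.packed ? s1 : s2;
    if(s1.broadcasted != s2.broadcasted)
        return s1.broadcasted ? s2 : s1;
    return {true, false};
}

int main()
{
    // One packed input and one broadcast input: the packed shape wins.
    assert(pick_output({true, false}, {false, true}).packed);
    // Neither packed, only s1 broadcast: s2 is chosen.
    assert(not pick_output({false, true}, {false, false}).broadcasted);
    return 0;
}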
@@ -74,7 +74,7 @@ struct concat_compiler : compiler<concat_compiler>
        options.output      = inputs.back();
        options.params      = "-Wno-float-equal";
        auto axis           = find_fast_axis(options.inputs);
-       auto vec            = vectorize::elements(axis, options.inputs);
+       auto vec            = vectorize::elements(ctx, axis, options.inputs);
        options.kernel_name = v.get("kernel", "concat_kernel");
        options.set_launch_params(
            v, compute_global_for(ctx, get_concat_elements(options.inputs) / vec.size, 256));
...
@@ -65,7 +65,7 @@ struct gathernd_compiler : compiler<gathernd_compiler>
    operation compile_op(context& ctx, const std::vector<shape>& inputs, const value& v) const
    {
        hip_compile_options options;
-       auto out_s = inputs.back();
+       const auto& out_s = inputs.back();
        options.set_launch_params(v, compute_global_for(ctx, out_s.elements()));
        options.inputs = inputs;
        options.output = out_s;
...
@@ -50,7 +50,6 @@ ${preamble}
extern "C" {
__global__ void ${kernel}(${params})
{
-   auto idx = make_index();
    transform_args(make_tensors(), rotate_last(), ${transformers})(${args})([](auto... xs) {
        ${layernorm}<${axis}>(${post}, ${eps}, xs...);
    });
@@ -78,9 +77,8 @@ struct layernorm_compiler : compiler<layernorm_compiler>
        // Vectorize if the axis is a reduction axis
        if(axis == faxis)
        {
-           vec = vectorize::elements(faxis, inputs);
+           vec = vectorize::elements(ctx, faxis, inputs);
        }
-       auto preloads = preload::broadcasts(axis, inputs);
        auto relements  = inputs[0].lens()[axis] / vec.size;
        auto nelements  = (inputs.back().elements() / inputs[0].lens()[axis]);
        auto block_size = compute_block_size(relements, 256);
@@ -96,7 +94,7 @@ struct layernorm_compiler : compiler<layernorm_compiler>
            {{"kernel", options.kernel_name},
             {"params", enum_params(inputs.size(), "void * private_p")},
             {"args", enum_params(inputs.size(), "private_p")},
-            {"transformers", make_transformer_args(preloads, vec)},
+            {"transformers", make_transformer_args(vec)},
             {"post", v.get("post", std::string{"op::id{}"})},
             {"preamble", v.get("preamble", std::string{})},
             {"layernorm", v.get("layernorm", std::string{"layernorm"})},
...
@@ -75,20 +75,16 @@ struct pointwise_compiler : compiler<pointwise_compiler>
        options.virtual_inputs = reduce_dims(inputs);
        options.params         = "-Wno-float-equal";
        auto axis              = find_fast_axis(options.virtual_inputs);
-       auto vec               = vectorize::elements(axis, options.virtual_inputs);
-       auto preloads          = preload::broadcasts(axis, options.virtual_inputs);
+       auto vec               = vectorize::elements(ctx, axis, options.virtual_inputs);
        options.kernel_name    = v.get("kernel", "kernel");
        options.set_launch_params(
-           v,
-           compute_global_for(ctx,
-                              options.output.elements() / vec.size,
-                              oversubscribe_if(not preloads.is_preloading())));
+           v, compute_global_for(ctx, options.output.elements() / vec.size, 256));
        auto src = interpolate_string(pointwise_kernel,
                                      {{"kernel", options.kernel_name},
                                       {"params", enum_params(inputs.size(), "void * private_p")},
                                       {"args", enum_params(inputs.size(), "private_p")},
                                       {"lambda", v.at("lambda").to<std::string>()},
-                                      {"transformers", make_transformer_args(preloads, vec)},
+                                      {"transformers", make_transformer_args(vec)},
                                       {"preamble", v.get("preamble", std::string{})}});
        return compile_hip_code_object(src, options);
    }
...
@@ -121,7 +121,7 @@ struct reduce_compiler : compiler<reduce_compiler>
        // Vectorize if the axis is a reduction axis
        if(options.virtual_inputs.back().lens()[faxis] == 1)
        {
-           vec = vectorize::elements(faxis, options.virtual_inputs);
+           vec = vectorize::elements(ctx, faxis, options.virtual_inputs);
        }
        auto relements = get_reduce_elements(options.virtual_inputs) / vec.size;
        auto nelements = options.virtual_inputs.back().elements();
...
@@ -32,6 +32,8 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {

+MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_USE_FAST_SOFTMAX)
+
using namespace migraphx::gpu::gen; // NOLINT

static const char* const softmax_kernel = R"__migraphx__(
@@ -69,7 +71,7 @@ struct softmax_compiler : compiler<softmax_compiler>
        // Vectorize if the axis is a reduction axis
        if(faxis == axis)
        {
-           vec = vectorize::elements(faxis, inputs);
+           vec = vectorize::elements(ctx, faxis, inputs);
        }
        auto relements = inputs[0].lens()[axis] / vec.size;
        auto nelements = (inputs.back().elements() / inputs[0].lens()[axis]);
@@ -81,6 +83,9 @@ struct softmax_compiler : compiler<softmax_compiler>
        options.inputs      = inputs;
        options.kernel_name = "softmax_kernel";

+       if(enabled(MIGRAPHX_USE_FAST_SOFTMAX{}))
+           options.params = "-DMIGRAPHX_USE_FAST_SOFTMAX";
+
        auto src = interpolate_string(
            softmax_kernel,
            {{"transformers", make_transformer_args(vec)}, {"axis", to_string(axis)}});
...
@@ -104,6 +104,7 @@ MIGRAPHX_DEVICE_MATH(floor, ::floor)
MIGRAPHX_DEVICE_MATH(isnan, ::isnan)
MIGRAPHX_DEVICE_MATH(log, ::log)
MIGRAPHX_DEVICE_MATH(pow, ::pow)
+MIGRAPHX_DEVICE_MATH(remainder, ::remainder)
MIGRAPHX_DEVICE_MATH(round, ::round)
MIGRAPHX_DEVICE_MATH(rsqrt, ::rsqrt)
MIGRAPHX_DEVICE_MATH(sin, ::sin)
@@ -111,6 +112,7 @@ MIGRAPHX_DEVICE_MATH(sinh, ::sinh)
MIGRAPHX_DEVICE_MATH(sqrt, ::sqrt)
MIGRAPHX_DEVICE_MATH(tan, ::tan)
MIGRAPHX_DEVICE_MATH(tanh, ::tanh)
+MIGRAPHX_DEVICE_MATH(fmod, ::fmod)

// Float overloads
MIGRAPHX_DEVICE_MATH_FOR(float, acos, ::acosf)
@@ -126,6 +128,7 @@ MIGRAPHX_DEVICE_MATH_FOR(float, sin, ::sinf)
MIGRAPHX_DEVICE_MATH_FOR(float, sinh, ::sinhf)
MIGRAPHX_DEVICE_MATH_FOR(float, tan, ::tanf)
MIGRAPHX_DEVICE_MATH_FOR(float, tanh, ::tanhf)
+MIGRAPHX_DEVICE_MATH_FOR(float, fmod, ::fmodf)

// Builtin half functions
MIGRAPHX_DEVICE_MATH_FOR(migraphx::half, abs, ::__habs)
@@ -148,11 +151,13 @@ MIGRAPHX_DEVICE_MATH_HALF(erf, ::erf)
MIGRAPHX_DEVICE_MATH_HALF(floor, ::floor)
MIGRAPHX_DEVICE_MATH_HALF(isnan, ::isnan)
MIGRAPHX_DEVICE_MATH_HALF(pow, ::pow)
+MIGRAPHX_DEVICE_MATH_HALF(remainder, ::remainder)
MIGRAPHX_DEVICE_MATH_HALF(round, ::round)
MIGRAPHX_DEVICE_MATH_HALF(sin, ::sin)
MIGRAPHX_DEVICE_MATH_HALF(sinh, ::sinh)
MIGRAPHX_DEVICE_MATH_HALF(tan, ::tan)
MIGRAPHX_DEVICE_MATH_HALF(tanh, ::tanh)
+MIGRAPHX_DEVICE_MATH_HALF(fmod, ::fmod)

// Map math functions to hip half2 functions
// The half2 type is defined in include/hip/amd_detail/hip_fp16_gcc.h and is 2 16-bit floats
@@ -226,11 +231,13 @@ MIGRAPHX_DEVICE_MATH_VEC(cosh)
MIGRAPHX_DEVICE_MATH_VEC(erf)
MIGRAPHX_DEVICE_MATH_VEC(exp)
MIGRAPHX_DEVICE_MATH_VEC(floor)
+MIGRAPHX_DEVICE_MATH_VEC(fmod)
MIGRAPHX_DEVICE_MATH_VEC(isnan)
MIGRAPHX_DEVICE_MATH_VEC(log)
MIGRAPHX_DEVICE_MATH_VEC(max)
MIGRAPHX_DEVICE_MATH_VEC(min)
MIGRAPHX_DEVICE_MATH_VEC(pow)
+MIGRAPHX_DEVICE_MATH_VEC(remainder)
MIGRAPHX_DEVICE_MATH_VEC(round)
MIGRAPHX_DEVICE_MATH_VEC(rsqrt)
MIGRAPHX_DEVICE_MATH_VEC(sin)
...
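This hunk wires remainder and fmod through every wrapper layer (generic, float, half, and vectorized). The two functions differ in how the quotient is rounded: fmod truncates toward zero, remainder rounds to nearest, so their results can differ in both magnitude and sign. A host-only check in plain C++:

#include <cmath>
#include <cstdio>

int main()
{
    // fmod:      x - trunc(x/y)*y, result keeps the sign of x.
    // remainder: x - nearbyint(x/y)*y, result may flip sign.
    std::printf("fmod(5.5, 2)       = %f\n", std::fmod(5.5, 2.0));       // 1.5
    std::printf("remainder(5.5, 2)  = %f\n", std::remainder(5.5, 2.0));  // -0.5
    std::printf("fmod(-5.5, 2)      = %f\n", std::fmod(-5.5, 2.0));      // -1.5
    std::printf("remainder(-5.5, 2) = %f\n", std::remainder(-5.5, 2.0)); // 0.5
    return 0;
}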
@@ -197,11 +197,11 @@ struct block
    struct reducer
    {
        index idx;
-       Slicer slicer;
+       Slicer slice;
        template <class Op, class T, class Read>
        __device__ auto reduce(Op op, T init, Read read) const
        {
-           return sliced(slicer, [=](auto x, auto... xs) {
+           return sliced(slice, [=](auto x, auto... xs) {
                return block_reduce(idx, op, init, x.get_shape().elements(), [&](auto j) {
                    return vec_reduce(read(x[j], xs[j]...), op);
                });
@@ -218,7 +218,7 @@ struct block
        template <class F>
        __device__ auto inner(F f) const
        {
-           return sliced(slicer, [=](auto x, auto... xs) {
+           return sliced(slice, [=](auto x, auto... xs) {
                idx.local_stride(x.get_shape().elements(), [&](auto j) { f(x[j], xs[j]...); });
            });
        }
@@ -226,7 +226,7 @@ struct block
        template <class Input>
        constexpr auto elements() const
        {
-           using reduce_type = decltype(slicer(Input{}));
+           using reduce_type = decltype(slice(Input{}));
            using value_type  = typename Input::type;
            constexpr auto relements = get_shape_c<reduce_type>{}.elements();
            if constexpr(vec_size<value_type>() > 1)
@@ -260,11 +260,11 @@ struct lane
    struct reducer
    {
        index idx;
-       Slicer slicer;
+       Slicer slice;
        template <class Op, class T, class Read>
        __device__ auto reduce(Op op, T init, Read read) const
        {
-           return sliced(slicer, [=](auto x, auto... xs) {
+           return sliced(slice, [=](auto x, auto... xs) {
                using type = typename decltype(x)::type;
                type r     = init;
                for(index_int j = 0; j < x.get_shape().elements(); j++)
@@ -284,7 +284,7 @@ struct lane
        template <class F>
        __device__ auto inner(F f) const
        {
-           return sliced(slicer, [=](auto x, auto... xs) {
+           return sliced(slice, [=](auto x, auto... xs) {
                for(index_int j = 0; j < x.get_shape().elements(); j++)
                {
                    f(x[j], xs[j]...);
@@ -295,7 +295,7 @@ struct lane
        template <class Input>
        constexpr auto elements() const
        {
-           using reduce_type = decltype(slicer(Input{}));
+           using reduce_type = decltype(slice(Input{}));
            return get_shape_c<reduce_type>{}.elements();
        }
    };
...
@@ -33,11 +33,15 @@ template <index_int Axis, class Input, class Output>
__device__ void softmax(Input input, Output output)
{
    reduce::block::run<reduce::with_axis<Input, Axis>>([&](auto, auto r) {
-       auto batch_max = r.reduce(op::max{}, lowest{}, op::id{})(input);
-       auto batch_sum =
-           r.reduce(op::sum{}, 0, [&](auto x) { return migraphx::exp(x - batch_max); })(input);
-       r.inner([&](auto& y, auto x) { y = migraphx::exp(x - batch_max) / batch_sum; })(output,
-                                                                                      input);
+#ifdef MIGRAPHX_USE_FAST_SOFTMAX
+       const auto c = vec_at(r.slice(input)[0], 0);
+#else
+       const auto c = r.reduce(op::max{}, lowest{}, op::id{})(input);
+#endif
+       auto batch_sum = r.reduce(op::sum{}, 0, [&](auto x) {
+           return migraphx::convert<float>(migraphx::exp(x - c));
+       })(input);
+       r.inner([&](auto& y, auto x) { y = migraphx::exp(x - c) / batch_sum; })(output, input);
    });
}
...
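Under MIGRAPHX_USE_FAST_SOFTMAX the shift c is the first element of the reduction slice rather than its maximum, which skips one block reduction; in exact arithmetic any shift yields the same softmax, but only c = max(x) guards exp against overflow on large inputs. A scalar illustration of that invariance, independent of the kernel machinery:

#include <cmath>
#include <cstdio>
#include <vector>

// Softmax with an arbitrary shift c: exp(x - c) / sum(exp(x - c)).
// Any c gives the same result mathematically; c = max(x) is the
// overflow-safe choice, c = x[0] is the "fast" choice in the kernel above.
std::vector<double> softmax(const std::vector<double>& x, double c)
{
    double sum = 0;
    std::vector<double> y(x.size());
    for(std::size_t i = 0; i < x.size(); i++)
        sum += (y[i] = std::exp(x[i] - c));
    for(auto& v : y)
        v /= sum;
    return y;
}

int main()
{
    std::vector<double> x = {1.0, 2.0, 3.0};
    auto stable = softmax(x, 3.0); // c = max(x)
    auto fast   = softmax(x, 1.0); // c = x[0]
    for(std::size_t i = 0; i < x.size(); i++)
        std::printf("%f %f\n", stable[i], fast[i]); // identical values
    return 0;
}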
@@ -26,6 +26,8 @@
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
+#include <migraphx/instruction_ref.hpp>
+#include <migraphx/stringutils.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/op/deconvolution.hpp>
@@ -81,77 +83,21 @@ struct miopen_apply
        (void)i;
    }

-   const std::unordered_set<std::string>& get_rocblas_fp32_archs()
-   {
-       static std::unordered_set<std::string> supported_archs{"gfx908", "gfx90a"};
-       return supported_archs;
-   }
-
    void init()
    {
        assert(mod != nullptr);
        assert(pass != nullptr);
-#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
-       auto& ctx = get_context();
-       const auto device_name = trim(split_string(get_device_name(), ':').front());
-       if(contains(get_rocblas_fp32_archs(), device_name))
-           compute_fp32 = true;
-       rocblas_gemm_flags flag;
-       rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
-       int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
-#endif
+       auto& ctx      = get_context();
+       int8_x4_format = get_int8_x4_format(ctx);
+       compute_fp32   = get_compute_fp32_flag();

        offload_copy = (mod->name() == "main") ? pass->offload_copy : false;

-       add_generic_op("acos");
-       add_generic_op("acosh");
-       add_generic_op("add");
-       add_generic_op("asin");
-       add_generic_op("asinh");
-       add_generic_op("atan");
-       add_generic_op("atanh");
-       add_generic_op("ceil");
        add_generic_op("contiguous");
-       add_generic_op("cos");
-       add_generic_op("cosh");
-       add_generic_op("div");
-       add_generic_op("equal");
-       add_generic_op("erf");
-       add_generic_op("exp");
-       add_generic_op("floor");
-       add_generic_op("greater");
-       add_generic_op("less");
-       add_generic_op("log");
-       add_generic_op("logical_and");
-       add_generic_op("logical_or");
-       add_generic_op("logical_xor");
-       add_generic_op("max");
-       add_generic_op("min");
-       add_generic_op("mul");
-       add_generic_op("not");
-       add_generic_op("pow");
-       add_generic_op("prelu");
-       add_generic_op("recip");
-       add_generic_op("relu");
-       add_generic_op("round");
-       add_generic_op("rsqrt");
-       add_generic_op("sigmoid");
-       add_generic_op("sign");
-       add_generic_op("sin");
-       add_generic_op("sinh");
-       add_generic_op("sqdiff");
-       add_generic_op("sqrt");
-       add_generic_op("sub");
-       add_generic_op("tan");
-       add_generic_op("tanh");
-       add_generic_op("where");

-       add_extend_op("abs");
        add_extend_op("argmax");
        add_extend_op("argmin");
-       add_extend_op("clip");
-       add_extend_op("convert");
        add_extend_op("elu");
        add_extend_op("gather");
        add_extend_op("leaky_relu");
@@ -227,7 +173,8 @@ struct miopen_apply
        init();
        for(auto it = mod->begin(); it != mod->end(); it++)
        {
            auto s     = it->get_shape();
+           auto attrs = it->get_operator().attributes();
            if(apply_map.count(it->name()) > 0)
            {
                check_shape(s, apply_map.at(it->name())(it));
@@ -236,11 +183,37 @@ struct miopen_apply
            {
                check_shape(s, insert_precompile_op(it));
            }
+           else if(attrs.contains("target"))
+           {
+               check_shape(s, insert_custom_op(it, attrs));
+           }
        }
        copy_params();
    }

+   instruction_ref insert_custom_op(instruction_ref ins, const value& attrs) const
+   {
+       const auto& custom_op = ins->get_operator();
+       if(attrs.at("target") == "cpu")
+       {
+           auto s = ins->get_shape();
+           std::vector<instruction_ref> cpu_inputs;
+           auto inputs = ins->inputs();
+           auto output = inputs.back();
+           std::transform(
+               inputs.begin(), inputs.end(), std::back_inserter(cpu_inputs), [&](auto in) {
+                   return mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), in);
+               });
+           cpu_inputs.front() =
+               mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_inputs);
+           auto cpu_out = mod->insert_instruction(ins, custom_op, cpu_inputs);
+           auto gpu_out =
+               mod->insert_instruction(ins, make_op("hip::copy_to_gpu"), cpu_out, output);
+           return mod->replace_instruction(ins, gpu_out);
+       }
+       return ins;
+   }
+
    instruction_ref insert_precompile_op(instruction_ref ins) const
    {
        auto output = insert_allocation(ins, ins->get_shape());
...
@@ -22,7 +22,6 @@
 * THE SOFTWARE.
 */
#include <migraphx/gpu/quant_convolution.hpp>
-#include <migraphx/gpu/device/convert.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/generate.hpp>
...
@@ -21,7 +21,13 @@
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
+#include <unordered_set>
+
+#include <migraphx/ranges.hpp>
+#include <migraphx/stringutils.hpp>
+#include <migraphx/gpu/device_name.hpp>
#include <migraphx/gpu/rocblas.hpp>
+#include <migraphx/gpu/context.hpp>

namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
@@ -41,6 +47,33 @@ rocblas_handle_ptr create_rocblas_handle_ptr(hipStream_t s)
    return rb;
}

+const std::unordered_set<std::string>& get_rocblas_fp32_archs()
+{
+    static std::unordered_set<std::string> supported_archs{"gfx908", "gfx90a"};
+    return supported_archs;
+}
+
+bool get_compute_fp32_flag()
+{
+    bool compute_fp32 = false;
+#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
+    const auto device_name = trim(split_string(get_device_name(), ':').front());
+    if(contains(get_rocblas_fp32_archs(), device_name))
+        compute_fp32 = true;
+#endif
+    return compute_fp32;
+}
+
+bool get_int8_x4_format(context& ctx)
+{
+    bool int8_x4_format = true;
+#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
+    rocblas_gemm_flags flag;
+    rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
+    int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
+#endif
+    return int8_x4_format;
+}
+
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
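get_compute_fp32_flag keys off the base gfx name, which ROCm reports with ':'-separated feature suffixes. A standalone sketch of that parsing; the example device strings are assumptions about the usual format:

#include <iostream>
#include <string>

// Reduce a full ROCm device string to its base architecture name, mirroring
// trim(split_string(get_device_name(), ':').front()).
std::string base_arch(const std::string& device_name)
{
    // substr(0, npos) returns the whole string when there is no ':'.
    return device_name.substr(0, device_name.find(':'));
}

int main()
{
    std::cout << base_arch("gfx90a:sramecc+:xnack-") << '\n'; // gfx90a
    std::cout << base_arch("gfx908") << '\n';                 // gfx908
    return 0;
}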
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/gpu/softmax.hpp>
#include <migraphx/gpu/device/softmax.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/tune_axis.hpp>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
shape hip_softmax::compute_shape(const std::vector<shape>& inputs) const
{
    check_shapes{inputs, *this}.has(2).standard();
    return op.normalize_compute_shape({inputs.at(0)});
}

argument hip_softmax::compute(context& ctx, const shape&, const std::vector<argument>& args) const
{
    auto n_dim      = args.front().get_shape().lens().size();
    auto tuned_axis = tune_axis(n_dim, op.axis, op.name());
    device::softmax(ctx.get_stream().get(), args.back(), args.front(), tuned_axis);
    return args.back();
}
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
@@ -347,7 +347,7 @@ void tf_parser::parse_node(const std::string& name)
                // input was from a node with multiple outputs
                if(contains(input_name, ':'))
                {
-                   input_name = input_name.substr(0, input.find(':'));
+                   input_name.resize(input.find(':'));
                }
                else
                {
...
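The new form trims the string in place rather than allocating a temporary through substr. It leans on the contains(input_name, ':') guard above: with no ':' present, find returns npos, and resize(npos) throws where substr(0, npos) would not. A minimal check:

#include <cassert>
#include <string>

int main()
{
    std::string name = "conv:0";
    name.resize(name.find(':')); // trims in place, no temporary string
    assert(name == "conv");
    // Equivalent but allocating: name = name.substr(0, name.find(':'));
    return 0;
}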
@@ -511,14 +511,7 @@ void print_value(std::ostream& os, const std::vector<value>& x)
    os << "}";
}

-void print_value(std::ostream& os, const value::binary& x)
-{
-    // Convert binary to integers
-    std::vector<int> v(x.begin(), x.end());
-    os << "{";
-    os << to_string_range(v);
-    os << "}";
-}
+void print_value(std::ostream& os, const value::binary& x) { os << x; }

std::ostream& operator<<(std::ostream& os, const value& d)
{
...
@@ -43,6 +43,8 @@ struct sigmoid_custom_op final : migraphx::experimental_custom_op_base
        return inputs[1];
    }

+   virtual bool runs_on_offload_target() const override { return true; }
+
    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
    {
        if(inputs.size() != 2)
@@ -111,4 +113,45 @@ TEST_CASE(run_sigmoid_with_incorrect_shape)
               "Error in compute_shape of: sigmoid_custom_op: op must have two inputs"));
}

+struct identity_custom_op final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "identity_custom_op"; }
+    virtual migraphx::argument
+    compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
+    {
+        return inputs[0];
+    }
+
+    virtual bool runs_on_offload_target() const override { return true; }
+
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(inputs.size() != 1)
+        {
+            throw std::runtime_error("Identity op must have only one input");
+        }
+        return inputs.back();
+    }
+
+    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0, 1}; }
+};
+
+TEST_CASE(run_custom_op_with_invalid_output_alias)
+{
+    identity_custom_op i_op;
+    migraphx::register_experimental_custom_op(i_op);
+    auto op = migraphx::operation("identity_custom_op");
+    EXPECT(op.name() == "identity_custom_op");
+
+    migraphx::program p;
+    migraphx::shape s{migraphx_shape_float_type, {12}};
+    migraphx::module m = p.get_main_module();
+    auto x             = m.add_parameter("x", s);
+    auto i_ins         = m.add_instruction(migraphx::operation("identity_custom_op"), {x});
+    migraphx_test_private_disable_exception_catch(true);
+    EXPECT(test::throws<std::exception>(
+        [&] { p.compile(migraphx::target("ref")); },
+        "Currently, CustomOps in MIGraphX only supports one output_alias"));
+}
+
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -24,40 +24,89 @@
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
+#include <numeric>
#include <stdexcept>
#include "test.hpp"

#define MIGRAPHX_HIP_ASSERT(x) (EXPECT(x == hipSuccess))

-struct simple_custom_op final : migraphx::experimental_custom_op_base
+struct half_copy_host final : migraphx::experimental_custom_op_base
{
-   virtual std::string name() const override { return "simple_custom_op"; }
+   virtual std::string name() const override { return "half_copy_host"; }
+   virtual bool runs_on_offload_target() const override { return false; }
    virtual migraphx::argument
    compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
    {
-       // sets first half size_bytes of the input 0, and rest of the half bytes are copied.
-       int* h_output = nullptr;
-       auto* d_output = reinterpret_cast<int*>(inputs[0].data());
-       auto input_bytes = inputs[0].get_shape().bytes();
-       auto* output_ptr = inputs[1].data();
-       auto copy_bytes = input_bytes / 2;
+       // This custom op sets the first half of the input's size_bytes to 0; the rest of the
+       // bytes are copied. This custom_op does its computation on the host, so
+       // `runs_on_offload_target()` is set to false. MIGraphX injects the necessary buffer
+       // copies between GPU and host, based on the `runs_on_offload_target()` flag, for the
+       // input buffers as well as the output buffers.
+       auto* input_buffer_ptr  = inputs[0].data();
+       auto* output_buffer_ptr = inputs[1].data();
+       auto input_bytes        = inputs[0].get_shape().bytes();
+       auto copy_bytes         = input_bytes / 2;
        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
-       MIGRAPHX_HIP_ASSERT(hipHostMalloc(&h_output, input_bytes));
-       MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(
-           h_output, d_output, input_bytes, hipMemcpyDeviceToHost, ctx.get_queue<hipStream_t>()));
-       MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
-       MIGRAPHX_HIP_ASSERT(hipMemset(h_output, 0, copy_bytes));
-       MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
-       MIGRAPHX_HIP_ASSERT(hipMemcpy(output_ptr, h_output, input_bytes, hipMemcpyHostToDevice));
-       MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
-       MIGRAPHX_HIP_ASSERT(hipHostFree(h_output));
+       MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
+                                          input_buffer_ptr,
+                                          input_bytes,
+                                          hipMemcpyHostToHost,
+                                          ctx.get_queue<hipStream_t>()));
+       MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
+       MIGRAPHX_HIP_ASSERT(hipMemset(output_buffer_ptr, 0, copy_bytes));
+       MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
        return inputs[1];
    }

    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
    {
-       if(not inputs[0].standard())
+       if(not inputs[0].standard() or not inputs[1].standard())
        {
-           throw std::runtime_error("first arg must be standard shaped");
+           throw std::runtime_error("Input args must be standard shaped");
        }
        if(inputs.size() != 2)
        {
            throw std::runtime_error("number of inputs must be 2");
        }
        return inputs.back();
    }
};

+struct half_copy_device final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "half_copy_device"; }
+    virtual bool runs_on_offload_target() const override { return true; }
+    virtual migraphx::argument
+    compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
+    {
+        // This custom op sets the first half of the input's size_bytes to 0; the rest of the
+        // bytes are copied. This custom_op does its computation on the GPU, so
+        // `runs_on_offload_target()` is set to true.
+        auto* input_buffer_ptr  = inputs[0].data();
+        auto* output_buffer_ptr = inputs[1].data();
+        auto input_bytes        = inputs[0].get_shape().bytes();
+        auto copy_bytes         = input_bytes / 2;
+        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
+        MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
+                                           input_buffer_ptr,
+                                           input_bytes,
+                                           hipMemcpyDeviceToDevice,
+                                           ctx.get_queue<hipStream_t>()));
+        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
+        MIGRAPHX_HIP_ASSERT(hipMemset(output_buffer_ptr, 0, copy_bytes));
+        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
+        return inputs[1];
+    }
+
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(not inputs[0].standard() or not inputs[1].standard())
+        {
+            throw std::runtime_error("Input args must be standard shaped");
+        }
+        if(inputs.size() != 2)
+        {
+            throw std::runtime_error("number of inputs must be 2");
+        }
+        return inputs.back();
+    }
+};
@@ -67,36 +116,208 @@ struct simple_custom_op final : migraphx::experimental_custom_op_base
-TEST_CASE(run_simple_custom_op)
-{
-    simple_custom_op simple_op;
-    migraphx::register_experimental_custom_op(simple_op);
-    migraphx::program p;
-    migraphx::shape s{migraphx_shape_int32_type, {4, 3}};
-    migraphx::shape trans_shape{migraphx_shape_int32_type, {3, 4}};
-    migraphx::module m = p.get_main_module();
-    auto x = m.add_parameter("x", s);
-    auto neg = m.add_instruction(migraphx::operation("neg"), x);
-    auto alloc = m.add_allocation(trans_shape);
-    auto neg_trans =
-        m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg});
-    auto neg_cont = m.add_instruction(migraphx::operation("contiguous"), {neg_trans});
-    auto custom_kernel =
-        m.add_instruction(migraphx::operation("simple_custom_op"), {neg_cont, alloc});
-    auto relu = m.add_instruction(migraphx::operation("relu"), custom_kernel);
-    m.add_return({relu});
-    migraphx::compile_options options;
-    options.set_offload_copy();
-    p.compile(migraphx::target("gpu"), options);
-    migraphx::program_parameters pp;
-    std::vector<int> x_data(12, -3);
-    pp.add("x", migraphx::argument(s, x_data.data()));
-    auto results = p.eval(pp);
-    auto result = results[0];
-    auto result_vec = result.as_vector<int>();
-    std::vector<int> expected_result(12, 0);
-    std::fill(expected_result.begin() + 6, expected_result.end(), 3);
-    EXPECT(bool{result == migraphx::argument(trans_shape, expected_result.data())});
-}

+// overwrites input buffer
+struct half_copy_device_same_buffer final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "half_copy_device_same_buffer"; }
+    virtual bool runs_on_offload_target() const override { return true; }
+    virtual migraphx::argument
+    compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
+    {
+        // This custom op sets the first half of the input's size_bytes to 0; the rest of the
+        // bytes are copied. This custom_op does its computation on the device, so
+        // `runs_on_offload_target()` is set to true.
+        auto* buffer_ptr = inputs[0].data();
+        auto input_bytes = inputs[0].get_shape().bytes();
+        auto copy_bytes  = input_bytes / 2;
+        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
+        MIGRAPHX_HIP_ASSERT(hipMemset(buffer_ptr, 0, copy_bytes));
+        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
+        return inputs[0];
+    }
+
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(not inputs[0].standard())
+        {
+            throw std::runtime_error("Input arg must be standard shaped");
+        }
+        return inputs.front();
+    }
+};
+
+TEST_CASE(register_half_copy_op)
+{
+    half_copy_host hch;
+    migraphx::register_experimental_custom_op(hch);
+    auto op = migraphx::operation("half_copy_host");
+    EXPECT(op.name() == "half_copy_host");
+
+    half_copy_device hcd;
+    migraphx::register_experimental_custom_op(hcd);
+    op = migraphx::operation("half_copy_device");
+    EXPECT(op.name() == "half_copy_device");
+
+    half_copy_device_same_buffer hcdsb;
+    migraphx::register_experimental_custom_op(hcdsb);
+    op = migraphx::operation("half_copy_device_same_buffer");
+    EXPECT(op.name() == "half_copy_device_same_buffer");
+}
+
+TEST_CASE(half_copy_custom_op_test)
+{
+    auto run_test_prog = [](const std::string& op_name, bool buffer_alloc) {
+        migraphx::program p;
+        migraphx::module m = p.get_main_module();
+        migraphx::shape s{migraphx_shape_float_type, {4, 3}};
+        auto x = m.add_parameter("x", s);
+        migraphx::instructions inputs = {x};
+        if(buffer_alloc)
+        {
+            auto alloc = m.add_allocation(s);
+            inputs     = {x, alloc};
+        }
+        auto half_copy_ins = m.add_instruction(migraphx::operation(op_name.c_str()), inputs);
+        m.add_return({half_copy_ins});
+        migraphx::compile_options options;
+        options.set_offload_copy();
+        p.compile(migraphx::target("gpu"), options);
+        migraphx::program_parameters pp;
+        std::vector<float> x_data(12);
+        std::iota(x_data.begin(), x_data.end(), 0);
+        pp.add("x", migraphx::argument(s, x_data.data()));
+        auto results    = p.eval(pp);
+        auto result     = results[0];
+        auto result_vec = result.as_vector<float>();
+        std::vector<float> expected_result(12, 0);
+        std::iota(expected_result.begin() + 6, expected_result.end(), 6);
+        EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
+    };
+
+    // register all the ops
+    half_copy_host hch;
+    migraphx::register_experimental_custom_op(hch);
+    half_copy_device hcd;
+    migraphx::register_experimental_custom_op(hcd);
+    half_copy_device_same_buffer hcdsb;
+    migraphx::register_experimental_custom_op(hcdsb);
+
+    std::vector<std::pair<std::string, bool>> tests_config = {
+        {"half_copy_host", true},
+        {"half_copy_device", true},
+        {"half_copy_device_same_buffer", false}};
+
+    for(const auto& i : tests_config)
+    {
+        run_test_prog(i.first, i.second);
+    }
+}
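The expected values follow from the byte arithmetic in compute: a {4, 3} float tensor occupies 48 bytes, so copy_bytes = 24 zeroes exactly the first six floats and the iota values 6..11 survive. A host-side rendition of the same computation:

#include <cassert>
#include <cstring>
#include <numeric>
#include <vector>

int main()
{
    std::vector<float> x(12);
    std::iota(x.begin(), x.end(), 0.0f);

    std::vector<float> out = x;                         // copy the whole buffer
    std::size_t copy_bytes = x.size() * sizeof(float) / 2;
    std::memset(out.data(), 0, copy_bytes);             // zero the first half

    std::vector<float> expected(12, 0.0f);
    std::iota(expected.begin() + 6, expected.end(), 6.0f);
    assert(out == expected);
    return 0;
}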
+struct stride_two final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "stride_two"; }
+    virtual migraphx::argument
+    compute(migraphx::context, migraphx::shape out_shape, migraphx::arguments inputs) const override
+    {
+        return {out_shape, inputs[0].data()};
+    }
+
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(inputs.size() != 1)
+        {
+            throw std::runtime_error("stride_two op must have only one input argument");
+        }
+        if(not inputs[0].standard())
+        {
+            throw std::runtime_error("stride_two op only works on the standard input shapes");
+        }
+        migraphx::shape input_s     = inputs[0];
+        std::vector<size_t> dims    = input_s.lengths();
+        std::vector<size_t> new_dims;
+        std::vector<size_t> strides = input_s.strides();
+        std::vector<size_t> new_strides;
+        std::for_each(dims.begin(), dims.end(), [&](auto i) { new_dims.push_back(i / 2); });
+        std::for_each(
+            strides.begin(), strides.end(), [&](auto i) { new_strides.push_back(i * 2); });
+        migraphx::shape output_shape{input_s.type(), new_dims, new_strides};
+        return output_shape;
+    }
+
+    virtual bool runs_on_offload_target() const override { return true; }
+    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0}; };
+};
+
+TEST_CASE(stride_two_custom_op_test)
+{
+    stride_two st;
+    migraphx::register_experimental_custom_op(st);
+    migraphx::program p;
+    migraphx::module m = p.get_main_module();
+    migraphx::shape s{migraphx_shape_float_type, {4, 4, 4}};
+    auto x              = m.add_parameter("x", s);
+    auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {x});
+    m.add_return({stride_two_ins});
+    migraphx::compile_options options;
+    options.set_offload_copy();
+    p.compile(migraphx::target("gpu"), options);
+    migraphx::program_parameters pp;
+    std::vector<float> x_data(64);
+    std::iota(x_data.begin(), x_data.end(), 0);
+    pp.add("x", migraphx::argument(s, x_data.data()));
+    auto results    = p.eval(pp);
+    auto result     = results[0];
+    auto result_vec = result.as_vector<float>();
+    std::vector<float> expected_result = {0, 2, 8, 10, 32, 34, 40, 42};
+    EXPECT(result_vec == expected_result);
+}
+TEST_CASE(custom_op_with_pre_and_post_subgraph_test)
+{
+    half_copy_host hco;
+    migraphx::register_experimental_custom_op(hco);
+    stride_two st;
+    migraphx::register_experimental_custom_op(st);
+    migraphx::program p;
+    migraphx::shape s{migraphx_shape_float_type, {4, 6}};
+    migraphx::module m = p.get_main_module();
+    auto x = m.add_parameter("x", s);
+    // pre-subgraph
+    auto neg_ins   = m.add_instruction(migraphx::operation("neg"), x);
+    auto trans_ins =
+        m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg_ins});
+    auto cont_ins = m.add_instruction(migraphx::operation("contiguous"), {trans_ins});
+    // custom_op
+    migraphx::shape trans_shape{migraphx_shape_float_type, {6, 4}};
+    auto alloc         = m.add_allocation(trans_shape);
+    auto half_copy_ins =
+        m.add_instruction(migraphx::operation("half_copy_host"), {cont_ins, alloc});
+    // post-subgraph
+    auto abs_ins = m.add_instruction(migraphx::operation("abs"), {half_copy_ins});
+    // another custom_op
+    auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {abs_ins});
+    // post-subgraph
+    auto relu_ins = m.add_instruction(migraphx::operation("relu"), {stride_two_ins});
+    m.add_return({relu_ins});
+    migraphx::compile_options options;
+    options.set_offload_copy();
+    p.compile(migraphx::target("gpu"), options);
+    migraphx::program_parameters pp;
+    std::vector<float> x_data(s.elements());
+    std::iota(x_data.begin(), x_data.end(), 0);
+    pp.add("x", migraphx::argument(s, x_data.data()));
+    auto results    = p.eval(pp);
+    auto result     = results[0];
+    auto result_vec = result.as_vector<float>();
+    std::vector<float> expected_result = {0, 0, 0, 0, 4, 16};
+    EXPECT(bool{result == migraphx::argument(migraphx::shape{migraphx_shape_float_type, {3, 2}},
+                                             expected_result.data())});
+}

int main(int argc, const char* argv[]) { test::run(argc, argv); }