Commit ff3bd8e6 authored by Khalique Ahmed

manual merge

parents 32b69ceb c310bc5c
#ifndef MIGRAPHX_GUARD_RTGLIB_MIOPEN_MLIR_CONV_HPP
#define MIGRAPHX_GUARD_RTGLIB_MIOPEN_MLIR_CONV_HPP
#include <migraphx/config.hpp>
#include <migraphx/gpu/context.hpp>
#include <string>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
struct module;
namespace gpu {
struct mlir_conv
{
context* ctx;
std::string name() const { return "mlir::convolution"; }
void apply(module& m) const;
};
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif
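The header only declares the pass: a context pointer, a name(), and apply(module&). For orientation, here is a minimal sketch of how the pass gets registered; this mirrors the target.cpp hunk later in this commit, and the surrounding pass names are copied from that diff:

// Sketch only, not part of the commit: how target::get_passes wires in the new pass.
// The stored context pointer is what later lets apply() query the device arch and CU count.
std::vector<pass> passes = {
    // ... earlier simplification passes ...
    dead_code_elimination{},
    mlir_conv{&ctx},                      // try the MLIR path for convolutions first
    lowering{&ctx, options.offload_copy}, // everything else takes the MIOpen lowering
    // ...
};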
@@ -26,7 +26,6 @@
#include <migraphx/gpu/equal.hpp>
#include <migraphx/gpu/gemm.hpp>
#include <migraphx/gpu/greater.hpp>
-#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/int8_conv_pack.hpp>
#include <migraphx/gpu/leaky_relu.hpp>
#include <migraphx/gpu/less.hpp>
@@ -188,7 +187,7 @@ struct miopen_apply
        auto pos = std::next(ins);
        auto a   = insert_allocation(pos, ins->get_shape());
-       auto c   = mod->insert_instruction(pos, hip_copy_to_gpu{}, ins, a);
+       auto c   = mod->insert_instruction(pos, make_op("hip::copy_to_gpu"), ins, a);
        mod->replace_instruction(ins, c);
    }
@@ -202,14 +201,14 @@ struct miopen_apply
            // output with copy output
            for(const auto& in : inputs)
            {
-               auto p_output = mod->insert_instruction(ret, hip_copy_from_gpu{}, in);
+               auto p_output = mod->insert_instruction(ret, make_op("hip::copy_from_gpu"), in);
                instruction::replace_argument(ret, in, p_output);
            }
        }
        // else branch to handle legacy program without the return instruction
        else
        {
-           mod->add_instruction(hip_copy_from_gpu{}, ret);
+           mod->add_instruction(make_op("hip::copy_from_gpu"), ret);
        }
    }
@@ -233,7 +232,8 @@ struct miopen_apply
        // Instruction's output is an input of the ret instruction
        if(offload_copy)
        {
-           auto result = mod->insert_instruction(ins, hip_allocate{s, std::move(tag)});
+           auto result = mod->insert_instruction(
+               ins, make_op("hip::allocate", {{"shape", to_value(s)}, {"tag", std::move(tag)}}));
            return result;
        }
@@ -247,7 +247,8 @@
            return mod->add_parameter("output", s);
        }
-       return mod->insert_instruction(ins, hip_allocate{s, std::move(tag)});
+       return mod->insert_instruction(
+           ins, make_op("hip::allocate", {{"shape", to_value(s)}, {"tag", std::move(tag)}}));
    }

    void add_convolution_op()
@@ -301,7 +302,8 @@ struct miopen_apply
        if(ins == last or refs.back()->outputs().size() > 1 or c_alias->inputs().empty())
        {
            auto output   = insert_allocation(ins, ins->get_shape());
-           auto copy_out = mod->insert_instruction(ins, hip_copy{}, refs.back(), output);
+           auto copy_out =
+               mod->insert_instruction(ins, make_op("hip::copy"), refs.back(), output);
            refs.back() = copy_out;
            refs.push_back(copy_out);
        }
@@ -413,8 +415,9 @@ struct miopen_apply
    {
        apply_map.emplace("if", [=](instruction_ref ins) {
            std::vector<instruction_ref> inputs = ins->inputs();
-           auto cpu_cond  = mod->insert_instruction(ins, hip_copy_from_gpu{}, inputs.front());
-           auto sync_cond = mod->insert_instruction(ins, hip_sync_device{}, cpu_cond);
+           auto cpu_cond =
+               mod->insert_instruction(ins, make_op("hip::copy_from_gpu"), inputs.front());
+           auto sync_cond = mod->insert_instruction(ins, make_op("hip::sync_stream"), cpu_cond);
            inputs.front() = sync_cond;
            std::vector<module_ref> mod_args = ins->module_inputs();
@@ -437,7 +440,8 @@
            }
            else
            {
-               output = mod->insert_instruction(ins, hip_allocate{s});
+               output = mod->insert_instruction(
+                   ins, make_op("hip::allocate", {{"shape", to_value(s)}}));
            }
            inputs.push_back(output);
        }
...
#include <migraphx/gpu/mlir_conv.hpp>
#include <migraphx/manage_ptr.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/convolution.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/convolution.hpp>
#include <migraphx/iterator_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/target.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <utility>
#include <functional>
#include <algorithm>
#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
#include <Miir.h>
#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
#include <cstdio>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
struct mlir_apply
{
module* mod = nullptr;
const mlir_conv* pass = nullptr;
const char* mlir_kernel_name = "migraphx_conv2d";
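    // Integer literals are memoized (see get_literal below) so the many offset/size/stride
    // arguments of the MemRefDescriptors can share instructions.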
std::unordered_map<uint64_t, instruction_ref> literal_map{};
struct execution_spec
{
migraphx::value::binary binary;
size_t global_size;
size_t local_size;
execution_spec(migraphx::value::binary&& binary_m, size_t global_s, size_t local_s)
: binary(std::move(binary_m)), global_size(global_s), local_size(local_s)
{
}
};
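    // Compiled kernels are cached by their full MLIR option string, so identical
    // convolution configurations only go through MLIR lowering once.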
std::unordered_map<std::string, std::shared_ptr<execution_spec>> binary_map{};
context& get_context() const
{
assert(pass != nullptr);
assert(pass->ctx != nullptr);
return *pass->ctx;
}
void init() const
{
assert(mod != nullptr);
assert(pass != nullptr);
}
std::shared_ptr<execution_spec> make_mlir_binary(instruction_ref op_r)
{
std::shared_ptr<execution_spec> result;
#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
auto conv = any_cast<op::convolution>(op_r->get_operator());
auto inp_t = op_r->inputs().at(0)->get_shape();
auto flt_t = op_r->inputs().at(1)->get_shape();
auto out_t = op_r->get_shape();
auto get_type_str = [](const shape& s) -> const char* {
switch(s.type())
{
case shape::float_type: return "f32";
case shape::half_type: return "f16";
case shape::bool_type:
case shape::double_type:
case shape::uint8_type:
case shape::int8_type:
case shape::uint16_type:
case shape::int16_type:
case shape::int32_type:
case shape::int64_type:
case shape::uint32_type:
case shape::uint64_type:
case shape::tuple_type: break;
}
return nullptr;
};
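    // Only f32/f16 are handled by the MLIR path; any other type maps to nullptr,
    // make_mlir_binary returns an empty result, and the convolution stays on the
    // regular MIOpen lowering path.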
const auto* inp_t_s = get_type_str(inp_t);
const auto* flt_t_s = get_type_str(flt_t);
const auto* out_t_s = get_type_str(out_t);
if(out_t_s == nullptr || inp_t_s == nullptr || flt_t_s == nullptr)
return result;
std::string mlir_options = "--kernel_name " + std::string(mlir_kernel_name);
// platform spec
auto& device = get_context().get_current_device();
char dev_name[64];
std::snprintf(
    dev_name, sizeof(dev_name), "gfx%lu%02lu", device.get_device_major(), device.get_device_minor());
mlir_options += " --arch " + std::string(dev_name) + " --num_cu " +
std::to_string(device.get_cu_count()); // ???
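// The accumulated string ends up looking like
//   "--kernel_name migraphx_conv2d --arch gfx906 --num_cu 60 --operation conv2d ..."
// (the arch and CU values here are illustrative; they come from the current device).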
// Conv spec
mlir_options +=
" --operation "
"conv2d"
" --batchsize " +
std::to_string(conv.group) + " --groupsize " + std::to_string(1) + " --padding_h " +
std::to_string(conv.padding[0]) + " --padding_w " + std::to_string(conv.padding[1]) +
" --conv_stride_h " + std::to_string(conv.stride[0]) + " --conv_stride_w " +
std::to_string(conv.stride[1]) + " --dilation_h " + std::to_string(conv.dilation[0]) +
" --dilation_w " + std::to_string(conv.dilation[1]);
// Input spec
mlir_options += " --in_layout "
"NCHWG"
" --in_type " +
std::string(inp_t_s) + " --in_channels " + std::to_string(inp_t.lens()[1]) +
" --in_h " + std::to_string(inp_t.lens()[2]) + " --in_w " +
std::to_string(inp_t.lens()[3]);
// Filter spec
mlir_options += " --fil_layout "
"NCHWG"
" --fil_type " +
std::string(flt_t_s) + " --fil_h " + std::to_string(flt_t.lens()[2]) +
" --fil_w " + std::to_string(flt_t.lens()[3]);
// Output spec
mlir_options += " --out_layout "
"NCHWG"
" --out_type " +
std::string(out_t_s) + " --out_channels " +
std::to_string(out_t.lens()[1]) + " --out_h " +
std::to_string(out_t.lens()[2]) + " --out_w " +
std::to_string(out_t.lens()[3]);
auto bin_i = binary_map.find(mlir_options);
if(bin_i == binary_map.end())
{
size_t bin_size = 0;
using mlir_handle = MIGRAPHX_MANAGE_PTR(MiirHandle, miirDestroyHandle);
auto handle = mlir_handle(miirCreateHandle(mlir_options.c_str()));
if(miirLowerBin(handle.get()) == MIIR_SUCCESS &&
miirBufferGet(handle.get(), nullptr, &bin_size) == MIIR_SUCCESS)
{
migraphx::value::binary bin(bin_size);
if(miirBufferGet(handle.get(), reinterpret_cast<char*>(bin.data()), &bin_size) ==
MIIR_SUCCESS)
{
size_t global_size;
size_t block_size;
if(miirGetExecutionDims(handle.get(), &global_size, &block_size) ==
MIIR_SUCCESS)
{
result = std::make_shared<execution_spec>(
std::move(bin), global_size, block_size);
}
}
}
binary_map[mlir_options] = result;
}
else
{
result = bin_i->second;
}
#else // MIGRAPHX_MLIR_MIOPEN_SUPPORT
(void)op_r;
#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
return result;
}
instruction_ref get_literal(uint64_t value)
{
auto fi = literal_map.find(value);
if(fi != literal_map.end())
return fi->second;
auto lit = mod->add_literal(value);
literal_map.emplace(value, lit);
return lit;
}
operation make_code_object_op(instruction_ref op_r, const std::shared_ptr<execution_spec>& spec)
{
// each pointer is expanded out to a MemRefDescriptor
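// MLIR lowers each memref argument to { T* allocated, T* aligned, i64 offset,
// i64 sizes[rank], i64 strides[rank] }. With the trailing G (group) dimension
// appended, each 4-d tensor expands to 2 + 1 + 5 + 5 = 13 kernel arguments.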
auto inp_t = op_r->inputs().at(0)->get_shape();
auto flt_t = op_r->inputs().at(1)->get_shape();
auto out_t = op_r->get_shape();
auto i64 = shape(shape::uint64_type);
std::vector<shape> expected_inputs = {
flt_t, flt_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, inp_t,
inp_t, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, out_t, out_t,
i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, i64, out_t};
return migraphx::make_op("gpu::code_object",
{
{"code_object", spec->binary},
{"symbol_name", mlir_kernel_name},
{"global", spec->global_size},
{"local", spec->local_size},
{"expected_inputs", migraphx::to_value(expected_inputs)},
{"output", migraphx::to_value(out_t)},
});
}
void add_memref_descriptor(std::vector<instruction_ref>& refs, instruction_ref inst)
{
const size_t offset = 0;
auto inst_t = inst->get_shape();
refs.push_back(inst);
refs.push_back(inst);
refs.push_back(get_literal(offset)); // offset
// dim sizes
std::transform(inst_t.lens().begin(),
inst_t.lens().end(),
std::back_inserter(refs),
[&](const auto& lval) { return get_literal(lval); });
refs.push_back(get_literal(1)); // G
// dim strides
std::transform(inst_t.strides().begin(),
inst_t.strides().end(),
std::back_inserter(refs),
[&](const auto& lval) { return get_literal(lval); });
refs.push_back(get_literal(1)); // G
}
instruction_ref insert_allocation(instruction_ref ins, const shape& s) const
{
return mod->insert_instruction(ins, hip_allocate{s});
}
void replace_conv_op(instruction_ref ins)
{
auto conv_bin = make_mlir_binary(ins);
if(conv_bin)
{
auto conv = make_code_object_op(ins, conv_bin);
auto inp = ins->inputs().at(0);
auto flt = ins->inputs().at(1);
auto out = insert_allocation(ins, ins->get_shape());
std::vector<instruction_ref> refs;
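        // 3 memref descriptors x 13 fields each, plus the extra output argument.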
refs.reserve(3 * 13 + 1);
add_memref_descriptor(refs, flt);
add_memref_descriptor(refs, inp);
add_memref_descriptor(refs, out);
refs.push_back(out);
mod->replace_instruction(ins, conv, refs);
}
}
void apply()
{
init();
for(auto it : iterator_for(*mod))
{
if(it->name() == "convolution")
{
replace_conv_op(it);
}
}
}
};
void mlir_conv::apply(module& m) const { mlir_apply{&m, this}.apply(); }
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#include <migraphx/gpu/sync_device.hpp>
-#include <migraphx/gpu/hip.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
+#include <migraphx/make_op.hpp>
#include <migraphx/iterator_for.hpp>

namespace migraphx {
@@ -18,7 +18,7 @@ void sync_device::apply(module& p) const
               return (i->name() == "hip::copy_from_gpu");
           }))
        {
-           auto sync_in = p.insert_instruction(last, hip_sync_device{}, inputs);
+           auto sync_in = p.insert_instruction(last, make_op("hip::sync_stream"), inputs);
            if(not inputs.empty())
            {
                p.replace_instruction(inputs.front(), sync_in);
...
@@ -7,6 +7,7 @@
#include <migraphx/eliminate_common_subexpression.hpp>
#include <migraphx/eliminate_concat.hpp>
#include <migraphx/eliminate_contiguous.hpp>
+#include <migraphx/eliminate_data_type.hpp>
#include <migraphx/eliminate_identity.hpp>
#include <migraphx/eliminate_pad.hpp>
#include <migraphx/memory_coloring.hpp>
@@ -26,6 +27,7 @@
#include <migraphx/gpu/eliminate_workspace.hpp>
#include <migraphx/gpu/fuse_ops.hpp>
#include <migraphx/gpu/lowering.hpp>
+#include <migraphx/gpu/mlir_conv.hpp>
#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/preallocate_param.hpp>
#include <migraphx/gpu/schedule_model.hpp>
@@ -42,12 +44,19 @@ MIGRAPHX_DECLARE_ENV_VAR(MIGRAPHX_DISABLE_SCHEDULE_PASS)
std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_options& options) const
{
    auto& ctx = any_cast<context>(gctx);
+   std::set<shape::type_t> unsupported_types(shape::types().begin(), shape::types().end());
+   unsupported_types.erase(shape::type_t::float_type);
+   unsupported_types.erase(shape::type_t::half_type);
+   unsupported_types.erase(shape::type_t::bool_type);
+   unsupported_types.erase(shape::type_t::int8_type);
+   unsupported_types.erase(shape::type_t::uint8_type);
    // clang-format off
    return
    {
        normalize_ops{},
        decompose{},
        dead_code_elimination{},
+       eliminate_data_type{unsupported_types, shape::type_t::float_type},
        simplify_reshapes{},
        eliminate_identity{},
        eliminate_pad{},
@@ -67,6 +76,7 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_options& options) const
        simplify_reshapes{},
        propagate_constant{},
        dead_code_elimination{},
+       mlir_conv{&ctx},
        lowering{&ctx, options.offload_copy},
        eliminate_contiguous{"gpu::contiguous"},
        dead_code_elimination{},
...
@@ -25,7 +25,7 @@ target_include_directories(migraphx_tf PRIVATE include)
set_target_properties(migraphx_tf PROPERTIES EXPORT_NAME tf)
rocm_set_soversion(migraphx_tf ${MIGRAPHX_SO_VERSION})
rocm_clang_tidy_check(migraphx_tf)
-target_link_libraries(migraphx_tf PRIVATE tf-proto)
+target_link_libraries(migraphx_tf PRIVATE tf-proto "-Wl,--exclude-libs,ALL")
target_link_libraries(migraphx_tf PUBLIC migraphx)
rocm_install_targets(
...
#include <migraphx/argument.hpp>
#include <migraphx/literal.hpp>
#include <migraphx/serialize.hpp>
#include <sstream>
#include <string>
#include "test.hpp"
migraphx::argument as_argument(migraphx::argument a) { return a; }
template <class T>
migraphx::argument as_argument(T x)
{
return migraphx::literal{x}.get_argument();
}
template <class... Ts>
migraphx::argument make_tuple(Ts... xs)
{
return migraphx::argument{{as_argument(xs)...}};
}
TEST_CASE(copy_eq)
{
auto a1 = as_argument(3);
auto a2 = as_argument(3);
auto a3 = as_argument(1);
auto a4 = a1; // NOLINT
EXPECT(a1 == a2);
EXPECT(a2 != a3);
EXPECT(a1 == a4);
EXPECT(a4 != a3);
EXPECT(a1.get_sub_objects().empty());
EXPECT(a2.get_sub_objects().empty());
EXPECT(a3.get_sub_objects().empty());
EXPECT(a4.get_sub_objects().empty());
}
TEST_CASE(default_construct)
{
migraphx::argument a1{};
migraphx::argument a2{};
EXPECT(a1.empty());
EXPECT(a2.empty());
EXPECT(a1 == a2);
EXPECT(a1.to_string().empty());
EXPECT(a2.to_string().empty());
EXPECT(a1.get_sub_objects().empty());
EXPECT(a2.get_sub_objects().empty());
}
TEST_CASE(string_elems)
{
migraphx::shape s{migraphx::shape::int64_type, {3}};
migraphx::literal l{s, {1, 2, 3}};
auto a = l.get_argument();
EXPECT(a.to_string() == "1, 2, 3");
}
TEST_CASE(tuple)
{
auto a1 = make_tuple(3, 3.0);
EXPECT(a1.get_shape().type() == migraphx::shape::tuple_type);
EXPECT(a1.get_sub_objects().size() == 2);
EXPECT(a1.get_sub_objects()[0] == as_argument(3));
EXPECT(a1.get_sub_objects()[1] == as_argument(3.0));
auto a2 = make_tuple(3, 3.0);
EXPECT(a1 == a2);
EXPECT(a1.to_string() == a2.to_string());
auto a3 = make_tuple(3, 4.0);
EXPECT(a1 != a3);
EXPECT(a1.to_string() != a3.to_string());
}
TEST_CASE(nested_tuple)
{
auto a1 = make_tuple(3, make_tuple(5, 4));
EXPECT(a1.get_shape().type() == migraphx::shape::tuple_type);
EXPECT(a1.get_sub_objects().size() == 2);
EXPECT(a1.get_sub_objects()[0] == as_argument(3));
EXPECT(a1.get_sub_objects()[1] == make_tuple(5, 4));
auto a2 = make_tuple(3, make_tuple(5, 4));
EXPECT(a1 == a2);
EXPECT(a1.to_string() == a2.to_string());
auto a3 = make_tuple(3, make_tuple(5, 6));
EXPECT(a1 != a3);
EXPECT(a1.to_string() != a3.to_string());
}
TEST_CASE(tuple_visit)
{
auto a1 = make_tuple(3, 3.0);
EXPECT(test::throws([&] { a1.visit([](auto&&) {}); }));
EXPECT(test::throws([&] { a1.at<float>(); }));
bool reaches = false;
a1.visit([&](auto&&) { EXPECT(false); },
[&](auto&& xs) {
reaches = true;
EXPECT(xs.size() == 2);
EXPECT(xs[0] == as_argument(3));
EXPECT(xs[1] == as_argument(3.0));
});
EXPECT(reaches);
}
TEST_CASE(tuple_visit_all)
{
auto a1 = make_tuple(3, 3.0);
auto a2 = make_tuple(1, 2, 3);
EXPECT(test::throws([&] { visit_all(a1, a2)([](auto&&, auto&&) {}); }));
bool reaches = false;
visit_all(a1, a2)([&](auto&&, auto&&) { EXPECT(false); },
[&](auto&& xs, auto&& ys) {
reaches = true;
EXPECT(xs.size() == 2);
EXPECT(xs[0] == as_argument(3));
EXPECT(xs[1] == as_argument(3.0));
EXPECT(ys.size() == 3);
EXPECT(ys[0] == as_argument(1));
EXPECT(ys[1] == as_argument(2));
EXPECT(ys[2] == as_argument(3));
});
EXPECT(reaches);
}
TEST_CASE(value_argument)
{
migraphx::shape s{migraphx::shape::int64_type, {3}};
migraphx::literal l1{s, {1, 2, 3}};
auto a1 = l1.get_argument();
auto v1 = migraphx::to_value(a1);
migraphx::literal l2{1};
auto a2 = l2.get_argument();
auto v2 = migraphx::to_value(a2);
EXPECT(v1 != v2);
auto a3 = migraphx::from_value<migraphx::argument>(v1);
EXPECT(a3 == a1);
auto a4 = migraphx::from_value<migraphx::argument>(v2);
EXPECT(a4 == a2);
}
TEST_CASE(value_tuple)
{
auto a1 = make_tuple(3, 3.0, make_tuple(3, 4));
auto a2 = make_tuple(1, 2, 3);
auto v1 = migraphx::to_value(a1);
auto v2 = migraphx::to_value(a2);
EXPECT(v1 != v2);
auto a3 = migraphx::from_value<migraphx::argument>(v1);
EXPECT(a3 == a1);
auto a4 = migraphx::from_value<migraphx::argument>(v2);
EXPECT(a4 == a2);
}
TEST_CASE(argument_share)
{
migraphx::shape s{migraphx::shape::int64_type, {3}};
std::vector<char> buffer(s.bytes());
migraphx::argument a1(s, [=]() mutable { return buffer.data(); });
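    // The lambda captures `buffer` by value, so each copied argument owns its own
    // copy of the data; only share() pins a single allocation that later copies alias.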
auto a2 = a1; // NOLINT
EXPECT(a1.data() != a2.data());
auto a3 = a1.share();
EXPECT(a1.data() != a3.data());
auto a4 = a3; // NOLINT
EXPECT(a4.data() == a3.data());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -2,12 +2,13 @@
#include <migraphx/pass_manager.hpp>
#include <basic_ops.hpp>
#include <migraphx/make_op.hpp>
+#include <migraphx/ranges.hpp>
#include <test.hpp>

void run_pass(migraphx::program& p)
{
-   migraphx::run_passes(*p.get_main_module(), {migraphx::dead_code_elimination{}});
+   migraphx::run_passes(p, {migraphx::dead_code_elimination{}});
}

TEST_CASE(simple_test)
@@ -177,4 +178,21 @@ TEST_CASE(duplicate_args3)
    EXPECT(result == migraphx::literal{0});
}
TEST_CASE(unused_module)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto* m1 = p.create_module("unused");
auto* m2 = p.create_module("used");
auto l0 = mm->add_literal(0);
m1->add_literal(0);
m2->add_literal(0);
mm->add_instruction(mod_pass_op{}, {l0}, {m2});
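    // m1 is created but never referenced by any instruction, so dead-code elimination
    // should drop it from the program while keeping m2 (used by mod_pass_op above).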
EXPECT(migraphx::contains(p.get_modules(), m1));
EXPECT(migraphx::contains(p.get_modules(), m2));
run_pass(p);
EXPECT(migraphx::contains(p.get_modules(), m2));
EXPECT(not migraphx::contains(p.get_modules(), m1));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -128,8 +128,21 @@ TEST_CASE(dot_add_beta_int)
        m1.add_instruction(migraphx::make_op("dot", {{"alpha", 1.0}, {"beta", 0.5}}), x, y, z);
        m1.add_instruction(migraphx::make_op("identity"), dot);
    }
-   migraphx::module m2 = m1;
    run_pass(m1);
+   migraphx::module m2;
+   {
+       auto x   = m2.add_parameter("x", migraphx::shape{migraphx::shape::int32_type, {2, 2}});
+       auto y   = m2.add_parameter("y", migraphx::shape{migraphx::shape::int32_type, {2, 2}});
+       auto z   = m2.add_parameter("z", migraphx::shape{migraphx::shape::int32_type, {2, 2}});
+       auto dot = m2.add_instruction(migraphx::make_op("dot", {{"alpha", 1}, {"beta", 0}}), x, y);
+       auto beta =
+           m2.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int32_type}, {0.5}});
+       auto beta_broadcast = m2.add_instruction(
+           migraphx::make_op("multibroadcast", {{"output_lens", {2, 2}}}), beta);
+       auto mul = m2.add_instruction(migraphx::make_op("mul"), z, beta_broadcast);
+       auto add = m2.add_instruction(migraphx::make_op("add"), dot, mul);
+       m2.add_instruction(migraphx::make_op("identity"), add);
+   }
    EXPECT(m1 == m2);
}
...
@@ -89,17 +89,17 @@ struct invert_pass
{
    std::string name() const { return "invert_pass"; }
-   void apply(migraphx::module& p) const
+   void apply(migraphx::module& m) const
    {
-       for(auto ins : migraphx::iterator_for(p))
+       for(auto ins : migraphx::iterator_for(m))
        {
            if(ins->name() == "sum")
            {
-               p.replace_instruction(ins, minus_op{}, ins->inputs());
+               m.replace_instruction(ins, minus_op{}, ins->inputs());
            }
            else if(ins->name() == "minus")
            {
-               p.replace_instruction(ins, sum_op{}, ins->inputs());
+               m.replace_instruction(ins, sum_op{}, ins->inputs());
            }
        }
    }
...
@@ -14,7 +14,7 @@ const std::string write_2s = R"__migraphx__(
#include <hip/hip_runtime.h>

extern "C" {
-__global__ void write(int* data)
+__global__ void write(int8_t* data)
{
    int num = threadIdx.x + blockDim.x * blockIdx.x;
    data[num] = 2;
@@ -31,7 +31,7 @@ const std::string add_2s_binary = R"__migraphx__(
#include <hip/hip_runtime.h>

extern "C" {
-__global__ void add_2(std::int32_t* x, std::int32_t* y)
+__global__ void add_2(std::int8_t* x, std::int8_t* y)
{
    int num = threadIdx.x + blockDim.x * blockIdx.x;
    y[num] = x[num] + 2;
@@ -89,14 +89,14 @@ TEST_CASE(simple_compile_hip)
        {make_src_file("main.cpp", write_2s)}, "", get_device_name());
    EXPECT(binaries.size() == 1);

-   migraphx::argument input{{migraphx::shape::int32_type, {5}}};
+   migraphx::argument input{{migraphx::shape::int8_type, {5}}};
    auto ginput = migraphx::gpu::to_gpu(input);
    migraphx::gpu::kernel k{binaries.front(), "write"};
-   k.launch(nullptr, input.get_shape().elements(), 1024)(ginput.cast<int>());
+   k.launch(nullptr, input.get_shape().elements(), 1024)(ginput.cast<std::int8_t>());
    auto output = migraphx::gpu::from_gpu(ginput);

    EXPECT(output != input);
-   auto data = output.get<int>();
+   auto data = output.get<std::int8_t>();
    EXPECT(migraphx::all_of(data, [](auto x) { return x == 2; }));
}
@@ -106,7 +106,7 @@ TEST_CASE(code_object_hip)
        {make_src_file("main.cpp", add_2s_binary)}, "", get_device_name());
    EXPECT(binaries.size() == 1);

-   migraphx::shape input{migraphx::shape::int32_type, {5}};
+   migraphx::shape input{migraphx::shape::int8_type, {5}};
    std::vector<migraphx::shape> expected_inputs = {input, input};
    auto co = migraphx::make_op("gpu::code_object",
...
@@ -135,21 +135,4 @@ TEST_CASE(value_literal)
    EXPECT(l4 == l2);
}

-TEST_CASE(value_argument)
-{
-    migraphx::shape s{migraphx::shape::int64_type, {3}};
-    migraphx::literal l1{s, {1, 2, 3}};
-    auto a1 = l1.get_argument();
-    auto v1 = migraphx::to_value(a1);
-    migraphx::literal l2{1};
-    auto a2 = l2.get_argument();
-    auto v2 = migraphx::to_value(a2);
-    EXPECT(v1 != v2);
-    auto a3 = migraphx::from_value<migraphx::argument>(v1);
-    EXPECT(a3 == a1);
-    auto a4 = migraphx::from_value<migraphx::argument>(v2);
-    EXPECT(a4 == a2);
-}

int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -2642,6 +2642,23 @@ def pow_i64_fp32_test():
    return ([node], [arg0, arg1], [arg_out])
@onnx_test
def prefix_scan_sum_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 2])
axis_val = np.array([0])
axis_tensor = helper.make_tensor(name="axis",
data_type=TensorProto.INT32,
dims=axis_val.shape,
vals=axis_val.astype(int))
node = onnx.helper.make_node('CumSum',
inputs=['x', 'axis'],
outputs=['y'],
exclusive=1,
reverse=1)
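    # With exclusive=1 and reverse=1, element i of y holds the sum of the
    # elements after i along the axis: e.g. CumSum([1, 2, 3]) -> [5, 3, 0].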
return ([node], [x], [y], [axis_tensor])
@onnx_test
def prelu_brcst_test():
    arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
...
@@ -2278,6 +2278,21 @@ TEST_CASE(pow_i64_fp32_test)
    EXPECT(p == prog);
}
TEST_CASE(prefix_scan_sum)
{
migraphx::program p;
auto* mm = p.get_main_module();
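    // the ONNX CumSum axis initializer shows up as this int32 literal in the parsed graph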
mm->add_literal({migraphx::shape{migraphx::shape::int32_type, {1}, {1}}, {0}});
auto l0 = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 2, 2}});
auto ret = mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 0}, {"exclusive", true}, {"reverse", true}}),
l0);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("prefix_scan_sum_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(prelu_brcst_test)
{
    migraphx::program p;
...
@@ -1532,4 +1532,21 @@ TEST_CASE(lstm)
    }
}
TEST_CASE(prefix_scan_sum)
{
{
migraphx::shape s{migraphx::shape::float_type, {1, 2, 3}};
throws_shape(
migraphx::make_op("prefix_scan_sum", {{"axis", 3}, {"exclusive", 0}, {"reverse", 0}}),
s);
}
{
migraphx::shape s{migraphx::shape::float_type, {1, 2}};
throws_shape(
migraphx::make_op("prefix_scan_sum", {{"axis", -3}, {"exclusive", 0}, {"reverse", 0}}),
s);
}
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -72,6 +72,43 @@ TEST_CASE(make_op_invalid_key)
    EXPECT(test::throws([] { migraphx::make_op("convolution", {{"paddings", {1, 1}}}); }));
}
TEST_CASE(load_offset)
{
migraphx::shape s{migraphx::shape::float_type, {4}};
migraphx::shape bs{migraphx::shape::int8_type, {32}};
auto op = migraphx::make_op("load", {{"offset", 4}, {"shape", migraphx::to_value(s)}});
EXPECT(op.compute_shape({bs}) == s);
migraphx::argument a{bs};
EXPECT(op.compute(bs, {a}).data() == a.data() + 4);
}
TEST_CASE(load_out_of_bounds)
{
migraphx::shape s{migraphx::shape::float_type, {4}};
migraphx::shape bs{migraphx::shape::int8_type, {16}};
auto op = migraphx::make_op("load", {{"offset", 4}, {"shape", migraphx::to_value(s)}});
migraphx::argument a{bs};
EXPECT(test::throws([&] { op.compute(bs, {a}); }));
}
TEST_CASE(load_tuple)
{
migraphx::shape s{{migraphx::shape{migraphx::shape::int8_type, {3}},
migraphx::shape{migraphx::shape::float_type, {4}}}};
migraphx::shape bs{migraphx::shape::int8_type, {32}};
auto op = migraphx::make_op("load", {{"offset", 4}, {"shape", migraphx::to_value(s)}});
EXPECT(op.compute_shape({bs}) == s);
migraphx::argument a{bs};
auto r = op.compute(bs, {a});
EXPECT(r.get_sub_objects().size() == 2);
auto* start = a.data() + 4;
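    // The float sub-buffer is laid out first (presumably for alignment), so the int8
    // view starts 16 bytes in while the float view sits at the base offset.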
EXPECT(r.get_sub_objects()[0].data() == start + 16);
EXPECT(r.get_sub_objects()[1].data() == start);
}
TEST_CASE(ops)
{
    auto names = migraphx::get_operators();
...
@@ -2727,6 +2727,371 @@ TEST_CASE(pow_test)
    EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(prefix_scan_sum_1d)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {6}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 5, 6}};
auto l0 = mm->add_literal(input);
mm->add_instruction(migraphx::make_op("prefix_scan_sum", {{"axis", 0}, {"exclusive", false}}),
l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1.0, 3.0, 6.0, 10.0, 15.0, 21.0};
EXPECT(results_vector == gold);
}
TEST_CASE(prefix_scan_sum_2d)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 0}, {"exclusive", false}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0};
EXPECT(results_vector == gold);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{1.0, 3.0, 6.0, 1.0, 3.0, 6.0, 1.0, 3.0, 6.0};
EXPECT(results_vector == gold);
}
}
TEST_CASE(prefix_scan_sum_3d)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 0}, {"exclusive", false}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold{1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0,
                                2.0, 4.0, 6.0, 2.0, 4.0, 6.0, 2.0, 4.0, 6.0};
EXPECT(results_vector == gold);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", false}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold{1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0,
                                1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0};
EXPECT(results_vector == gold);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 2}, {"exclusive", false}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold{1.0, 3.0, 6.0, 1.0, 3.0, 6.0, 1.0, 3.0, 6.0,
                                1.0, 3.0, 6.0, 1.0, 3.0, 6.0, 1.0, 3.0, 6.0};
EXPECT(results_vector == gold);
}
}
TEST_CASE(prefix_scan_sum_exclusive)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {8}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 1, 2, 3, 4}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 0}, {"exclusive", true}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{0.0, 1.0, 3.0, 6.0, 10.0, 11.0, 13.0, 16.0};
EXPECT(results_vector == gold);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 1}, {"exclusive", true}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold{0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 2.0, 4.0, 6.0,
                                0.0, 0.0, 0.0, 1.0, 2.0, 3.0, 2.0, 4.0, 6.0};
EXPECT(results_vector == gold);
}
}
TEST_CASE(prefix_scan_sum_exclusive_reverse)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {6}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 5, 6}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", 0}, {"exclusive", true}, {"reverse", true}}),
l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{20.0, 18.0, 15.0, 11.0, 6.0, 0.0};
EXPECT(results_vector == gold);
}
TEST_CASE(prefix_scan_sum_negative_axis)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", -3}, {"exclusive", false}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold{1.0, 2.0, 3.0, 1.0, 2.0, 3.0, 1.0, 2.0, 3.0,
                                2.0, 4.0, 6.0, 2.0, 4.0, 6.0, 2.0, 4.0, 6.0};
EXPECT(results_vector == gold);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", -2}, {"exclusive", false}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold{1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0,
                                1.0, 2.0, 3.0, 2.0, 4.0, 6.0, 3.0, 6.0, 9.0};
EXPECT(results_vector == gold);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 3, 3}};
auto input = migraphx::literal{s, {1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum", {{"axis", -1}, {"exclusive", false}}), l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
        std::vector<float> gold{1.0, 3.0, 6.0, 1.0, 3.0, 6.0, 1.0, 3.0, 6.0,
                                1.0, 3.0, 6.0, 1.0, 3.0, 6.0, 1.0, 3.0, 6.0};
EXPECT(results_vector == gold);
}
}
TEST_CASE(prefix_scan_sum_reverse)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {8}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 1, 2, 3, 4}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum",
{{"axis", 0}, {"exclusive", false}, {"reverse", true}}),
l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{20.0, 19.0, 17.0, 14.0, 10.0, 9.0, 7.0, 4.0};
EXPECT(results_vector == gold);
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {2, 2, 2}};
auto input = migraphx::literal{s, {1, 2, 3, 4, 1, 2, 3, 4}};
auto l0 = mm->add_literal(input);
mm->add_instruction(
migraphx::make_op("prefix_scan_sum",
{{"axis", 0}, {"exclusive", false}, {"reverse", true}}),
l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{2.0, 4.0, 6.0, 8.0, 1.0, 2.0, 3.0, 4.0};
EXPECT(results_vector == gold);
}
}
TEST_CASE(prelu_test)
{
    migraphx::program p;
...
#include <migraphx/shape.hpp>
#include <migraphx/serialize.hpp>
+#include <migraphx/ranges.hpp>
#include <migraphx/permutation.hpp>
#include <migraphx/stringutils.hpp>
#include <array>
@@ -388,6 +389,74 @@ TEST_CASE(test_serialize)
    EXPECT(s3 != s4);
}
TEST_CASE(tuple)
{
migraphx::shape s{{migraphx::shape{migraphx::shape::float_type},
migraphx::shape{migraphx::shape::int8_type}}};
EXPECT(s.type() == migraphx::shape::tuple_type);
EXPECT(s.bytes() == 4 + 1);
EXPECT(s.type_size() == 0);
EXPECT(s.type_string() == "tuple_type");
EXPECT(s.lens().empty());
EXPECT(s.strides().empty());
EXPECT(not s.standard());
EXPECT(not s.packed());
EXPECT(not s.broadcasted());
EXPECT(not s.transposed());
EXPECT(not s.scalar());
EXPECT(s.sub_shapes().size() == 2);
EXPECT(s.sub_shapes()[0].type() == migraphx::shape::float_type);
EXPECT(s.sub_shapes()[0].elements() == 1);
EXPECT(s.sub_shapes()[1].type() == migraphx::shape::int8_type);
EXPECT(s.sub_shapes()[1].elements() == 1);
EXPECT(test::throws([&] { s.visit_type([](auto) {}); }));
}
TEST_CASE(tuple_copy)
{
migraphx::shape s1{{migraphx::shape{migraphx::shape::float_type},
migraphx::shape{migraphx::shape::int8_type}}};
migraphx::shape s2{{migraphx::shape{migraphx::shape::float_type},
migraphx::shape{migraphx::shape::int8_type}}};
EXPECT(s1 == s2);
auto s3 = s1;
EXPECT(s3 == s1);
EXPECT(s3 == s2);
migraphx::shape s4{{migraphx::shape{migraphx::shape::int8_type},
migraphx::shape{migraphx::shape::float_type}}};
EXPECT(s4 != s1);
EXPECT(s4 != s2);
EXPECT(s4 != s3);
}
TEST_CASE(tuple_print)
{
migraphx::shape s{{migraphx::shape{migraphx::shape::float_type},
migraphx::shape{migraphx::shape::int8_type}}};
std::string x = migraphx::to_string(s);
EXPECT(x.front() == '[');
EXPECT(x.back() == ']');
EXPECT(migraphx::contains(x, "float"));
EXPECT(migraphx::contains(x, "int8"));
}
TEST_CASE(tuple_serialize)
{
migraphx::shape s1{{migraphx::shape{migraphx::shape::float_type},
migraphx::shape{migraphx::shape::int8_type}}};
migraphx::shape s2{{migraphx::shape{migraphx::shape::int8_type},
migraphx::shape{migraphx::shape::float_type}}};
auto v1 = migraphx::to_value(s1);
auto v2 = migraphx::to_value(s2);
EXPECT(v1 != v2);
auto s3 = migraphx::from_value<migraphx::shape>(v1);
EXPECT(s3 == s1);
auto s4 = migraphx::from_value<migraphx::shape>(v2);
EXPECT(s4 == s2);
EXPECT(s3 != s4);
}
TEST_CASE(test_with_lens1)
{
    migraphx::shape s1{migraphx::shape::float_type, {2, 2}, {1, 2}};
@@ -531,4 +600,12 @@ TEST_CASE(test_with_lens_ambigous13)
    EXPECT(s2 == s3);
}
TEST_CASE(cpp_type_name)
{
EXPECT(migraphx::shape::cpp_type(migraphx::shape::int8_type) == "int8_t");
EXPECT(migraphx::shape::cpp_type(migraphx::shape::float_type) == "float");
EXPECT(migraphx::shape::cpp_type(migraphx::shape::half_type) == "half");
EXPECT(test::throws([&] { migraphx::shape::cpp_type(migraphx::shape::tuple_type); }));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -37,7 +37,8 @@ inline void compile_check(migraphx::program& p, const migraphx::target& t, bool show_trace)
    auto shapes = p.get_output_shapes();
    std::stringstream ss;
    migraphx::compile_options options;
-   options.trace = migraphx::tracer{ss};
+   if(show_trace)
+       options.trace = migraphx::tracer{std::cout};
    p.compile(t, options);
    if(shapes.size() != p.get_output_shapes().size())
    {
@@ -55,11 +56,6 @@ inline void compile_check(migraphx::program& p, const migraphx::target& t, bool show_trace)
        throw std::runtime_error("Compiling program with " + name + " alters its shape");
    }
-   if(show_trace)
-   {
-       std::cout << ss.str() << std::endl;
-   }
}

target_info run_verify::get_target_info(const std::string& name) const
...