Commit f7f61d7a authored by Paul

Fix bug when appending module

parent df032e06
#include <migraphx/gpu/mlir.hpp>
#ifdef MIGRAPHX_MLIR
#include <mlir-c/IR.h>
#include <mlir-c/BuiltinAttributes.h>
#include <mlir-c/BuiltinTypes.h>
@@ -8,6 +9,7 @@
#include <mlir-c/Dialect/MIGraphX.h>
#include <mlir-c/IntegerSet.h>
#include <mlir-c/Registration.h>
#endif
#include <migraphx/manage_ptr.hpp>
#include <migraphx/module.hpp>
@@ -22,6 +24,7 @@ namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
namespace gpu {
#ifdef MIGRAPHX_MLIR
template <class T, class F, F f> // NOLINT
struct mlir_handle
{
@@ -355,7 +358,7 @@ struct mlir_program
        std::vector<MlirValue> result;
        mlir_operation op = ops.create_operation();
        auto weak_op = op.get();
-       mlirBlockInsertOwnedOperation(body, 0, op.release());
+       mlirBlockAppendOwnedOperation(body, op.release());
        auto n = mlirOperationGetNumResults(weak_op);
        result.reserve(n);
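
The one-line change above is the bug the commit title refers to: every new operation used to be inserted at index 0 of the block, so the block's operation list came out in reverse creation order; appending preserves the order in which the module's instructions are visited. A minimal standalone sketch of the difference, using a plain std::vector as a stand-in for the MLIR block (the op names are illustrative, not taken from this diff):

#include <cassert>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> block;

    // Old behaviour: insert each new op at position 0 -> reversed order.
    for(const char* op : {"conv", "add", "relu"})
        block.insert(block.begin(), op);
    assert((block == std::vector<std::string>{"relu", "add", "conv"}));

    // Fixed behaviour: append each new op -> creation order preserved.
    block.clear();
    for(const char* op : {"conv", "add", "relu"})
        block.push_back(op);
    assert((block == std::vector<std::string>{"conv", "add", "relu"}));
}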
@@ -432,6 +435,15 @@ std::string dump_mlir(const module& m)
    return mlir_print(&mlirOperationPrint, mod_op);
}
#else
std::string dump_mlir(const module&)
{
    return {};
}
#endif
} // namespace gpu
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
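
The new #else branch means builds without MIGRAPHX_MLIR still define dump_mlir; it just returns an empty string, so callers can detect at run time whether MLIR support was compiled in (the tests below use exactly this check to skip themselves). A minimal sketch of the same idiom, with the hypothetical names FEATURE_FOO and dump_foo standing in for the real macro and function:

#include <iostream>
#include <string>

#ifdef FEATURE_FOO
std::string dump_foo() { return "real output"; }
#else
// Stub keeps the symbol defined, so callers compile and link either way.
std::string dump_foo() { return {}; }
#endif

int main()
{
    const std::string s = dump_foo();
    if(s.empty())
        std::cout << "built without FEATURE_FOO; skipping\n";
    else
        std::cout << s << "\n";
}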
@@ -23,9 +23,9 @@
#include <functional>
#include <algorithm>
-#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
+#ifdef MIGRAPHX_MLIR
#include <Miir.h>
-#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
+#endif // MIGRAPHX_MLIR
#include <cstdio>
@@ -72,7 +72,7 @@ struct mlir_apply
    {
        std::shared_ptr<execution_spec> result;
-#ifdef MIGRAPHX_MLIR_MIOPEN_SUPPORT
+#ifdef MIGRAPHX_MLIR
        auto conv = any_cast<op::convolution>(op_r->get_operator());
        auto inp_t = op_r->inputs().at(0)->get_shape();
        auto flt_t = op_r->inputs().at(1)->get_shape();
@@ -181,9 +181,9 @@ struct mlir_apply
        {
            result = bin_i->second;
        }
-#else // MIGRAPHX_MLIR_MIOPEN_SUPPORT
+#else // MIGRAPHX_MLIR
        (void)op_r;
-#endif // MIGRAPHX_MLIR_MIOPEN_SUPPORT
+#endif // MIGRAPHX_MLIR
        return result;
    }
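
A side note on the (void)op_r; line kept in the #else branch: casting the parameter to void marks it as deliberately unused, silencing -Wunused-parameter in builds where the MLIR path is compiled out. A small sketch of the pattern (FEATURE_FOO and process are hypothetical names, not MIGraphX code):

#include <cstdio>

int process(int value)
{
#ifdef FEATURE_FOO
    return value * 2; // real work happens only in feature-enabled builds
#else
    (void)value; // deliberately unused when the feature is compiled out
    return 0;
#endif
}

int main() { std::printf("%d\n", process(21)); }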
@@ -43,6 +43,34 @@ si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    auto w = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    m.add_instruction(migraphx::make_op("convolution"), x, w);
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if (s.empty())
        return;
    EXPECT(encode(s) == encode(mlir_output));
}

TEST_CASE(conv_add_relu)
{
    const std::string mlir_output = R"__migraphx__(
module {
  func @main(%arg0: tensor<1x8x4x4xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32> {
    %0 = "migraphx.convolution"(%arg0, %arg1) {dilation = [1 : si64, 1 : si64], group = 1 : si64, padding = [0 : si64, 0 : si64], padding_mode = 0 : si64, stride = [1 : si64, 1 : si64]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
    %1 = "migraphx.add"(%0, %arg2) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
    %2 = "migraphx.relu"(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
  }
}
)__migraphx__";
    migraphx::module m;
    auto x = m.add_parameter("x", {migraphx::shape::float_type, {1, 8, 4, 4}});
    auto w = m.add_parameter("w", {migraphx::shape::float_type, {2, 8, 3, 3}});
    auto b = m.add_parameter("b", {migraphx::shape::float_type, {1, 2, 2, 2}});
    auto conv = m.add_instruction(migraphx::make_op("convolution"), x, w);
    auto add = m.add_instruction(migraphx::make_op("add"), conv, b);
    m.add_instruction(migraphx::make_op("relu"), add);
    auto s = migraphx::gpu::dump_mlir(m);
    // Skip test if MLIR is not enabled
    if (s.empty())
        return;
    EXPECT(encode(s) == encode(mlir_output));
}