"...lm-evaluation-harness.git" did not exist on "4c22a521f0ae648e695f9847fbc1e2a8a3a18897"
Commit 31065c7d authored by charlie

Merge branch 'dyn_squeeze' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents 6bec381f 6acbd4e4
@@ -23,6 +23,7 @@
  */
 #include <migraphx/tf/op_parser.hpp>
 #include <migraphx/tf/tf_parser.hpp>
+#include <migraphx/instruction.hpp>
 #include <migraphx/ranges.hpp>
 #include <migraphx/make_op.hpp>
@@ -38,16 +39,37 @@ struct parse_batchnorm : op_parser<parse_batchnorm>
     instruction_ref parse(const op_desc& /*opd*/,
                           const tf_parser& /*parser*/,
                           tf_parser::node_info info,
-                          const std::vector<instruction_ref>& args) const
+                          std::vector<instruction_ref> args) const
     {
-        float epsilon  = 1e-5f;
-        float momentum = 0.9f;
+        // different default epsilon than from ONNX
+        float epsilon = 1e-4f;
         if(contains(info.attributes, "epsilon"))
         {
            epsilon = info.attributes.at("epsilon").f();
         }
-        auto op = make_op("batch_norm_inference", {{"epsilon", epsilon}, {"momentum", momentum}});
-        return info.add_instruction(op, args);
+        auto x_lens = args[0]->get_shape().lens();
+        auto x_type = args[0]->get_shape().type();
+        // unsqueeze tensors of shape (C) to broadcast correctly
+        auto rt  = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {0.5}});
+        auto eps = info.add_literal(migraphx::literal{migraphx::shape{x_type}, {epsilon}});
+        auto scale_unsqueeze =
+            info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[1]);
+        auto bias_unsqueeze =
+            info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[2]);
+        auto mean_unsqueeze =
+            info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[3]);
+        auto var_unsqueeze =
+            info.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), args[4]);
+        auto numer   = info.add_broadcastable_binary_op("sub", args[0], mean_unsqueeze);
+        auto var_eps = info.add_broadcastable_binary_op("add", var_unsqueeze, eps);
+        auto denom   = info.add_broadcastable_binary_op("pow", var_eps, rt);
+        auto div0    = info.add_broadcastable_binary_op("div", numer, denom);
+        auto r0      = info.add_broadcastable_binary_op("mul", div0, scale_unsqueeze);
+        return info.add_broadcastable_binary_op("add", r0, bias_unsqueeze);
     }
 };
...
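Note: the decomposition computes the standard batch-norm inference formula, with the per-channel scale (args[1]), bias (args[2]), mean (args[3]), and variance (args[4]) unsqueezed over axes {1, 2} so they broadcast over NCHW inputs; the pow with exponent 0.5 supplies the square root:

    y = scale * (x - mean) / sqrt(variance + epsilon) + bias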
@@ -75,7 +75,6 @@ struct parse_conv : op_parser<parse_conv>
         const std::string& pad_mode = info.attributes.at("padding").s();
         if(pad_mode.find("SAME") != std::string::npos)
         {
-            op.padding_mode = op::padding_mode_t::same;
             std::vector<size_t> weight_dims = weights->get_shape().lens();
             size_t weight_h = weight_dims[2];
             size_t weight_w = weight_dims[3];
@@ -87,10 +86,6 @@ struct parse_conv : op_parser<parse_conv>
             op.padding = std::vector<size_t>(pads.begin(), pads.end());
         }
-        else if(pad_mode.find("VALID") != std::string::npos)
-        {
-            op.padding_mode = op::padding_mode_t::valid;
-        }
         else if(pad_mode.find("EXPLICIT") != std::string::npos)
         {
             std::vector<size_t> padding;
@@ -100,7 +95,7 @@ struct parse_conv : op_parser<parse_conv>
             {
                 MIGRAPHX_THROW("padding should have 4 values");
             }
-            if(padding[0] != padding[2] || padding[1] != padding[3])
+            if(padding[0] != padding[2] or padding[1] != padding[3])
             {
                 MIGRAPHX_THROW("migraphx does not support asymetric padding");
             }
...
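Note: `calculate_padding` (not shown in this diff) fills in the SAME pads that replace the removed `padding_mode` flag. A minimal sketch of that arithmetic, assuming it follows TensorFlow's SAME convention (so this is an illustration, not code from this commit):

    #include <cstddef>

    // Total padding so that output_dim == ceil(input_dim / stride).
    std::size_t same_padding_total(std::size_t input, std::size_t stride,
                                   std::size_t dilation, std::size_t kernel)
    {
        std::size_t effective_kernel = (kernel - 1) * dilation + 1;
        std::size_t output           = (input + stride - 1) / stride; // ceil division
        std::size_t needed           = (output - 1) * stride + effective_kernel;
        return needed > input ? needed - input : 0; // split as begin = total / 2, end = rest
    }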
@@ -80,7 +80,6 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
         if(pad_mode.find("SAME") != std::string::npos)
         {
-            op.padding_mode = op::padding_mode_t::same;
             std::vector<size_t> weight_dims = weights->get_shape().lens();
             size_t weight_h = weight_dims[2];
             size_t weight_w = weight_dims[3];
@@ -90,7 +89,7 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
             calculate_padding(0, pads, input_dims[2], op.stride[0], op.dilation[0], weight_h);
             calculate_padding(1, pads, input_dims[3], op.stride[1], op.dilation[1], weight_w);
-            if(pads[0] != pads[2] || pads[1] != pads[3])
+            if(pads[0] != pads[2] or pads[1] != pads[3])
             {
                 std::vector<int64_t> padding = {0, 0, pads[0], pads[1], 0, 0, pads[2], pads[3]};
                 l0 = info.add_instruction(migraphx::make_op("pad", {{"pads", padding}}), l0);
@@ -101,10 +100,6 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
                 op.padding[1] = pads[1];
             }
         }
-        else if(pad_mode.find("VALID") != std::string::npos)
-        {
-            op.padding_mode = op::padding_mode_t::valid;
-        }
     }
     std::vector<int64_t> new_weights_shape;
...
@@ -42,7 +42,7 @@ struct parse_pooling : op_parser<parse_pooling>
                           tf_parser::node_info info,
                           std::vector<instruction_ref> args) const
     {
-        if(!starts_with(opd.tf_name, "Max") && !starts_with(opd.tf_name, "Av"))
+        if(not starts_with(opd.tf_name, "Max") and not starts_with(opd.tf_name, "Av"))
         {
             MIGRAPHX_THROW("tf pooling mode must be Max or Average");
         }
...
@@ -41,8 +41,9 @@ struct parse_relu6 : op_parser<parse_relu6>
                           const tf_parser::node_info& info,
                           std::vector<instruction_ref> args) const
     {
-        auto min_val = info.add_literal(0.0f);
-        auto max_val = info.add_literal(6.0f);
+        shape::type_t output_type = args[0]->get_shape().type();
+        auto min_val = info.add_literal(migraphx::literal{migraphx::shape{output_type}, {0.0f}});
+        auto max_val = info.add_literal(migraphx::literal{migraphx::shape{output_type}, {6.0f}});
         return info.add_common_op("clip", args[0], min_val, max_val);
     }
...
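Note: the op being parsed is TensorFlow's Relu6, i.e. relu6(x) = min(max(x, 0), 6). The change only affects how the 0 and 6 literals are created: they now carry the input's element type (e.g. half) instead of defaulting to float, so `clip` sees matching argument types.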
@@ -347,7 +347,7 @@ void tf_parser::parse_node(const std::string& name)
                 // input was from a node with multiple outputs
                 if(contains(input_name, ':'))
                 {
-                    input_name = input_name.substr(0, input.find(':'));
+                    input_name.resize(input.find(':'));
                 }
                 else
                 {
@@ -371,7 +371,7 @@ void tf_parser::parse_node(const std::string& name)
         {
             result = ops[node.op()](*this, {get_attributes(node), node.op(), mm}, args);
         }
-        assert(!result.empty());
+        assert(not result.empty());
         // First output has no ":" delimiter
         instructions[name] = result.front();
         for(size_t i = 1; i < result.size(); i++)
@@ -458,7 +458,7 @@ literal tf_parser::parse_tensor(const tensorflow::TensorProto& t) const
 {
     std::vector<size_t> dims = parse_dims(t.tensor_shape());
     size_t shape_size = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<size_t>());
-    if(!t.tensor_content().empty()) // has raw data
+    if(not t.tensor_content().empty()) // has raw data
     {
         const std::string& s = t.tensor_content();
         switch(t.dtype())
...
@@ -78,7 +78,7 @@ void tmp_dir::execute(const std::string& exe, const std::string& args) const
 tmp_dir::~tmp_dir()
 {
-    if(!enabled(MIGRAPHX_DEBUG_SAVE_TEMP_DIR{}))
+    if(not enabled(MIGRAPHX_DEBUG_SAVE_TEMP_DIR{}))
     {
         fs::remove_all(this->path);
     }
...
@@ -400,7 +400,7 @@ std::pair<value*, bool> value::insert(const value& v)
 {
     if(v.key.empty())
     {
-        if(!x)
+        if(not x)
             x = std::make_shared<array_value_holder>();
         get_array_impl(x).push_back(v);
         assert(this->if_array());
@@ -408,7 +408,7 @@ std::pair<value*, bool> value::insert(const value& v)
     }
     else
     {
-        if(!x)
+        if(not x)
             x = std::make_shared<object_value_holder>();
         auto p = x->if_object()->emplace(v.key, get_array_impl(x).size());
         if(p.second)
@@ -420,7 +420,7 @@ std::pair<value*, bool> value::insert(const value& v)
 value* value::insert(const value* pos, const value& v)
 {
     assert(v.key.empty());
-    if(!x)
+    if(not x)
         x = std::make_shared<array_value_holder>();
     auto&& a = get_array_impl(x);
     auto it = a.insert(a.begin() + (pos - begin()), v);
@@ -466,7 +466,7 @@ bool compare(const value& x, const value& y, F f)
 value::type_t value::get_type() const
 {
-    if(!x)
+    if(not x)
         return null_type;
     return x->get_type();
 }
@@ -511,14 +511,7 @@ void print_value(std::ostream& os, const std::vector<value>& x)
     os << "}";
 }
-void print_value(std::ostream& os, const value::binary& x)
-{
-    // Convert binary to integers
-    std::vector<int> v(x.begin(), x.end());
-    os << "{";
-    os << to_string_range(v);
-    os << "}";
-}
+void print_value(std::ostream& os, const value::binary& x) { os << x; }
 std::ostream& operator<<(std::ostream& os, const value& d)
 {
...
@@ -43,6 +43,8 @@ struct sigmoid_custom_op final : migraphx::experimental_custom_op_base
         return inputs[1];
     }

+    virtual bool runs_on_offload_target() const override { return true; }
+
     virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
     {
         if(inputs.size() != 2)
@@ -111,4 +113,45 @@ TEST_CASE(run_sigmoid_with_incorrect_shape)
                        "Error in compute_shape of: sigmoid_custom_op: op must have two inputs"));
 }

+struct identity_custom_op final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "identity_custom_op"; }
+    virtual migraphx::argument
+    compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
+    {
+        return inputs[0];
+    }
+    virtual bool runs_on_offload_target() const override { return true; }
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(inputs.size() != 1)
+        {
+            throw std::runtime_error("Identity op must have only one input");
+        }
+        return inputs.back();
+    }
+    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0, 1}; }
+};
+
+TEST_CASE(run_custom_op_with_invalid_output_alias)
+{
+    identity_custom_op i_op;
+    migraphx::register_experimental_custom_op(i_op);
+    auto op = migraphx::operation("identity_custom_op");
+    EXPECT(op.name() == "identity_custom_op");
+    migraphx::program p;
+    migraphx::shape s{migraphx_shape_float_type, {12}};
+    migraphx::module m = p.get_main_module();
+    auto x             = m.add_parameter("x", s);
+    auto i_ins         = m.add_instruction(migraphx::operation("identity_custom_op"), {x});
+    migraphx_test_private_disable_exception_catch(true);
+    EXPECT(test::throws<std::exception>(
+        [&] { p.compile(migraphx::target("ref")); },
+        "Currently, CustomOps in MIGraphX only supports one output_alias"));
+}
+
 int main(int argc, const char* argv[]) { test::run(argc, argv); }
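Note: as the expected error message says, custom ops currently support at most one output alias, so returning {0, 1} for a single-output op is exactly what makes compilation throw here. A conforming override returns a single index, as the stride_two op added in the GPU test file below does:

    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0}; }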
@@ -24,40 +24,91 @@
 #include <hip/hip_runtime_api.h>
 #include <migraphx/migraphx.h>
 #include <migraphx/migraphx.hpp>
+#include <numeric>
 #include <stdexcept>
 #include "test.hpp"

 #define MIGRAPHX_HIP_ASSERT(x) (EXPECT(x == hipSuccess))

-struct simple_custom_op final : migraphx::experimental_custom_op_base
+struct half_copy_host final : migraphx::experimental_custom_op_base
 {
-    virtual std::string name() const override { return "simple_custom_op"; }
+    virtual std::string name() const override { return "half_copy_host"; }
+    virtual bool runs_on_offload_target() const override { return false; }
     virtual migraphx::argument
     compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
     {
-        // sets first half size_bytes of the input 0, and rest of the half bytes are copied.
-        int* h_output    = nullptr;
-        auto* d_output   = reinterpret_cast<int*>(inputs[0].data());
-        auto input_bytes = inputs[0].get_shape().bytes();
-        auto* output_ptr = inputs[1].data();
-        auto copy_bytes  = input_bytes / 2;
+        // This custom op simply sets first half size_bytes of the input to 0, and rest of the half
+        // bytes are copied. For this custom_op, it does its computation on the host. Therefore,
+        // `runs_on_offload_target()` is set to false. MIGraphX would inject necessary buffer copies
+        // to and from GPU to Host based on the `runs_on_offload_target()` flag for input buffers as
+        // well as the output buffers
+        auto* input_buffer_ptr  = inputs[0].data();
+        auto* output_buffer_ptr = inputs[1].data();
+        auto input_bytes        = inputs[0].get_shape().bytes();
+        auto copy_bytes         = input_bytes / 2;
         MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
-        MIGRAPHX_HIP_ASSERT(hipHostMalloc(&h_output, input_bytes));
-        MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(
-            h_output, d_output, input_bytes, hipMemcpyDeviceToHost, ctx.get_queue<hipStream_t>()));
+        MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
+                                           input_buffer_ptr,
+                                           input_bytes,
+                                           hipMemcpyHostToHost,
+                                           ctx.get_queue<hipStream_t>()));
         MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
-        MIGRAPHX_HIP_ASSERT(hipMemset(h_output, 0, copy_bytes));
+        MIGRAPHX_HIP_ASSERT(
+            hipMemsetAsync(output_buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
         MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
-        MIGRAPHX_HIP_ASSERT(hipMemcpy(output_ptr, h_output, input_bytes, hipMemcpyHostToDevice));
-        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
-        MIGRAPHX_HIP_ASSERT(hipHostFree(h_output));
         return inputs[1];
     }
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(not inputs[0].standard() or not inputs[1].standard())
+        {
+            throw std::runtime_error("Input args must be standard shaped");
+        }
+        if(inputs.size() != 2)
+        {
+            throw std::runtime_error("number of inputs must be 2");
+        }
+        return inputs.back();
+    }
+};
+
+struct half_copy_device final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "half_copy_device"; }
+    virtual bool runs_on_offload_target() const override { return true; }
+    virtual migraphx::argument
+    compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
+    {
+        // This custom op simply sets first half size_bytes of the input to 0, and rest of the half
+        // bytes are copied. For this custom_op, it does its computation on the "GPU". Therefore,
+        // `runs_on_offload_target()` is set to "true".
+        auto* input_buffer_ptr  = inputs[0].data();
+        auto* output_buffer_ptr = inputs[1].data();
+        auto input_bytes        = inputs[0].get_shape().bytes();
+        auto copy_bytes         = input_bytes / 2;
+        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
+        MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
+                                           input_buffer_ptr,
+                                           input_bytes,
+                                           hipMemcpyDeviceToDevice,
+                                           ctx.get_queue<hipStream_t>()));
+        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
+        MIGRAPHX_HIP_ASSERT(
+            hipMemsetAsync(output_buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
+        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
+        return inputs[1];
+    }
     virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
     {
-        if(!inputs[0].standard())
+        if(not inputs[0].standard() or not inputs[1].standard())
         {
-            throw std::runtime_error("first arg must be standard shaped");
+            throw std::runtime_error("Input args must be standard shaped");
         }
         if(inputs.size() != 2)
         {
@@ -67,36 +118,209 @@ struct simple_custom_op final : migraphx::experimental_custom_op_base
     }
 };
-TEST_CASE(run_simple_custom_op)
-{
-    simple_custom_op simple_op;
-    migraphx::register_experimental_custom_op(simple_op);
+// overwrites input buffer
+struct half_copy_device_same_buffer final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "half_copy_device_same_buffer"; }
+    virtual bool runs_on_offload_target() const override { return true; }
+    virtual migraphx::argument
+    compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
+    {
+        // This custom op simply sets first half size_bytes of the input 0, and rest of the half
+        // bytes are copied. For this custom_op, it does its computation on the "device". Therefore,
+        // `runs_on_offload_target()` is set to "true"
+        auto* buffer_ptr = inputs[0].data();
+        auto input_bytes = inputs[0].get_shape().bytes();
+        auto copy_bytes  = input_bytes / 2;
+        MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
+        MIGRAPHX_HIP_ASSERT(
+            hipMemsetAsync(buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
+        MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
+        return inputs[0];
+    }
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(not inputs[0].standard())
+        {
+            throw std::runtime_error("Input arg must be standard shaped");
+        }
+        return inputs.front();
+    }
+};
+
+TEST_CASE(register_half_copy_op)
+{
+    half_copy_host hch;
+    migraphx::register_experimental_custom_op(hch);
+    auto op = migraphx::operation("half_copy_host");
+    EXPECT(op.name() == "half_copy_host");
+
+    half_copy_device hcd;
+    migraphx::register_experimental_custom_op(hcd);
+    op = migraphx::operation("half_copy_device");
+    EXPECT(op.name() == "half_copy_device");
+
+    half_copy_device_same_buffer hcdsb;
+    migraphx::register_experimental_custom_op(hcdsb);
+    op = migraphx::operation("half_copy_device_same_buffer");
+    EXPECT(op.name() == "half_copy_device_same_buffer");
+}
+
+TEST_CASE(half_copy_custom_op_test)
 {
+    auto run_test_prog = [](const std::string& op_name, bool buffer_alloc) {
+        migraphx::program p;
+        migraphx::module m = p.get_main_module();
+        migraphx::shape s{migraphx_shape_float_type, {4, 3}};
+        auto x = m.add_parameter("x", s);
+        migraphx::instructions inputs = {x};
+        if(buffer_alloc)
+        {
+            auto alloc = m.add_allocation(s);
+            inputs     = {x, alloc};
+        }
+        auto half_copy_ins = m.add_instruction(migraphx::operation(op_name.c_str()), inputs);
+        m.add_return({half_copy_ins});
+        migraphx::compile_options options;
+        options.set_offload_copy();
+        p.compile(migraphx::target("gpu"), options);
+        migraphx::program_parameters pp;
+        std::vector<float> x_data(12);
+        std::iota(x_data.begin(), x_data.end(), 0);
+        pp.add("x", migraphx::argument(s, x_data.data()));
+        auto results    = p.eval(pp);
+        auto result     = results[0];
+        auto result_vec = result.as_vector<float>();
+        std::vector<float> expected_result(12, 0);
+        std::iota(expected_result.begin() + 6, expected_result.end(), 6);
+        EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
+    };
+    // register all the ops
+    half_copy_host hch;
+    migraphx::register_experimental_custom_op(hch);
+    half_copy_device hcd;
+    migraphx::register_experimental_custom_op(hcd);
+    half_copy_device_same_buffer hcdsb;
+    migraphx::register_experimental_custom_op(hcdsb);
+    std::vector<std::pair<std::string, bool>> tests_config = {
+        {"half_copy_host", true},
+        {"half_copy_device", true},
+        {"half_copy_device_same_buffer", false}};
+    for(const auto& i : tests_config)
+    {
+        run_test_prog(i.first, i.second);
+    }
+}
+struct stride_two final : migraphx::experimental_custom_op_base
+{
+    virtual std::string name() const override { return "stride_two"; }
+    virtual migraphx::argument
+    compute(migraphx::context, migraphx::shape out_shape, migraphx::arguments inputs) const override
+    {
+        return {out_shape, inputs[0].data()};
+    }
+    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
+    {
+        if(inputs.size() != 1)
+        {
+            throw std::runtime_error("stride_two op must have only one input argument");
+        };
+        if(not inputs[0].standard())
+        {
+            throw std::runtime_error("stride_two op only works on the standard input shapes");
+        }
+        migraphx::shape input_s     = inputs[0];
+        std::vector<size_t> dims    = input_s.lengths();
+        std::vector<size_t> new_dims;
+        std::vector<size_t> strides = input_s.strides();
+        std::vector<size_t> new_strides;
+        std::for_each(dims.begin(), dims.end(), [&](auto i) { new_dims.push_back(i / 2); });
+        std::for_each(
+            strides.begin(), strides.end(), [&](auto i) { new_strides.push_back(i * 2); });
+        migraphx::shape output_shape{input_s.type(), new_dims, new_strides};
+        return output_shape;
+    }
+    virtual bool runs_on_offload_target() const override { return true; }
+    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0}; };
+};
+
+TEST_CASE(stride_two_custom_op_test)
+{
+    stride_two st;
+    migraphx::register_experimental_custom_op(st);
+    migraphx::program p;
+    migraphx::module m = p.get_main_module();
+    migraphx::shape s{migraphx_shape_float_type, {4, 4, 4}};
+    auto x              = m.add_parameter("x", s);
+    auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {x});
+    m.add_return({stride_two_ins});
+    migraphx::compile_options options;
+    options.set_offload_copy();
+    p.compile(migraphx::target("gpu"), options);
+    migraphx::program_parameters pp;
+    std::vector<float> x_data(64);
+    std::iota(x_data.begin(), x_data.end(), 0);
+    pp.add("x", migraphx::argument(s, x_data.data()));
+    auto results    = p.eval(pp);
+    auto result     = results[0];
+    auto result_vec = result.as_vector<float>();
+    std::vector<float> expected_result = {0, 2, 8, 10, 32, 34, 40, 42};
+    EXPECT(result_vec == expected_result);
+}
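Note: a quick check of the expected values. The standard 4x4x4 input has strides {16, 4, 1}; compute_shape halves the dims to {2, 2, 2} and doubles the strides to {32, 8, 2}, so output element (i, j, k) reads input offset

    offset(i, j, k) = 32*i + 8*j + 2*k,   i, j, k in {0, 1}

which picks every other element along each axis of the iota data: {0, 2, 8, 10, 32, 34, 40, 42}.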
+TEST_CASE(custom_op_with_pre_and_post_subgraph_test)
+{
+    half_copy_host hco;
+    migraphx::register_experimental_custom_op(hco);
+    stride_two st;
+    migraphx::register_experimental_custom_op(st);
     migraphx::program p;
-    migraphx::shape s{migraphx_shape_int32_type, {4, 3}};
-    migraphx::shape trans_shape{migraphx_shape_int32_type, {3, 4}};
+    migraphx::shape s{migraphx_shape_float_type, {4, 6}};
     migraphx::module m = p.get_main_module();
     auto x = m.add_parameter("x", s);
-    auto neg   = m.add_instruction(migraphx::operation("neg"), x);
-    auto alloc = m.add_allocation(trans_shape);
-    auto neg_trans =
-        m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg});
-    auto neg_cont = m.add_instruction(migraphx::operation("contiguous"), {neg_trans});
-    auto custom_kernel =
-        m.add_instruction(migraphx::operation("simple_custom_op"), {neg_cont, alloc});
-    auto relu = m.add_instruction(migraphx::operation("relu"), custom_kernel);
-    m.add_return({relu});
+    // pre-subgraph
+    auto neg_ins = m.add_instruction(migraphx::operation("neg"), x);
+    auto trans_ins =
+        m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg_ins});
+    auto cont_ins = m.add_instruction(migraphx::operation("contiguous"), {trans_ins});
+    // custom_op
+    migraphx::shape trans_shape{migraphx_shape_float_type, {6, 4}};
+    auto alloc = m.add_allocation(trans_shape);
+    auto half_copy_ins =
+        m.add_instruction(migraphx::operation("half_copy_host"), {cont_ins, alloc});
+    // post-subgraph
+    auto abs_ins = m.add_instruction(migraphx::operation("abs"), {half_copy_ins});
+    // another custom_op
+    auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {abs_ins});
+    // post-subgraph
+    auto relu_ins = m.add_instruction(migraphx::operation("relu"), {stride_two_ins});
+    m.add_return({relu_ins});
     migraphx::compile_options options;
     options.set_offload_copy();
     p.compile(migraphx::target("gpu"), options);
     migraphx::program_parameters pp;
-    std::vector<int> x_data(12, -3);
+    std::vector<float> x_data(s.elements());
+    std::iota(x_data.begin(), x_data.end(), 0);
     pp.add("x", migraphx::argument(s, x_data.data()));
     auto results = p.eval(pp);
     auto result  = results[0];
-    auto result_vec = result.as_vector<int>();
-    std::vector<int> expected_result(12, 0);
-    std::fill(expected_result.begin() + 6, expected_result.end(), 3);
-    EXPECT(bool{result == migraphx::argument(trans_shape, expected_result.data())});
+    auto result_vec = result.as_vector<float>();
+    std::vector<float> expected_result = {0, 0, 0, 0, 4, 16};
+    EXPECT(bool{result == migraphx::argument(migraphx::shape{migraphx_shape_float_type, {3, 2}},
+                                             expected_result.data())});
 }

 int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -25,6 +25,8 @@
 #include <hip/hip_runtime_api.h>
 #include <migraphx/migraphx.h>
 #include <migraphx/migraphx.hpp>
+#include <migraphx/manage_ptr.hpp>
 #include "test.hpp"

 TEST_CASE(load_and_run)
@@ -44,11 +46,67 @@ TEST_CASE(load_and_run)
     {
         pp.add(name, migraphx::argument::generate(param_shapes[name]));
     }
     auto outputs = p.eval(pp);
     CHECK(shapes_before.size() == outputs.size());
     CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
 }
+using hip_ptr    = MIGRAPHX_MANAGE_PTR(void, hipFree);
+using stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);
+
+stream_ptr get_stream()
+{
+    hipStream_t stream;
+    auto err = hipStreamCreateWithFlags(&stream, 0);
+    EXPECT(err == hipSuccess);
+    return stream_ptr{stream};
+}
+
+hip_ptr get_hip_buffer(size_t size)
+{
+    void* ptr;
+    auto err = hipMalloc(&ptr, size);
+    EXPECT(err == hipSuccess);
+    return hip_ptr{ptr};
+}
+
+TEST_CASE(load_and_run_async)
+{
+    auto p             = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
+    auto shapes_before = p.get_output_shapes();
+    migraphx::compile_options options;
+    options.set_offload_copy(false);
+    p.compile(migraphx::target("gpu"), options);
+    auto shapes_after = p.get_output_shapes();
+    CHECK(shapes_before.size() == 1);
+    CHECK(shapes_before.size() == shapes_after.size());
+    CHECK(bool{shapes_before.front() == shapes_after.front()});
+    migraphx::program_parameters pp;
+    auto param_shapes = p.get_parameter_shapes();
+    stream_ptr stream = get_stream();
+    std::vector<hip_ptr> buffs;
+    std::vector<migraphx::argument> args;
+    for(auto&& name : param_shapes.names())
+    {
+        args.push_back(migraphx::argument::generate(param_shapes[name]));
+        buffs.push_back(get_hip_buffer(args.rbegin()->get_shape().bytes()));
+        auto err = hipMemcpy(buffs.rbegin()->get(),
+                             args.rbegin()->data(),
+                             args.rbegin()->get_shape().bytes(),
+                             hipMemcpyHostToDevice);
+        EXPECT(err == hipSuccess);
+        pp.add(name, migraphx::argument(args.rbegin()->get_shape(), buffs.rbegin()->get()));
+    }
+    auto outputs = p.run_async(pp, stream.get());
+    CHECK(shapes_before.size() == outputs.size());
+    CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
+}
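Note: with offload copy disabled, the caller owns the device buffers and the stream, so the test copies inputs in by hand and wraps the raw HIP handles in the two MIGRAPHX_MANAGE_PTR aliases for RAII cleanup. Roughly equivalent to the hip_ptr alias (an assumption about what the macro expands to, not its actual definition):

    // Hypothetical equivalent of `using hip_ptr = MIGRAPHX_MANAGE_PTR(void, hipFree);`
    #include <hip/hip_runtime_api.h>
    #include <memory>

    struct hip_free_deleter
    {
        void operator()(void* p) const { (void)hipFree(p); } // ignore status in a deleter
    };
    using hip_ptr_sketch = std::unique_ptr<void, hip_free_deleter>;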
 TEST_CASE(load_and_run_ctx)
 {
     auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
@@ -82,10 +140,10 @@ TEST_CASE(if_pl_test)
     migraphx::program_parameters pp;
     auto param_shapes = p.get_parameter_shapes();
     auto xs           = param_shapes["x"];
-    std::vector<float> xd(xs.bytes() / sizeof(float), 1.0);
+    std::vector<float> xd(xs.elements(), 1.0);
     pp.add("x", migraphx::argument(xs, xd.data()));
     auto ys = param_shapes["y"];
-    std::vector<float> yd(ys.bytes() / sizeof(float), 2.0);
+    std::vector<float> yd(ys.elements(), 2.0);
     pp.add("y", migraphx::argument(ys, yd.data()));
     char ccond = cond;
     pp.add("cond", migraphx::argument(param_shapes["cond"], &ccond));
...
@@ -49,6 +49,6 @@ bool create_shapes(bool dynamic_allowed)
 TEST_CASE(allow_dynamic_shape) { EXPECT(create_shapes(true)); }

-TEST_CASE(fail_dynamic_shape) { EXPECT(!create_shapes(false)); }
+TEST_CASE(fail_dynamic_shape) { EXPECT(not create_shapes(false)); }

 int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -187,7 +187,7 @@ TEST_CASE(print_test)
     std::stringstream ss;
     ss << p;
     std::string s = ss.str();
-    EXPECT(!s.empty());
+    EXPECT(not s.empty());
 }

 TEST_CASE(param_test)
...
@@ -26,8 +26,9 @@
 #include <migraphx/make_op.hpp>
 #include <migraphx/program.hpp>
 #include <migraphx/register_target.hpp>
-#include <migraphx/ref/target.hpp>
+#include <migraphx/fpga/target.hpp>
 #include <migraphx/target_assignments.hpp>
+#include <migraphx/iterator_for.hpp>

 migraphx::program create_program()
 {
@@ -37,8 +38,8 @@ migraphx::program create_program()
     auto x = mm->add_parameter("x", s);
     auto y = mm->add_parameter("y", s);
     auto z = mm->add_parameter("z", s);
-    auto diff = mm->add_instruction(migraphx::make_op("div"), x, y);
-    mm->add_instruction(migraphx::make_op("div"), diff, z);
+    auto diff = mm->add_instruction(migraphx::make_op("add"), x, y);
+    mm->add_instruction(migraphx::make_op("add"), diff, z);
     return p;
 }
@@ -46,15 +47,17 @@ TEST_CASE(is_supported)
 {
     auto p       = create_program();
     auto targets = migraphx::get_targets();
-    EXPECT(!targets.empty());
-    auto first_target = targets[0];
-    auto t            = migraphx::make_target(first_target);
+    EXPECT(not targets.empty());
+    auto t = migraphx::make_target("fpga");
     const auto assignments = p.get_target_assignments({t});
-    for(const auto& [ins, target] : assignments)
+    const auto* mod = p.get_main_module();
+    EXPECT(mod->size() == assignments.size());
+    for(const auto ins : iterator_for(*mod))
     {
-        (void)ins;
-        EXPECT(target == first_target);
+        const auto& target = assignments.at(ins);
+        EXPECT(target == "fpga");
     }
 }
...
@@ -21,7 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#include "migraphx/dead_code_elimination.hpp"
+#include <migraphx/dead_code_elimination.hpp>
 #include <migraphx/fuse_pointwise.hpp>
 #include <migraphx/instruction.hpp>
 #include <migraphx/pass_manager.hpp>
...
@@ -40,6 +40,10 @@
 #include <migraphx/make_op.hpp>
 #include <basic_ops.hpp>
 #include <test.hpp>
+#include "make_precompile_op.hpp"
+
+// Treat some operators as compilable to enable lowering
+MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")

 void run_lowering(migraphx::program& p, bool offload_copy = false)
 {
@@ -118,7 +122,7 @@ TEST_CASE(no_copy_dead_param)
     auto xb  = mm->add_instruction(migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
     auto gx  = mm->add_instruction(migraphx::make_op("hip::copy_to_gpu"), x, xb);
     auto ab  = mm->add_instruction(migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
-    auto sum = mm->add_instruction(migraphx::make_op("gpu::add"), gx, gx, ab);
+    auto sum = mm->add_instruction(make_precompile_op("add"), gx, gx, ab);
     auto r   = mm->add_instruction(migraphx::make_op("hip::copy_from_gpu"), sum);
     mm->add_return({r});
...
@@ -307,12 +307,14 @@ TEST_CASE(compile_math)
         "erf(x)",
         "exp(x)",
         "floor(x)",
+        "fmod(x, x)",
         "isnan(x)",
         "log(x)",
        "max(x, x)",
         "min(x, x)",
         "pow(x, 0)",
         "pow(x, x)",
+        "remainder(x,x)",
         "round(x)",
         "rsqrt(x)",
         "sin(x)",
...
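Note: the two new entries are distinct functions. fmod truncates the quotient toward zero while remainder rounds it to nearest, so the results can differ in sign. A quick host-side check:

    #include <cassert>
    #include <cmath>

    int main()
    {
        assert(std::fmod(5.0, 3.0) == 2.0);       // 5 - trunc(5/3)*3 = 5 - 3
        assert(std::remainder(5.0, 3.0) == -1.0); // 5 - round(5/3)*3 = 5 - 6
        return 0;
    }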
@@ -48,4 +48,4 @@ void gpu_literal_test()
     }
 }

-int main() { gpu_literal_test(); }
+int main() { gpu_literal_test(); } // NOLINT (bugprone-exception-escape)
@@ -21,48 +21,46 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
+#ifndef MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
+#define MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP

-#include "verify_program.hpp"
-#include <migraphx/program.hpp>
-#include <migraphx/generate.hpp>
-#include <migraphx/serialize.hpp>
+#include <migraphx/operation.hpp>
+#include <migraphx/gpu/compiler.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>

-struct test_batchnorm_3d_per_actv : verify_program<test_batchnorm_3d_per_actv>
-{
-    const size_t d1       = 2;
-    const size_t d2       = 4;
-    const size_t d3       = 5;
-    const size_t channels = 2;
-    const size_t batches  = 3;
-
-    migraphx::program create_program() const
-    {
-        migraphx::program p;
-        auto* mm = p.get_main_module();
-
-        migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2, d3}};
-        migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2, d3}};
-        auto x        = mm->add_parameter("x", s);
-        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
-        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
-        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
-        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        mm->add_instruction(
-            migraphx::make_op(
-                "batch_norm_inference",
-                {{"epsilon", 1.0e-6},
-                 {"momentum", 0.8f},
-                 {"bn_mode",
-                  migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
-            x,
-            scale,
-            bias,
-            mean,
-            variance);
-        return p;
-    }
-};
+// NOLINTNEXTLINE
+#define MIGRAPHX_GPU_TEST_PRECOMPILE(...)                                \
+    struct test_compiler : migraphx::gpu::compiler<test_compiler>       \
+    {                                                                    \
+        std::vector<std::string> names() const { return {__VA_ARGS__}; } \
+                                                                         \
+        template <class... Ts>                                           \
+        migraphx::operation compile_op(Ts&&...) const                    \
+        {                                                                \
+            MIGRAPHX_THROW("Not compilable");                            \
+        }                                                                \
+                                                                         \
+        template <class... Ts>                                           \
+        migraphx::gpu::compiler_replace compile(Ts&&...) const           \
+        {                                                                \
+            MIGRAPHX_THROW("Not compilable");                            \
+        }                                                                \
+    };
+
+inline migraphx::operation make_precompile_op(migraphx::rank<0>, const migraphx::operation& op)
+{
+    return migraphx::make_op("gpu::precompile_op", {{"op", migraphx::to_value(op)}});
+}
+
+inline migraphx::operation make_precompile_op(migraphx::rank<1>, const std::string& name)
+{
+    return make_precompile_op(migraphx::rank<0>{}, migraphx::make_op(name));
+}
+
+template <class T>
+auto make_precompile_op(const T& x)
+{
+    return make_precompile_op(migraphx::rank<1>{}, x);
+}
+
+#endif // MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
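Note: the make_precompile_op overloads use rank-based tag dispatch: rank<1> converts to rank<0>, so the more specific overload wins whenever its argument type matches. A self-contained sketch of the idiom, assuming migraphx::rank follows the usual definition (an assumption, not this header's code):

    #include <iostream>
    #include <string>

    template <int N>
    struct rank : rank<N - 1> {};
    template <>
    struct rank<0> {};

    void pick(rank<0>, double) { std::cout << "fallback\n"; }
    void pick(rank<1>, const std::string& s) { std::cout << "string: " << s << "\n"; }

    int main()
    {
        pick(rank<1>{}, std::string("add")); // exact match on rank<1> wins
        pick(rank<1>{}, 3.14);               // slides down to the rank<0> overload
        return 0;
    }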
@@ -37,10 +37,6 @@
 #include <migraphx/functional.hpp>
 #include <test.hpp>

-using migraphx::trim;
-
-// m test_gpu_mlir && ./bin/test_gpu_mlir
-
 struct mlir_gpu_target : migraphx::gpu::target
 {
     std::string name() const { return "mlir"; }
@@ -88,7 +84,7 @@ migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
     inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));

     migraphx::gpu::context ctx;
-    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
+    migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs), inputs);
     return p;
 }
@@ -144,8 +140,8 @@ TEST_CASE(conv)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
-    %0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+  func.func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
+    %0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     return %0 : tensor<1x2x2x2xf32>
   }
 }
@@ -167,8 +163,8 @@ TEST_CASE(conv_add_relu)
 {
     const std::string mlir_output = R"__migraphx__(
 module {
-  func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
-    %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
+  func.func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
+    %0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
     %1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     %2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
     return %2 : tensor<1x2x2x2xf32>
...