"megatron/git@developer.sourcefind.cn:wuxk1/megatron-lm.git" did not exist on "90ce932d3192748c640ee432cffc53224f915be5"
Commit 5a14c0bf authored by umangyadav

Merge branch 'develop' into workspace_size

parents cb01e280 5fa42993
@@ -75,7 +75,6 @@ struct parse_conv : op_parser<parse_conv>
const std::string& pad_mode = info.attributes.at("padding").s();
if(pad_mode.find("SAME") != std::string::npos)
{
op.padding_mode = op::padding_mode_t::same;
std::vector<size_t> weight_dims = weights->get_shape().lens();
size_t weight_h = weight_dims[2];
size_t weight_w = weight_dims[3];
@@ -87,10 +86,6 @@ struct parse_conv : op_parser<parse_conv>
op.padding = std::vector<size_t>(pads.begin(), pads.end());
}
else if(pad_mode.find("VALID") != std::string::npos)
{
op.padding_mode = op::padding_mode_t::valid;
}
else if(pad_mode.find("EXPLICIT") != std::string::npos)
{
std::vector<size_t> padding;
...
@@ -80,7 +80,6 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
if(pad_mode.find("SAME") != std::string::npos)
{
op.padding_mode = op::padding_mode_t::same;
std::vector<size_t> weight_dims = weights->get_shape().lens();
size_t weight_h = weight_dims[2];
size_t weight_w = weight_dims[3];
@@ -101,10 +100,6 @@ struct parse_depthwiseconv : op_parser<parse_depthwiseconv>
op.padding[1] = pads[1];
}
}
else if(pad_mode.find("VALID") != std::string::npos)
{
op.padding_mode = op::padding_mode_t::valid;
}
}
std::vector<int64_t> new_weights_shape;
...
@@ -347,7 +347,7 @@ void tf_parser::parse_node(const std::string& name)
// input was from a node with multiple outputs
if(contains(input_name, ':'))
{
input_name = input_name.substr(0, input.find(':'));
input_name.resize(input.find(':'));
}
else
{
...
@@ -511,14 +511,7 @@ void print_value(std::ostream& os, const std::vector<value>& x)
os << "}";
}
void print_value(std::ostream& os, const value::binary& x)
{
// Convert binary to integers
std::vector<int> v(x.begin(), x.end());
os << "{";
os << to_string_range(v);
os << "}";
}
void print_value(std::ostream& os, const value::binary& x) { os << x; }
std::ostream& operator<<(std::ostream& os, const value& d)
{
...
@@ -43,6 +43,8 @@ struct sigmoid_custom_op final : migraphx::experimental_custom_op_base
return inputs[1];
}
virtual bool runs_on_offload_target() const override { return true; }
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(inputs.size() != 2)
@@ -111,4 +113,45 @@ TEST_CASE(run_sigmoid_with_incorrect_shape)
"Error in compute_shape of: sigmoid_custom_op: op must have two inputs"));
}
struct identity_custom_op final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "identity_custom_op"; }
virtual migraphx::argument
compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
{
return inputs[0];
}
virtual bool runs_on_offload_target() const override { return true; }
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(inputs.size() != 1)
{
throw std::runtime_error("Identity op must have only one input");
}
return inputs.back();
}
virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0, 1}; }
};
TEST_CASE(run_custom_op_with_invalid_output_alias)
{
identity_custom_op i_op;
migraphx::register_experimental_custom_op(i_op);
auto op = migraphx::operation("identity_custom_op");
EXPECT(op.name() == "identity_custom_op");
migraphx::program p;
migraphx::shape s{migraphx_shape_float_type, {12}};
migraphx::module m = p.get_main_module();
auto x = m.add_parameter("x", s);
auto i_ins = m.add_instruction(migraphx::operation("identity_custom_op"), {x});
migraphx_test_private_disable_exception_catch(true);
EXPECT(test::throws<std::exception>(
[&] { p.compile(migraphx::target("ref")); },
"Currently, CustomOps in MIGraphX only supports one output_alias"));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
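Side note (not part of the diff): the error tested above is triggered because output_alias() returns two indices, and custom ops currently support only a single output alias. A minimal valid form, mirroring what the stride_two op later in this commit does, could look like the hypothetical sketch below (struct name is illustrative only):

struct aliasing_identity_sketch final : migraphx::experimental_custom_op_base
{
    virtual std::string name() const override { return "aliasing_identity_sketch"; }
    virtual bool runs_on_offload_target() const override { return true; }
    virtual migraphx::argument
    compute(migraphx::context, migraphx::shape, migraphx::arguments inputs) const override
    {
        return inputs[0];
    }
    virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override { return inputs.front(); }
    // exactly one alias: the output shares the buffer of input 0
    virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0}; }
};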
@@ -24,40 +24,91 @@
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include <numeric>
#include <stdexcept>
#include "test.hpp"
#define MIGRAPHX_HIP_ASSERT(x) (EXPECT(x == hipSuccess))
struct simple_custom_op final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "simple_custom_op"; }
virtual migraphx::argument
compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
{
// sets first half size_bytes of the input to 0, and the rest of the bytes are copied.
int* h_output = nullptr;
auto* d_output = reinterpret_cast<int*>(inputs[0].data());
auto input_bytes = inputs[0].get_shape().bytes();
auto* output_ptr = inputs[1].data();
auto copy_bytes = input_bytes / 2;
MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
MIGRAPHX_HIP_ASSERT(hipHostMalloc(&h_output, input_bytes));
MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(
h_output, d_output, input_bytes, hipMemcpyDeviceToHost, ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(hipMemset(h_output, 0, copy_bytes));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(hipMemcpy(output_ptr, h_output, input_bytes, hipMemcpyHostToDevice));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(hipHostFree(h_output));
return inputs[1];
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(not inputs[0].standard())
{
throw std::runtime_error("first arg must be standard shaped");
}
struct half_copy_host final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "half_copy_host"; }
virtual bool runs_on_offload_target() const override { return false; }
virtual migraphx::argument
compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
{
// This custom op simply sets the first half size_bytes of the input to 0, and the rest of
// the bytes are copied. For this custom_op, it does its computation on the host. Therefore,
// `runs_on_offload_target()` is set to false. MIGraphX would inject necessary buffer copies
// to and from GPU and host based on the `runs_on_offload_target()` flag for input buffers as
// well as the output buffers.
auto* input_buffer_ptr = inputs[0].data();
auto* output_buffer_ptr = inputs[1].data();
auto input_bytes = inputs[0].get_shape().bytes();
auto copy_bytes = input_bytes / 2;
MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
input_buffer_ptr,
input_bytes,
hipMemcpyHostToHost,
ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(
hipMemsetAsync(output_buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
return inputs[1];
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(not inputs[0].standard() or not inputs[1].standard())
{
throw std::runtime_error("Input args must be standard shaped");
}
if(inputs.size() != 2)
{
throw std::runtime_error("number of inputs must be 2");
}
return inputs.back();
}
};
struct half_copy_device final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "half_copy_device"; }
virtual bool runs_on_offload_target() const override { return true; }
virtual migraphx::argument
compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
{
// This custom op simply sets the first half size_bytes of the input to 0, and the rest of
// the bytes are copied. For this custom_op, it does its computation on the "GPU". Therefore,
// `runs_on_offload_target()` is set to "true".
auto* input_buffer_ptr = inputs[0].data();
auto* output_buffer_ptr = inputs[1].data();
auto input_bytes = inputs[0].get_shape().bytes();
auto copy_bytes = input_bytes / 2;
MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
MIGRAPHX_HIP_ASSERT(hipMemcpyAsync(output_buffer_ptr,
input_buffer_ptr,
input_bytes,
hipMemcpyDeviceToDevice,
ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
MIGRAPHX_HIP_ASSERT(
hipMemsetAsync(output_buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
return inputs[1];
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(not inputs[0].standard() or not inputs[1].standard())
{
throw std::runtime_error("Input args must be standard shaped");
}
if(inputs.size() != 2)
{
@@ -67,36 +118,209 @@ struct simple_custom_op final : migraphx::experimental_custom_op_base
}
};
// overwrites input buffer
struct half_copy_device_same_buffer final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "half_copy_device_same_buffer"; }
virtual bool runs_on_offload_target() const override { return true; }
virtual migraphx::argument
compute(migraphx::context ctx, migraphx::shape, migraphx::arguments inputs) const override
{
// This custom op simply sets the first half size_bytes of the input to 0, and the rest of
// the bytes are copied. For this custom_op, it does its computation on the "device". Therefore,
// `runs_on_offload_target()` is set to "true".
auto* buffer_ptr = inputs[0].data();
auto input_bytes = inputs[0].get_shape().bytes();
auto copy_bytes = input_bytes / 2;
MIGRAPHX_HIP_ASSERT(hipSetDevice(0));
MIGRAPHX_HIP_ASSERT(
hipMemsetAsync(buffer_ptr, 0, copy_bytes, ctx.get_queue<hipStream_t>()));
MIGRAPHX_HIP_ASSERT(hipDeviceSynchronize());
return inputs[0];
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(not inputs[0].standard())
{
throw std::runtime_error("Input arg must be standard shaped");
}
return inputs.front();
}
};
TEST_CASE(register_half_copy_op)
{
half_copy_host hch;
migraphx::register_experimental_custom_op(hch);
auto op = migraphx::operation("half_copy_host");
EXPECT(op.name() == "half_copy_host");
half_copy_device hcd;
migraphx::register_experimental_custom_op(hcd);
op = migraphx::operation("half_copy_device");
EXPECT(op.name() == "half_copy_device");
half_copy_device_same_buffer hcdsb;
migraphx::register_experimental_custom_op(hcdsb);
op = migraphx::operation("half_copy_device_same_buffer");
EXPECT(op.name() == "half_copy_device_same_buffer");
}
TEST_CASE(half_copy_custom_op_test)
{
auto run_test_prog = [](const std::string& op_name, bool buffer_alloc) {
migraphx::program p;
migraphx::module m = p.get_main_module();
migraphx::shape s{migraphx_shape_float_type, {4, 3}};
auto x = m.add_parameter("x", s);
migraphx::instructions inputs = {x};
if(buffer_alloc)
{
auto alloc = m.add_allocation(s);
inputs = {x, alloc};
}
auto half_copy_ins = m.add_instruction(migraphx::operation(op_name.c_str()), inputs);
m.add_return({half_copy_ins});
migraphx::compile_options options;
options.set_offload_copy();
p.compile(migraphx::target("gpu"), options);
migraphx::program_parameters pp;
std::vector<float> x_data(12);
std::iota(x_data.begin(), x_data.end(), 0);
pp.add("x", migraphx::argument(s, x_data.data()));
auto results = p.eval(pp);
auto result = results[0];
auto result_vec = result.as_vector<float>();
std::vector<float> expected_result(12, 0);
std::iota(expected_result.begin() + 6, expected_result.end(), 6);
EXPECT(bool{result == migraphx::argument(s, expected_result.data())});
};
// register all the ops
half_copy_host hch;
migraphx::register_experimental_custom_op(hch);
half_copy_device hcd;
migraphx::register_experimental_custom_op(hcd);
half_copy_device_same_buffer hcdsb;
migraphx::register_experimental_custom_op(hcdsb);
std::vector<std::pair<std::string, bool>> tests_config = {
{"half_copy_host", true},
{"half_copy_device", true},
{"half_copy_device_same_buffer", false}};
for(const auto& i : tests_config)
{
run_test_prog(i.first, i.second);
}
}
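A quick sanity check on expected_result above (not part of the diff; a standalone constexpr sketch): the {4, 3} float input is 48 bytes, so the half-copy ops zero the first 24 bytes, i.e. the first 6 floats, and the remaining elements keep their iota values 6..11.

#include <cstddef>

static_assert(sizeof(float) == 4, "assumes 4-byte floats");
constexpr std::size_t elements   = 4 * 3;                    // shape {4, 3}
constexpr std::size_t bytes      = elements * sizeof(float); // 48 bytes
constexpr std::size_t copy_bytes = bytes / 2;                // 24 bytes are zeroed
static_assert(copy_bytes / sizeof(float) == 6, "first 6 floats become 0, the rest stay 6..11");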
struct stride_two final : migraphx::experimental_custom_op_base
{
virtual std::string name() const override { return "stride_two"; }
virtual migraphx::argument
compute(migraphx::context, migraphx::shape out_shape, migraphx::arguments inputs) const override
{
return {out_shape, inputs[0].data()};
}
virtual migraphx::shape compute_shape(migraphx::shapes inputs) const override
{
if(inputs.size() != 1)
{
throw std::runtime_error("stride_two op must have only one input argument");
};
if(not inputs[0].standard())
{
throw std::runtime_error("stride_two op only works on the standard input shapes");
}
migraphx::shape input_s = inputs[0];
std::vector<size_t> dims = input_s.lengths();
std::vector<size_t> new_dims;
std::vector<size_t> strides = input_s.strides();
std::vector<size_t> new_strides;
std::for_each(dims.begin(), dims.end(), [&](auto i) { new_dims.push_back(i / 2); });
std::for_each(
strides.begin(), strides.end(), [&](auto i) { new_strides.push_back(i * 2); });
migraphx::shape output_shape{input_s.type(), new_dims, new_strides};
return output_shape;
}
virtual bool runs_on_offload_target() const override { return true; }
virtual std::vector<size_t> output_alias(migraphx::shapes) const override { return {0}; };
};
TEST_CASE(stride_two_custom_op_test)
{
stride_two st;
migraphx::register_experimental_custom_op(st);
migraphx::program p;
migraphx::module m = p.get_main_module();
migraphx::shape s{migraphx_shape_float_type, {4, 4, 4}};
auto x = m.add_parameter("x", s);
auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {x});
m.add_return({stride_two_ins});
migraphx::compile_options options;
options.set_offload_copy();
p.compile(migraphx::target("gpu"), options);
migraphx::program_parameters pp;
std::vector<float> x_data(64);
std::iota(x_data.begin(), x_data.end(), 0);
pp.add("x", migraphx::argument(s, x_data.data()));
auto results = p.eval(pp);
auto result = results[0];
auto result_vec = result.as_vector<float>();
std::vector<float> expected_result = {0, 2, 8, 10, 32, 34, 40, 42};
EXPECT(result_vec == expected_result);
}
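For reference, a small standalone sketch (not part of the diff) of the index arithmetic behind expected_result above: stride_two halves every dimension and doubles every stride, so on the iota-filled {4, 4, 4} input the result is a {2, 2, 2} view with strides {32, 8, 2}, and each element equals its linear offset into the original buffer.

#include <cstddef>
#include <iostream>

int main()
{
    const std::size_t dims[3]    = {2, 2, 2};  // halved from {4, 4, 4}
    const std::size_t strides[3] = {32, 8, 2}; // doubled from {16, 4, 1}
    for(std::size_t i = 0; i < dims[0]; ++i)
        for(std::size_t j = 0; j < dims[1]; ++j)
            for(std::size_t k = 0; k < dims[2]; ++k)
                // the input holds iota values, so each element equals its offset
                std::cout << i * strides[0] + j * strides[1] + k * strides[2] << " ";
    std::cout << "\n"; // prints: 0 2 8 10 32 34 40 42
}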
TEST_CASE(run_simple_custom_op)
{
simple_custom_op simple_op;
migraphx::register_experimental_custom_op(simple_op);
migraphx::program p;
migraphx::shape s{migraphx_shape_int32_type, {4, 3}};
migraphx::shape trans_shape{migraphx_shape_int32_type, {3, 4}};
migraphx::module m = p.get_main_module();
auto x = m.add_parameter("x", s);
auto neg = m.add_instruction(migraphx::operation("neg"), x);
auto alloc = m.add_allocation(trans_shape);
auto neg_trans =
m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg});
auto neg_cont = m.add_instruction(migraphx::operation("contiguous"), {neg_trans});
auto custom_kernel =
m.add_instruction(migraphx::operation("simple_custom_op"), {neg_cont, alloc});
auto relu = m.add_instruction(migraphx::operation("relu"), custom_kernel);
m.add_return({relu});
migraphx::compile_options options;
options.set_offload_copy();
p.compile(migraphx::target("gpu"), options);
migraphx::program_parameters pp;
std::vector<int> x_data(12, -3);
pp.add("x", migraphx::argument(s, x_data.data()));
auto results = p.eval(pp);
auto result = results[0];
auto result_vec = result.as_vector<int>();
std::vector<int> expected_result(12, 0);
std::fill(expected_result.begin() + 6, expected_result.end(), 3);
EXPECT(bool{result == migraphx::argument(trans_shape, expected_result.data())});
}
TEST_CASE(custom_op_with_pre_and_post_subgraph_test)
{
half_copy_host hco;
migraphx::register_experimental_custom_op(hco);
stride_two st;
migraphx::register_experimental_custom_op(st);
migraphx::program p;
migraphx::shape s{migraphx_shape_float_type, {4, 6}};
migraphx::module m = p.get_main_module();
auto x = m.add_parameter("x", s);
// pre-subgraph
auto neg_ins = m.add_instruction(migraphx::operation("neg"), x);
auto trans_ins =
m.add_instruction(migraphx::operation("transpose", "{permutation: [1, 0]}"), {neg_ins});
auto cont_ins = m.add_instruction(migraphx::operation("contiguous"), {trans_ins});
// custom_op
migraphx::shape trans_shape{migraphx_shape_float_type, {6, 4}};
auto alloc = m.add_allocation(trans_shape);
auto half_copy_ins =
m.add_instruction(migraphx::operation("half_copy_host"), {cont_ins, alloc});
// post-subgraph
auto abs_ins = m.add_instruction(migraphx::operation("abs"), {half_copy_ins});
// another custom_op
auto stride_two_ins = m.add_instruction(migraphx::operation("stride_two"), {abs_ins});
// post-subgraph
auto relu_ins = m.add_instruction(migraphx::operation("relu"), {stride_two_ins});
m.add_return({relu_ins});
migraphx::compile_options options;
options.set_offload_copy();
p.compile(migraphx::target("gpu"), options);
migraphx::program_parameters pp;
std::vector<float> x_data(s.elements());
std::iota(x_data.begin(), x_data.end(), 0);
pp.add("x", migraphx::argument(s, x_data.data()));
auto results = p.eval(pp);
auto result = results[0];
auto result_vec = result.as_vector<float>();
std::vector<float> expected_result = {0, 0, 0, 0, 4, 16};
EXPECT(bool{result == migraphx::argument(migraphx::shape{migraphx_shape_float_type, {3, 2}},
expected_result.data())});
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -25,6 +25,8 @@
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include <migraphx/manage_ptr.hpp>
#include "test.hpp"
TEST_CASE(load_and_run)
@@ -44,11 +46,67 @@ TEST_CASE(load_and_run)
{
pp.add(name, migraphx::argument::generate(param_shapes[name]));
}
auto outputs = p.eval(pp);
CHECK(shapes_before.size() == outputs.size());
CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
}
using hip_ptr = MIGRAPHX_MANAGE_PTR(void, hipFree);
using stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);
stream_ptr get_stream()
{
hipStream_t stream;
auto err = hipStreamCreateWithFlags(&stream, 0);
EXPECT(err == hipSuccess);
return stream_ptr{stream};
}
hip_ptr get_hip_buffer(size_t size)
{
void* ptr;
auto err = hipMalloc(&ptr, size);
EXPECT(err == hipSuccess);
return hip_ptr{ptr};
}
TEST_CASE(load_and_run_async)
{
auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
auto shapes_before = p.get_output_shapes();
migraphx::compile_options options;
options.set_offload_copy(false);
p.compile(migraphx::target("gpu"), options);
auto shapes_after = p.get_output_shapes();
CHECK(shapes_before.size() == 1);
CHECK(shapes_before.size() == shapes_after.size());
CHECK(bool{shapes_before.front() == shapes_after.front()});
migraphx::program_parameters pp;
auto param_shapes = p.get_parameter_shapes();
stream_ptr stream = get_stream();
std::vector<hip_ptr> buffs;
std::vector<migraphx::argument> args;
for(auto&& name : param_shapes.names())
{
args.push_back(migraphx::argument::generate(param_shapes[name]));
buffs.push_back(get_hip_buffer(args.rbegin()->get_shape().bytes()));
auto err = hipMemcpy(buffs.rbegin()->get(),
args.rbegin()->data(),
args.rbegin()->get_shape().bytes(),
hipMemcpyHostToDevice);
EXPECT(err == hipSuccess);
pp.add(name, migraphx::argument(args.rbegin()->get_shape(), buffs.rbegin()->get()));
}
auto outputs = p.run_async(pp, stream.get());
CHECK(shapes_before.size() == outputs.size());
CHECK(bool{shapes_before.front() == outputs.front().get_shape()});
}
TEST_CASE(load_and_run_ctx)
{
auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
@@ -82,10 +140,10 @@ TEST_CASE(if_pl_test)
migraphx::program_parameters pp;
auto param_shapes = p.get_parameter_shapes();
auto xs = param_shapes["x"];
std::vector<float> xd(xs.bytes() / sizeof(float), 1.0);
std::vector<float> xd(xs.elements(), 1.0);
pp.add("x", migraphx::argument(xs, xd.data()));
auto ys = param_shapes["y"];
std::vector<float> yd(ys.bytes() / sizeof(float), 2.0);
std::vector<float> yd(ys.elements(), 2.0);
pp.add("y", migraphx::argument(ys, yd.data()));
char ccond = cond;
pp.add("cond", migraphx::argument(param_shapes["cond"], &ccond));
...
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "migraphx/dead_code_elimination.hpp"
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/fuse_pointwise.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
...
@@ -40,6 +40,10 @@
#include <migraphx/make_op.hpp>
#include <basic_ops.hpp>
#include <test.hpp>
#include "make_precompile_op.hpp"
// Treat some operators as compilable to enable lowering
MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")
void run_lowering(migraphx::program& p, bool offload_copy = false)
{
@@ -118,7 +122,7 @@ TEST_CASE(no_copy_dead_param)
auto xb = mm->add_instruction(migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
auto gx = mm->add_instruction(migraphx::make_op("hip::copy_to_gpu"), x, xb);
auto ab = mm->add_instruction(migraphx::make_op("hip::allocate", {{"shape", to_value(s)}}));
auto sum = mm->add_instruction(migraphx::make_op("gpu::add"), gx, gx, ab);
auto sum = mm->add_instruction(make_precompile_op("add"), gx, gx, ab);
auto r = mm->add_instruction(migraphx::make_op("hip::copy_from_gpu"), sum);
mm->add_return({r});
...
@@ -307,12 +307,14 @@ TEST_CASE(compile_math)
"erf(x)",
"exp(x)",
"floor(x)",
"fmod(x, x)",
"isnan(x)",
"log(x)",
"max(x, x)",
"min(x, x)",
"pow(x, 0)",
"pow(x, x)",
"remainder(x,x)",
"round(x)",
"rsqrt(x)",
"sin(x)",
...
@@ -21,48 +21,46 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/serialize.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/batch_norm_inference.hpp>
struct test_batchnorm_3d_per_actv : verify_program<test_batchnorm_3d_per_actv>
{
const size_t d1 = 2;
const size_t d2 = 4;
const size_t d3 = 5;
const size_t channels = 2;
const size_t batches = 3;
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2, d3}};
migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2, d3}};
auto x = mm->add_parameter("x", s);
auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
mm->add_instruction(
migraphx::make_op(
"batch_norm_inference",
{{"epsilon", 1.0e-6},
{"momentum", 0.8f},
{"bn_mode",
migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
x,
scale,
bias,
mean,
variance);
return p;
}
};
#ifndef MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
#define MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
#include <migraphx/operation.hpp>
#include <migraphx/gpu/compiler.hpp>
#include <migraphx/make_op.hpp>
// NOLINTNEXTLINE
#define MIGRAPHX_GPU_TEST_PRECOMPILE(...) \
struct test_compiler : migraphx::gpu::compiler<test_compiler> \
{ \
std::vector<std::string> names() const { return {__VA_ARGS__}; } \
\
template <class... Ts> \
migraphx::operation compile_op(Ts&&...) const \
{ \
MIGRAPHX_THROW("Not compilable"); \
} \
\
template <class... Ts> \
migraphx::gpu::compiler_replace compile(Ts&&...) const \
{ \
MIGRAPHX_THROW("Not compilable"); \
} \
};
inline migraphx::operation make_precompile_op(migraphx::rank<0>, const migraphx::operation& op)
{
return migraphx::make_op("gpu::precompile_op", {{"op", migraphx::to_value(op)}});
}
inline migraphx::operation make_precompile_op(migraphx::rank<1>, const std::string& name)
{
return make_precompile_op(migraphx::rank<0>{}, migraphx::make_op(name));
}
template <class T>
auto make_precompile_op(const T& x)
{
return make_precompile_op(migraphx::rank<1>{}, x);
}
#endif // MIGRAPHX_GUARD_TEST_GPU_MAKE_PRECOMPILE_OP_HPP
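For context, a minimal usage sketch (not part of the diff) of the header above, matching how the lowering tests in this commit call it; the half_type attribute value and the function name here are only illustrative:

#include "make_precompile_op.hpp"
#include <migraphx/make_op.hpp>
#include <migraphx/shape.hpp>

inline void make_precompile_op_usage_sketch()
{
    // rank<1> overload: build the wrapped operation from its name
    auto add_op = make_precompile_op("add");
    // rank<0> overload: wrap an already-constructed operation
    auto convert_op = make_precompile_op(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}));
    (void)add_op;
    (void)convert_op;
}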
@@ -37,10 +37,6 @@
#include <migraphx/functional.hpp>
#include <test.hpp>
using migraphx::trim;
// m test_gpu_mlir && ./bin/test_gpu_mlir
struct mlir_gpu_target : migraphx::gpu::target
{
std::string name() const { return "mlir"; }
@@ -88,7 +84,7 @@ migraphx::program create_program_from_mlir(const migraphx::module& mmlir)
inputs.push_back(mm->add_parameter("output", mmlir.get_output_shapes().front()));
migraphx::gpu::context ctx;
migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir), inputs);
migraphx::gpu::insert_mlir(*mm, mm->end(), compile_mlir(ctx, mmlir, inputs), inputs);
return p;
}
@@ -144,8 +140,8 @@ TEST_CASE(conv)
{
const std::string mlir_output = R"__migraphx__(
module {
func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
func.func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
}
@@ -167,8 +163,8 @@ TEST_CASE(conv_add_relu)
{
const std::string mlir_output = R"__migraphx__(
module {
func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
func.func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1], use_dynamic_same_auto_pad = 0 : i64} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
return %2 : tensor<1x2x2x2xf32>
...
@@ -21,7 +21,7 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "migraphx/instruction_ref.hpp"
#include <migraphx/instruction_ref.hpp>
#include <migraphx/gpu/context.hpp>
#include <migraphx/gpu/lowering.hpp>
#include <migraphx/gpu/target.hpp>
@@ -30,6 +30,7 @@
#include <migraphx/adjust_allocation.hpp>
#include <migraphx/gpu/pack_int8_args.hpp>
#include <migraphx/gpu/rocblas.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/auto_contiguous.hpp>
#include <migraphx/dead_code_elimination.hpp>
#include <migraphx/replace_allocate.hpp>
@@ -38,10 +39,13 @@
#include <migraphx/pass_manager.hpp>
#include <migraphx/make_op.hpp>
#include <test.hpp>
#include "make_precompile_op.hpp"
// Treat some operators as compilable to enable lowering
MIGRAPHX_GPU_TEST_PRECOMPILE("add", "mul", "convert")
void run_passes(migraphx::module& m)
void run_passes(migraphx::module& m, migraphx::gpu::context& ctx)
{
auto ctx = migraphx::gpu::context{};
migraphx::run_passes(m,
{migraphx::auto_contiguous{},
migraphx::gpu::lowering{&ctx, false},
@@ -52,18 +56,6 @@ void run_passes(migraphx::module& m)
migraphx::dead_code_elimination{}});
}
bool get_int8_x4_format()
{
bool int8_x4_format = true;
#if ROCBLAS_VERSION_MAJOR >= 2 && ROCBLAS_VERSION_MINOR >= 38
auto ctx = migraphx::gpu::context{};
rocblas_gemm_flags flag;
rocblas_query_int8_layout_flag(ctx.get_stream().get_rocblas(), &flag);
int8_x4_format = (flag == rocblas_gemm_flags_pack_int8x4);
#endif
return int8_x4_format;
}
TEST_CASE(quant_dot)
{
auto create_module = [] {
@@ -102,11 +94,13 @@ TEST_CASE(quant_dot)
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(m2_shape)}}));
packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), l2, alloc);
}
auto gemm =
m.add_instruction(migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
l1,
packa,
gemm_alloc);
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm",
{{"int8_x4_format", int8_x4},
{"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
l1,
packa,
gemm_alloc);
auto beta_broadcast = m.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", m3_shape.lens()}}), beta);
@@ -116,19 +110,19 @@ TEST_CASE(quant_dot)
m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
auto mul_alloc = m.add_instruction(
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(m3_shape)}}));
auto m3_beta =
m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
auto m3_beta = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
m.add_return({gemm_add});
return m;
};
auto m1 = create_module();
run_passes(m1);
bool flag = get_int8_x4_format();
auto m2 = create_optimized_int8_x4(flag);
auto ctx = migraphx::gpu::context{};
run_passes(m1, ctx);
bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
auto m2 = create_optimized_int8_x4(int8_x4);
EXPECT(m1 == m2);
}
@@ -187,21 +181,23 @@ TEST_CASE(quant_dot_trans)
// back result to int8
auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
auto tl1_convert = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
conta,
tl1_convert_alloc);
auto tl1_convert =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", alpha->get_shape().type()}})),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
auto tl1_alpha_int32 = m.add_instruction(
migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
auto tl1_alpha_int32 =
m.add_instruction(make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
// convert mul_res to int8
auto tl1_alpha_int8_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
auto tl1_alpha_int8 = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto tl1_alpha_int8 =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", conta->get_shape().type()}})),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto packb = contb;
if(int8_x4)
@@ -211,21 +207,24 @@ TEST_CASE(quant_dot_trans)
packb = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), contb, allocpb);
}
auto gemm =
m.add_instruction(migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
tl1_alpha_int8,
packb,
output);
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm",
{{"int8_x4_format", int8_x4},
{"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
tl1_alpha_int8,
packb,
output);
m.add_return({gemm});
return m;
};
auto m1 = create_module();
bool flag = get_int8_x4_format();
auto m2 = create_optimized_int8_x4(flag);
run_passes(m1);
auto ctx = migraphx::gpu::context{};
run_passes(m1, ctx);
bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
auto m2 = create_optimized_int8_x4(int8_x4);
EXPECT(m1 == m2);
}
@@ -292,11 +291,13 @@ TEST_CASE(quant_dot_pad)
packa = m.add_instruction(migraphx::make_op("gpu::int8_gemm_pack_a"), pl2, alloc);
}
auto gemm =
m.add_instruction(migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}),
pl1,
packa,
gemm_alloc);
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm",
{{"int8_x4_format", int8_x4},
{"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
pl1,
packa,
gemm_alloc);
auto beta_broadcast =
m.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", s3.lens()}}), beta);
@@ -306,18 +307,18 @@ TEST_CASE(quant_dot_pad)
m.add_instruction(migraphx::make_op("gpu::contiguous"), beta_broadcast, beta_alloc);
auto mul_alloc = m.add_instruction(
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(s3)}}));
auto m3_beta =
m.add_instruction(migraphx::make_op("gpu::mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(migraphx::make_op("gpu::add"), gemm, m3_beta, output);
auto m3_beta = m.add_instruction(make_precompile_op("mul"), l3, beta_contiguous, mul_alloc);
auto gemm_add = m.add_instruction(make_precompile_op("add"), gemm, m3_beta, output);
m.add_return({gemm_add});
return m;
};
auto m1 = create_module();
bool flag = get_int8_x4_format();
auto m2 = create_optimized_int8_x4(flag);
run_passes(m1);
auto ctx = migraphx::gpu::context{};
run_passes(m1, ctx);
bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
auto m2 = create_optimized_int8_x4(int8_x4);
EXPECT(m1 == m2);
}
@@ -396,14 +397,15 @@ TEST_CASE(quant_dot_trans_pad)
// back result to int8
auto tl1_convert_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(alpha_contiguous->get_shape())}}));
auto tl1_convert = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", alpha->get_shape().type()}}),
conta,
tl1_convert_alloc);
auto tl1_convert =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", alpha->get_shape().type()}})),
conta,
tl1_convert_alloc);
auto mul_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(tl1_convert->get_shape())}}));
auto tl1_alpha_int32 = m.add_instruction(
migraphx::make_op("gpu::mul"), alpha_contiguous, tl1_convert, mul_alloc);
auto tl1_alpha_int32 =
m.add_instruction(make_precompile_op("mul"), alpha_contiguous, tl1_convert, mul_alloc);
// convert mul_res to int8
auto tl1_alpha_int8_alloc = m.add_instruction(migraphx::make_op(
"hip::allocate", {{"shape", migraphx::to_value(conta->get_shape())}}));
@@ -415,10 +417,11 @@ TEST_CASE(quant_dot_trans_pad)
migraphx::make_op("hip::allocate", {{"shape", migraphx::to_value(ps1)}}));
}
auto tl1_alpha_int8 = m.add_instruction(
migraphx::make_op("gpu::convert", {{"target_type", conta->get_shape().type()}}),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto tl1_alpha_int8 =
m.add_instruction(make_precompile_op(migraphx::make_op(
"convert", {{"target_type", conta->get_shape().type()}})),
tl1_alpha_int32,
tl1_alpha_int8_alloc);
auto pa = tl1_alpha_int8;
if(int8_x4)
@@ -438,17 +441,23 @@ TEST_CASE(quant_dot_trans_pad)
}
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm", {{"int8_x4_format", int8_x4}}), pa, packb, output);
auto gemm = m.add_instruction(
migraphx::make_op("gpu::quant_gemm",
{{"int8_x4_format", int8_x4},
{"compute_fp32", migraphx::gpu::get_compute_fp32_flag()}}),
pa,
packb,
output);
m.add_return({gemm});
return m;
};
auto m1 = create_module();
bool flag = get_int8_x4_format();
auto m2 = create_optimized_int8_x4(flag);
run_passes(m1);
auto ctx = migraphx::gpu::context{};
run_passes(m1, ctx);
bool int8_x4 = migraphx::gpu::get_int8_x4_format(ctx);
auto m2 = create_optimized_int8_x4(int8_x4);
EXPECT(m1 == m2);
}
...
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <iostream>
#include <vector>
#include <migraphx/gpu/context.hpp>
#include <migraphx/context.hpp>
#include <migraphx/gpu/compile_hip.hpp>
#include <migraphx/gpu/kernel.hpp>
#include <migraphx/gpu/device_name.hpp>
#include <migraphx/par_for.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/gpu/target.hpp>
#include "test.hpp"
using hip_stream_ptr = MIGRAPHX_MANAGE_PTR(hipStream_t, hipStreamDestroy);
constexpr uint32_t stream_sync_test_val = 1337;
// NOLINTNEXTLINE
const std::string compare_numbers = R"__migraphx__(
#include <hip/hip_runtime.h>
extern "C" {
__global__ void compare(float* data)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (data[i] != 1337)
{
abort();
}
}
}
int main() {}
)__migraphx__";
migraphx::src_file make_src_file(const std::string& name, const std::string& content)
{
return {name, std::make_pair(content.data(), content.data() + content.size())};
}
hip_stream_ptr get_stream()
{
hipStream_t stream;
auto status = hipStreamCreate(&stream);
if(status != hipSuccess)
{
MIGRAPHX_THROW("Failed to get stream");
}
return hip_stream_ptr{stream};
}
TEST_CASE(test_stream_sync_compare_kernel)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::gpu::kernel k1{binaries.front(), "compare"};
auto input =
migraphx::fill_argument({migraphx::shape::float_type, {128}}, stream_sync_test_val);
auto ginput = migraphx::gpu::to_gpu(input);
hip_stream_ptr pstream = get_stream();
k1.launch(pstream.get(), input.get_shape().elements(), 1024)(ginput.cast<float>());
auto output = migraphx::gpu::from_gpu(ginput);
EXPECT(output == input);
}
TEST_CASE(test_stream_sync)
{
auto binaries = migraphx::gpu::compile_hip_src(
{make_src_file("check_stuff.cpp", compare_numbers)}, "", migraphx::gpu::get_device_name());
EXPECT(binaries.size() == 1);
migraphx::gpu::kernel k1{binaries.front(), "compare"};
const unsigned int m = 128;
const unsigned int k = 8192;
// Setup empty GPU memory buffer
migraphx::shape input_shape{migraphx::shape::float_type, {m, k}};
migraphx::shape output_shape{migraphx::shape::float_type, {m, m}};
auto input = migraphx::fill_argument(input_shape, 0);
auto ginput = migraphx::gpu::to_gpu(input);
auto output = migraphx::fill_argument(output_shape, 0);
auto goutput = migraphx::gpu::to_gpu(output);
hip_stream_ptr pstream = get_stream();
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {m, k}});
auto y = mm->add_literal(
migraphx::generate_literal(migraphx::shape{migraphx::shape::float_type, {k, m}}));
std::vector<float> data(m * m, stream_sync_test_val);
auto test_val = mm->add_literal(output_shape, data);
auto mult_out = mm->add_instruction(migraphx::make_op("dot"), x, y);
mm->add_instruction(migraphx::make_op("add"), mult_out, test_val);
p.compile(migraphx::gpu::target{});
// Run network and then verify with kernel
auto args = p.eval({{"x", ginput}, {"output", goutput}}, {pstream.get(), true});
k1.launch(pstream.get(), m * m, 1024)(goutput.cast<float>());
output = migraphx::gpu::from_gpu(goutput);
EXPECT(output != input);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -724,7 +724,7 @@ TEST_CASE(test39)
auto sub_modules = p.get_modules();
std::reverse(sub_modules.begin(), sub_modules.end());
for(auto& smod : sub_modules)
for(const auto& smod : sub_modules)
{
run_pass(*smod);
}
...
[Binary ONNX protobuf test graphs: batch_norm_1d_test, batch_norm_2d_test, batch_norm_3d_test, batch_norm_flat_test, and batch_norm_invalid_bias_rank_test, each defining a BatchNormalization node with x, scale, bias, mean, and variance inputs and output y; contents are not human-readable.]