Unverified Commit 2e128d9d authored by Chris Austen, committed by GitHub

Rocm56 dynbatch (#1737)



* Removed split_single_dyn_dim compile flag (#1711)
* Update C/C++ API for dynamic batch (#1712)
* Python API update for dynamic batch (#1723)
* Dynamic batch C++ API example (#1728)
* Optimize file space of github runners (#1743)
Co-authored-by: Charlie Lin <charlie.lin@amd.com>
parent a46f378e
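
In short, the new C++ flow lets a model be parsed with a dynamic batch dimension, compiled once, and evaluated at any batch size in the declared range. A minimal sketch mirroring the offload-copy test added below (the model file and dimensions are illustrative, taken from that test):

#include <migraphx/migraphx.hpp>
#include <vector>

int main()
{
    // Input "0" gets a dynamic batch dimension: min 1, max 4, optimals {2, 4};
    // the remaining dimensions stay fixed.
    migraphx::onnx_options o_options;
    migraphx::dynamic_dimensions dyn_dims = {migraphx::dynamic_dimension{1, 4, {2, 4}},
                                             migraphx::dynamic_dimension{3, 3},
                                             migraphx::dynamic_dimension{4, 4},
                                             migraphx::dynamic_dimension{4, 4}};
    o_options.set_dyn_input_parameter_shape("0", dyn_dims);
    auto p = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", o_options);

    // Offload copy lets eval() take host buffers directly.
    migraphx::compile_options c_options;
    c_options.set_offload_copy();
    p.compile(migraphx::target("gpu"), c_options);

    // At run time, feed static arguments for a concrete batch size (here 2).
    std::vector<float> a(2 * 3 * 4 * 4, 0.12);
    std::vector<float> c(2 * 3 * 3 * 3, 0.75);
    migraphx::program_parameters pp;
    pp.add("0",
           migraphx::argument(migraphx::shape(migraphx_shape_float_type, {2, 3, 4, 4}),
                              a.data()));
    pp.add("1",
           migraphx::argument(migraphx::shape(migraphx_shape_float_type, {2, 3, 3, 3}),
                              c.data()));
    auto outputs = p.eval(pp);
}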
@@ -25,7 +25,6 @@
#include <hip/hip_runtime_api.h>
#include <migraphx/migraphx.h>
#include <migraphx/migraphx.hpp>
#include <migraphx/manage_ptr.hpp>
#include "test.hpp"
@@ -72,6 +71,105 @@ hip_ptr get_hip_buffer(size_t size)
    return hip_ptr{ptr};
}
// TODO: placeholder until we have a way to copy tuple arguments to/from device through c++ api
// TEST_CASE(dynamic_batch_load_and_run)
//{
// migraphx::onnx_options o_options;
// migraphx::dynamic_dimensions dyn_dims = {{1, 4, {2, 4}}, {3, 3}, {4, 4}, {4, 4}};
// o_options.set_dyn_input_parameter_shape("0", dyn_dims);
// dyn_dims = {{2, 2}, {3, 3}, {3, 3}, {3, 3}};
// o_options.set_dyn_input_parameter_shape("1", dyn_dims);
// auto p = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", o_options);
// migraphx::compile_options c_options;
// c_options.set_split_single_dyn_dim();
// p.compile(migraphx::target("gpu"), c_options);
// auto out_shapes = p.get_output_shapes();
// CHECK(out_shapes.size() == 1);
// EXPECT(out_shapes[0].dynamic());
//
// std::vector<float> a(2 * 3 * 4 * 4, 0.12);
// std::vector<float> c(2 * 3 * 3 * 3, 0.75);
//
// auto param_shapes = p.get_parameter_shapes();
// int batch_size = 2;
// std::unordered_map<std::string, migraphx::argument> arg_map;
//
// arg_map["0"] = migraphx::argument(param_shapes["0"].to_static(batch_size), a.data());
// arg_map["1"] = migraphx::argument(param_shapes["1"].to_static(batch_size), c.data());
//
// migraphx::program_parameters pp;
// std::vector<hip_ptr> buffs;
// std::vector<migraphx::argument> args;
//
// // copy to GPU and create parameter map
// for(auto&& name : param_shapes.names())
// {
// if(arg_map.find(name) != arg_map.end())
// {
// args.push_back(arg_map.at(name));
// }
// else
// {
// migraphx::shape static_shape = param_shapes[name].to_static(batch_size);
// auto output_arg = migraphx::argument(static_shape);
// args.push_back(output_arg);
// }
// buffs.push_back(get_hip_buffer(args.rbegin()->get_shape().bytes()));
// auto err = hipMemcpy(buffs.rbegin()->get(),
// args.rbegin()->data(),
// args.rbegin()->get_shape().bytes(),
// hipMemcpyHostToDevice);
// EXPECT(err == hipSuccess);
// pp.add(name, migraphx::argument(args.rbegin()->get_shape(), buffs.rbegin()->get()));
// }
//
// auto output = p.eval(pp)[0];
//
// // copy output back to host
// auto host_arg = migraphx::argument(output.get_shape());
// auto err = hipMemcpy(
// host_arg.data(), output.data(), output.get_shape().bytes(), hipMemcpyDeviceToHost);
// EXPECT(err == hipSuccess);
//}
TEST_CASE(dynamic_batch_load_and_run_offload)
{
migraphx::onnx_options o_options;
migraphx::dynamic_dimensions dyn_dims = {migraphx::dynamic_dimension{1, 4, {2, 4}},
migraphx::dynamic_dimension{3, 3},
migraphx::dynamic_dimension{4, 4},
migraphx::dynamic_dimension{4, 4}};
o_options.set_dyn_input_parameter_shape("0", dyn_dims);
dyn_dims = {migraphx::dynamic_dimension{2, 2},
migraphx::dynamic_dimension{3, 3},
migraphx::dynamic_dimension{3, 3},
migraphx::dynamic_dimension{3, 3}};
o_options.set_dyn_input_parameter_shape("1", dyn_dims);
auto p = migraphx::parse_onnx("conv_dynamic_batch_test.onnx", o_options);
auto shapes_before = p.get_output_shapes();
migraphx::compile_options c_options;
c_options.set_offload_copy();
p.compile(migraphx::target("gpu"), c_options);
auto out_shapes = p.get_output_shapes();
CHECK(out_shapes.size() == 1);
EXPECT(out_shapes[0].dynamic());
// batch size = 2
std::vector<float> a(2 * 3 * 4 * 4, 0.12);
std::vector<float> c(2 * 3 * 3 * 3, 0.75);
migraphx::program_parameters pp;
auto param_shapes = p.get_parameter_shapes();
pp.add("0",
migraphx::argument(migraphx::shape(migraphx_shape_float_type, {2, 3, 4, 4}), a.data()));
pp.add("1",
migraphx::argument(migraphx::shape(migraphx_shape_float_type, {2, 3, 3, 3}), c.data()));
auto outputs = p.eval(pp);
CHECK(shapes_before.size() == outputs.size());
CHECK(bool{outputs.front().get_shape() ==
migraphx::shape(migraphx_shape_float_type, {2, 1, 3, 3})});
}
TEST_CASE(load_and_run_async)
{
    auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
...
@@ -86,8 +86,8 @@ def test_nonzero():
    params = {}
    shapes = p.get_parameter_shapes()
    params["data"] = np.array([1, 1, 0,
                               1]).reshape(shapes["data"].lens()).astype(bool)
    r = p.run(params)
    print(r)
@@ -127,15 +127,54 @@ def test_if_pl():
    params["x"] = np.ones(6).reshape(shapes["x"].lens()).astype(np.float32)
    params["y"] = np.array([2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0
                            ]).reshape(shapes["y"].lens()).astype(np.float32)
    params["cond"] = np.array([1]).reshape(()).astype(bool)
    r = p.run(params)[-1]
    print(r)


def test_dyn_batch():
    a = migraphx.shape.dynamic_dimension(1, 4, {2, 4})
    b = migraphx.shape.dynamic_dimension(3, 3)
    c = migraphx.shape.dynamic_dimension(32, 32)
    dd_map = {"0": [a, b, c, c]}
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx",
                            map_dyn_input_dims=dd_map)
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"))
    print(p)

    def run_prog(batch_size):
        params = {}
        for key, value in p.get_parameter_shapes().items():
            # convert to a static shape
            if value.dynamic():
                dds = value.dyn_dims()
                new_lens = []
                for dd in dds:
                    if dd.is_fixed():
                        new_lens.append(dd.min)
                    else:
                        new_lens.append(batch_size)
                s = migraphx.shape(type=value.type_string(), lens=new_lens)
            else:
                s = value
            print("Parameter {} -> {}".format(key, s))
            params[key] = migraphx.generate_argument(s)
        r = p.run(params)
        print(r)

    run_prog(1)
    run_prog(2)
    run_prog(3)
    run_prog(4)

test_conv_relu()
test_sub_uint64()
test_neg_int64()
test_fp16_imagescaler()
test_if_pl()
test_nonzero()
test_dyn_batch()
@@ -23,16 +23,53 @@
#####################################################################################
import migraphx


def test_conv_relu():
    p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx")
    print(p)
    print("Compiling ...")
    p.compile(migraphx.get_target("gpu"), offload_copy=False)
    print(p)
    params = {}
    for key, value in p.get_parameter_shapes().items():
        print("Parameter {} -> {}".format(key, value))
        params[key] = migraphx.to_gpu(migraphx.generate_argument(value))
    r = migraphx.from_gpu(p.run(params)[-1])
    print(r)
# TODO: placeholder until tuple shapes and arguments exposed
#def test_dyn_batch():
# a = migraphx.shape.dynamic_dimension(1, 4, {2, 4})
# b = migraphx.shape.dynamic_dimension(3, 3)
# c = migraphx.shape.dynamic_dimension(32, 32)
# dd_map = {"0": [a, b, c, c]}
# p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx",
# map_dyn_input_dims=dd_map)
# print(p)
# print("Compiling ...")
# p.compile(migraphx.get_target("gpu"), offload_copy=False)
#
# print(p)
#
# def run_prog(batch_size):
# params = {}
# for key, value in p.get_parameter_shapes().items():
# print("Parameter {} -> {}".format(key, value))
# params[key] = migraphx.to_gpu(
# migraphx.generate_argument(value.to_static(batch_size)))
#
# print("before_output")
# outputs = p.run(params)
# print(outputs)
# r = migraphx.from_gpu(p.run(params)[-1])
# print(r)
#
# run_prog(1)
# run_prog(2)
# run_prog(3)
# run_prog(4)
test_conv_relu()
@@ -29,6 +29,7 @@ def test_create_shape():
    assert s.standard()
    assert s.packed()
    assert s.lens() == [1, 64, 3, 3]
    assert s.ndim() == 4


def test_create_shape_broadcast():
@@ -49,6 +50,35 @@ def test_create_shape_type():
    assert s.type_size() == 4


def test_create_dyn_dims():
    a = migraphx.shape.dynamic_dimension()
    assert a.is_fixed()
    assert a.min == 0
    b = migraphx.shape.dynamic_dimension(4, 4)
    assert b.is_fixed()
    assert b.max == 4
    c = migraphx.shape.dynamic_dimension(1, 4, {2, 4})
    assert not c.is_fixed()
    assert c.min == 1
    assert c.max == 4
    assert c.optimals == {2, 4}
    dyn_dims = [a, b]
    dyn_dims.append(c)
    assert dyn_dims[1] == b


def test_create_dyn_shape():
    a = migraphx.shape.dynamic_dimension(1, 4, {2, 4})
    b = migraphx.shape.dynamic_dimension(4, 4)
    dds = [a, b]
    dyn_shape = migraphx.shape(type='float', dyn_dims=dds)
    assert dyn_shape.dynamic()
    assert dyn_shape.dyn_dims()[0].min == dds[0].min
    assert dyn_shape.dyn_dims()[0].max == dds[0].max
    assert dyn_shape.dyn_dims()[0].optimals == dds[0].optimals


def test_type_enum():
    mgx_types = [
        'bool_type', 'double_type', 'float_type', 'half_type', 'int16_type',
@@ -63,3 +93,5 @@ if __name__ == "__main__":
    test_create_shape()
    test_create_shape_broadcast()
    test_create_shape_type()
    test_create_dyn_dims()
    test_create_dyn_shape()
@@ -1132,50 +1132,6 @@ TEST_CASE(conv_dyn_batch_test)
    result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
    EXPECT(migraphx::verify_range(results_vector, sol));
a = {2.71567607, -0.9960829, 0.91671127, 0.28140706, 0.63235772, 0.08077253, 0.80927712,
-0.59108931, -1.05421555, -2.76622486, -0.85044265, -0.52049929, 0.67726439, -0.65290606,
0.02345525, -0.33579525, 0.38901961, 1.05473483, -1.31188095, 1.8963089, -0.07265259,
0.947339, 0.41949373, -0.70814759, 0.25892952, 1.07311416, 1.2571274, -0.62318051,
-0.19951548, -0.94232577, -0.29393643, 0.42292568, -0.80230367, 1.40909171, 0.63617158,
0.13900366, 1.09253144, -0.15265895, 1.54781747, 0.72780299, 1.09189606, -0.38068101,
0.97057933, -0.58958799, 1.56188643, 0.21474874, 0.58725154, -1.27097559, -0.03024297,
1.09437096, -0.4897908, 0.34838957, -1.31042492, -1.69069934, 0.86956722, -0.40457946,
0.46691212, 1.29273605, 0.26464137, 0.22073045, -1.02178168, 0.22163901, -1.84387338,
0.75522131, -0.45775682, -0.42241111, -1.50944722, 1.07256448, -1.95876884, -0.28106022,
0.3341668, 2.13129425, -1.14728117, -1.06555498, -0.298444, -0.88322699, -0.65866792,
-2.06007552, 0.01374334, 0.45612028, 0.52715492, 1.01914406, -1.72659791, 0.80650896,
0.16860051, 2.24112225, -0.78620857, 0.36566174, -0.07020134, -0.47976932, -0.68230027,
-0.94711417, -0.54506505, 1.66504931, -0.71860826, 0.61132306};
c = {-0.14601797, -0.13000923, 0.06521662, 0.06178288, -0.11083675, 0.10154136, 0.09990512,
0.06030385, -0.11374587, -0.17523311, -0.14344215, 0.17802463, 0.06300922, -0.15325832,
0.07066704, 0.05166031, 0.00615084, -0.02606523, 0.08083995, -0.17913306, 0.0624622,
0.0735731, -0.04198661, -0.0164391, -0.06374192, 0.16569914, 0.10681538, 0.07370754,
0.02802075, 0.00282027, 0.15104802, -0.11084409, -0.00197773, 0.07924436, 0.03528272,
0.04765259, -0.15896152, 0.07917164, 0.12125669, -0.1154705, -0.11999125, 0.12749968,
-0.06269585, 0.18658121, -0.03944227, 0.0111798, -0.17731084, 0.11789055, -0.09982193,
0.08142821, 0.0729029, 0.11303909, 0.12735154, 0.03885292};
sol = {-0.20817225,
0.87965256,
0.14958936,
-1.24887264,
-0.06540672,
0.20778663,
0.40456355,
-0.99900877};
migraphx::shape input_fixed_shape1{migraphx::shape::float_type, {1, 3, 4, 4}};
migraphx::parameter_map params1;
params1["X"] = migraphx::argument(input_fixed_shape1, a.data());
params1["W"] = migraphx::argument(weights_shape, c.data());
result = p.eval(params1).back();
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, sol));
}

TEST_CASE(conv_dyn_img_shape_test)
...
@@ -201,6 +201,20 @@ TEST_CASE(dynamic_dimension_add_sub_fixed)
    EXPECT((2 + e) == d);
}
TEST_CASE(dynamic_dimension_serialize)
{
using migraphx::shape;
auto a = shape::dynamic_dimension{2, 5, {2, 3}};
auto b = shape::dynamic_dimension{3, 6, {3}};
auto v1 = migraphx::to_value(a);
auto v2 = migraphx::to_value(b);
EXPECT(v1 != v2);
auto c = migraphx::from_value<shape::dynamic_dimension>(v1);
EXPECT(a == c);
auto d = migraphx::from_value<shape::dynamic_dimension>(v2);
EXPECT(b == d);
}
TEST_CASE(test_shape_dynamic_errors)
{
    using migraphx::shape;
...
@@ -46,11 +46,4 @@ struct test_split_single_dyn_dim : verify_program<test_split_single_dyn_dim>
        mm->add_return({add_ins});
        return p;
    }
    migraphx::compile_options get_compile_options() const
    {
        migraphx::compile_options co;
        co.split_single_dyn_dim = true;
        return co;
    };
};
@@ -764,9 +764,12 @@ const Target* object_cast(const U* x)
    return reinterpret_cast<const Target*>(x);
}

template <class T, class... Ts, class Target = std::remove_pointer_t<T>>
Target* allocate(Ts&&... xs)
{
    if constexpr(std::is_aggregate<Target>{})
        return new Target{std::forward<Ts>(xs)...}; // NOLINT
    else
        return new Target(std::forward<Ts>(xs)...); // NOLINT
}
...
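
The `if constexpr` dispatch above is needed because several types now created through the C API (for example `shape::dynamic_dimension`) are aggregates with no user-declared constructors, so they must be brace-initialized, while non-aggregates still need parentheses. A self-contained sketch of the same technique; the two struct types here are hypothetical, for illustration only:

#include <type_traits>
#include <utility>

// 'agg' mimics an aggregate like shape::dynamic_dimension;
// 'non_agg' has a user-declared constructor.
struct agg
{
    int min;
    int max;
};

struct non_agg
{
    non_agg(int a, int b) : v(a + b) {}
    int v;
};

template <class T, class... Ts, class Target = std::remove_pointer_t<T>>
Target* allocate(Ts&&... xs)
{
    if constexpr(std::is_aggregate<Target>{})
        return new Target{std::forward<Ts>(xs)...}; // aggregates require braces
    else
        return new Target(std::forward<Ts>(xs)...); // everything else uses parens
}

int main()
{
    auto* a = allocate<agg*>(1, 4);     // expands to new agg{1, 4}
    auto* b = allocate<non_agg*>(1, 4); // expands to new non_agg(1, 4)
    int r = a->max + b->v;              // 4 + 5
    delete a;
    delete b;
    return r == 9 ? 0 : 1;
}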
@@ -24,6 +24,7 @@
#include <migraphx/execution_environment.hpp>
#include <migraphx/migraphx.h>
#include <migraphx/rank.hpp>
#include <migraphx/ranges.hpp>
#include <migraphx/shape.hpp>
#include <migraphx/program.hpp>
#include <migraphx/onnx.hpp>
@@ -145,6 +146,11 @@ void set_default_dim_value(onnx_options& options, size_t value)
    options.default_dim_value = value;
}
void set_default_dyn_dim_value(onnx_options& options, const shape::dynamic_dimension& dd)
{
options.default_dyn_dim_value = dd;
}
void set_default_loop_iterations(onnx_options& options, int64_t value)
{
    options.max_loop_iterations = value;
@@ -161,6 +167,13 @@ void set_input_parameter_shape(onnx_options& options,
    options.map_input_dims[std::string(name)] = std::move(dims);
}
void set_dyn_input_parameter_shape(onnx_options& options,
const char* name,
std::vector<shape::dynamic_dimension> dyn_dims)
{
options.map_dyn_input_dims[std::string(name)] = std::move(dyn_dims);
}
void set_input_parameter_shape(tf_options& options, const char* name, std::vector<std::size_t> dims)
{
    options.map_input_dims[std::string(name)] = std::move(dims);
@@ -187,6 +200,12 @@ std::vector<const char*> get_names(const std::unordered_map<std::string, Value>&
    return result;
}
template <class T>
std::set<T> make_set(const T* x, std::size_t n)
{
return {x, x + n};
}
void quantize_fp16_with_op_names(program& prog, std::vector<std::string>& names)
{
    if(names.empty())
...
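
The `make_set` helper added in this hunk converts the pointer-plus-length pair a C API caller passes (for example, the optimal values of a dynamic dimension) into the `std::set` the C++ types expect. A usage sketch under that assumption; the values are illustrative:

#include <cstddef>
#include <set>

template <class T>
std::set<T> make_set(const T* x, std::size_t n)
{
    return {x, x + n}; // range-construct the set from [x, x + n)
}

int main()
{
    // A C caller hands over raw storage plus an element count.
    const std::size_t optimals[] = {2, 4};
    auto s = make_set(optimals, 2); // yields std::set<std::size_t>{2, 4}
    return s.size() == 2 ? 0 : 1;
}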