"git@developer.sourcefind.cn:gaoqiong/migraphx.git" did not exist on "6b36c82ea39ad5ece5fcbcac43a7eee98aada741"
Unverified Commit adccec52 authored by Charlie Lin, committed by GitHub

Driver dynamic batch update (#1652)

Examples:

bin/driver verify /codes/onnx_models/resnet50-v1-7/resnet50-v1-7.onnx --split-single-dyn-dim --batch 3 --dyn-input-dim @data "[{min:1, max:4}, 3, 224, 224]"

bin/driver compile /codes/onnx_models/resnet50-v1-7/resnet50-v1-7.onnx --split-single-dyn-dim --default-dyn-dim "{min:1, max:10}" --output resnet50_batch1-10.mxr

bin/driver perf resnet50_batch1-10.mxr --batch 4
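
Note: the dimension strings above are relaxed JSON; the driver runs them through convert_to_json before parsing (see the loader changes below), so unquoted keys such as {min:1, max:4} are accepted, and the optimals field may be omitted.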
parent f201285c
...@@ -32,6 +32,10 @@ Disable fast math optimization
Perform an exhaustive search to find the fastest version of generated kernels for selected backend
.. option:: --split-single-dyn-dim
Enable the split single dynamic dimension pass
.. option:: --fp16
Quantize for fp16
...
...@@ -24,7 +24,7 @@ Load as MIGraphX JSON
.. option:: --batch [unsigned int] (Default: 1)
For a static model, sets the batch size. For a dynamic batch model, sets the batch size at runtime.
.. option:: --nhwc
...@@ -46,6 +46,14 @@ Trim instructions from the end (Default: 0)
Dim of a parameter (format: "@name d1 d2 dn")
.. option:: --dyn-input-dim [std::vector<std::string>]
Set dynamic dimensions of a parameter using JSON formatting (format: "@name" "dynamic_dimension_json")
.. option:: --default-dyn-dim
Set the default dynamic dimension (format: {min:x, max:y, optimals:[o1,o2,...]})
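For example, loading a model with a dynamic batch dimension (hypothetical model path):
bin/driver compile model.onnx --split-single-dyn-dim --dyn-input-dim @data "[{min:1, max:4}, 3, 224, 224]"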
.. option:: --optimize, -O
Optimize when reading
...
...@@ -29,7 +29,7 @@ See below for a comprehensive list of commands and option arguments, as well as
| --tf | Load file as a tensorflow graph |
| --migraphx | Load file as a migraphx graph |
| --migraphx-json | Load file as a migraphx JSON graph |
| --batch | For a static model, sets the batch size. For a dynamic batch model, sets the batch size at runtime. |
| --nhwc | Treat tensorflow format as nhwc |
| --nchw | Treat tensorflow format as nchw |
| --skip-unknown-operators | Skip unknown operators when parsing and continue to parse |
...@@ -44,12 +44,16 @@ See below for a comprehensive list of commands and option arguments, as well as
| --output \| -o | Output to file |
| --fill0 | Fill parameter with 0s |
| --fill1 | Fill parameter with 1s |
| --input-dim | Set static dimensions of a parameter |
| --dyn-input-dim | Set dynamic dimensions of a parameter |
| --default-dyn-dim | Set default dynamic dimension |
| --gpu | Compile on the gpu |
| --cpu | Compile on the cpu |
| --ref | Compile on the reference implementation |
| --enable-offload-copy | Enable implicit offload copying |
| --disable-fast-math | Disable fast math optimization |
| --exhaustive-tune | Enable exhaustive search to find fastest kernel |
| --split-single-dyn-dim | Enable split_single_dyn_dim compiler pass |
| --fp16 | Quantize for fp16 |
| --int8 | Quantize for int8 |
| --tolerance | Tolerance for errors |
...
...@@ -147,6 +147,13 @@ struct value_parser
{
template <MIGRAPHX_REQUIRES(not std::is_enum<T>{} and not is_multi_value<T>{})>
static T apply(const std::string& x)
{
// strings may contain whitespace, so return them unmodified instead of stream-parsing
if constexpr(std::is_same<T, std::string>{})
{
return x;
}
else
{
T result;
std::stringstream ss;
...@@ -156,6 +163,7 @@ struct value_parser
throw std::runtime_error("Failed to parse '" + x + "' as " + type_name<T>::apply());
return result;
}
}
template <MIGRAPHX_REQUIRES(std::is_enum<T>{} and not is_multi_value<T>{})>
static T apply(const std::string& x)
...
...@@ -33,6 +33,7 @@
#include <migraphx/tf.hpp>
#include <migraphx/onnx.hpp>
#include <migraphx/stringutils.hpp>
#include <migraphx/convert_to_json.hpp>
#include <migraphx/load_save.hpp>
#include <migraphx/json.hpp>
#include <migraphx/version.h>
...@@ -68,7 +69,9 @@ struct loader
bool brief = false;
std::string output_type;
std::string output;
std::string default_dyn_dim;
std::vector<std::string> param_dims;
std::vector<std::string> dyn_param_dims;
std::vector<std::string> output_names;
void parse(argument_parser& ap)
...@@ -83,7 +86,11 @@ struct loader
ap(file_type, {"--tf"}, ap.help("Load as tensorflow"), ap.set_value("tf"));
ap(file_type, {"--migraphx"}, ap.help("Load as MIGraphX"), ap.set_value("migraphx"));
ap(file_type, {"--migraphx-json"}, ap.help("Load as MIGraphX JSON"), ap.set_value("json"));
ap(batch,
{"--batch"},
ap.help("For a static model, sets default_dim_value size (commonly batch size). For a "
"dynamic batch model, sets the batch "
"size at runtime."));
ap(is_nhwc, {"--nhwc"}, ap.help("Treat tensorflow format as nhwc"), ap.set_value(true));
ap(skip_unknown_operators,
{"--skip-unknown-operators"},
...@@ -96,7 +103,16 @@ struct loader
ap.help("Dim of a parameter (format: \"@name d1 d2 dn\")"),
ap.append(),
ap.nargs(2));
ap(dyn_param_dims,
{"--dyn-input-dim"},
ap.help("Dynamic dimensions of a parameter (format: \"@name_1\" \"[{min:x, max:y, "
"optimals:[o1,o2,...]}, dim2,dim3, ...]\", \"@name_2\", ... You can supply a "
"single integer value for a dimension to specify it as fixed."),
ap.append(),
ap.nargs(2));
ap(default_dyn_dim,
{"--default-dyn-dim"},
ap.help("Default dynamic dimension (format: \"{min:x, max:y, optimals:[o1,o2]}\")."));
ap(output_names,
{"--output-names"},
ap.help("Names of node output (format: \"name_1 name_2 name_n\")"),
...@@ -147,6 +163,40 @@ struct loader
return map_input_dims;
}
static auto parse_dyn_dims_json(const std::string& dd_json)
{
// expecting a json string like "[{min:1,max:64,optimals:[1,2,4,8]},3,224,224]"
auto v = from_json_string(convert_to_json(dd_json));
std::vector<migraphx::shape::dynamic_dimension> dyn_dims;
std::transform(v.begin(), v.end(), std::back_inserter(dyn_dims), [&](auto x) {
if(x.is_object())
return from_value<migraphx::shape::dynamic_dimension>(x);
auto d = x.template to<std::size_t>();
return migraphx::shape::dynamic_dimension{d, d};
});
return dyn_dims;
}
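// Illustrative example (not part of the patch): the input string
// "[{min:1,max:64,optimals:[1,2,4,8]},3,224,224]" parses to the
// dynamic_dimensions {1,64,{1,2,4,8}}, {3,3}, {224,224}, {224,224}.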
static auto parse_dyn_dims_map(const std::vector<std::string>& param_dyn_dims)
{
// expecting vector of strings formatted like
// {"@param_name_0", "dd_json_0", "@param_name_1", "dd_json_1", ...}
std::unordered_map<std::string, std::vector<shape::dynamic_dimension>> map_dyn_input_dims;
std::string name = "";
for(auto&& x : param_dyn_dims)
{
if(x[0] == '@')
{
name = x.substr(1);
}
else
{
map_dyn_input_dims[name] = parse_dyn_dims_json(x);
}
}
return map_dyn_input_dims;
}
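// Illustrative example (not part of the patch): the pair
// {"@data", "[{min:1,max:4},3,224,224]"} yields
// map_dyn_input_dims["data"] = {{1,4}, {3,3}, {224,224}, {224,224}}.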
static auto parse_output_names(const std::vector<std::string>& output_names_info)
{
std::vector<std::string> output_node_names;
...@@ -158,13 +208,44 @@ struct loader
return output_node_names;
}
tf_options get_tf_options() const
{
auto map_input_dims = parse_param_dims(param_dims);
auto output_node_names = parse_output_names(output_names);
tf_options options;
options.is_nhwc = is_nhwc;
options.batch_size = batch;
options.map_input_dims = map_input_dims;
options.output_node_names = output_node_names;
return options;
}
onnx_options get_onnx_options() const
{
auto map_input_dims = parse_param_dims(param_dims);
auto map_dyn_input_dims = parse_dyn_dims_map(dyn_param_dims);
onnx_options options;
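// --default-dyn-dim, when given, takes precedence over --batch as the
// default dimension value for ONNX parsing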
if(default_dyn_dim.empty())
{
options.default_dim_value = batch;
}
else
{
auto v = from_json_string(convert_to_json(default_dyn_dim));
options.default_dyn_dim_value = from_value<migraphx::shape::dynamic_dimension>(v);
}
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = true;
options.map_input_dims = map_input_dims;
options.map_dyn_input_dims = map_dyn_input_dims;
return options;
}
program load()
{
program p;
if(model.empty())
{
auto map_input_dims = parse_param_dims(param_dims);
auto output_node_names = parse_output_names(output_names);
if(file_type.empty())
{
if(ends_with(file, ".onnx"))
...@@ -179,16 +260,11 @@ struct loader
std::cout << "Reading: " << file << std::endl;
if(file_type == "onnx")
{
p = parse_onnx(file, get_onnx_options());
options.default_dim_value = batch;
options.skip_unknown_operators = skip_unknown_operators;
options.print_program_on_error = true;
options.map_input_dims = map_input_dims;
p = parse_onnx(file, options);
}
else if(file_type == "tf")
{
p = parse_tf(file, get_tf_options());
}
else if(file_type == "json")
{
...@@ -289,14 +365,21 @@ struct program_params
ap(fill1, {"--fill1"}, ap.help("Fill parameter with 1s"), ap.append(), ap.nargs(2));
}
auto generate(const program& p, const target& t, bool offload, unsigned batch)
{
parameter_map m;
auto param_shapes = p.get_parameter_shapes();
std::unordered_map<std::string, shape> static_param_shapes;
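// convert each (possibly dynamic) parameter shape to a static shape,
// substituting the runtime batch for any non-fixed dynamic dimension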
std::transform(
param_shapes.cbegin(),
param_shapes.cend(),
std::inserter(static_param_shapes, static_param_shapes.end()),
[&](const auto& x) { return std::make_pair(x.first, x.second.to_static(batch)); });
for(auto&& s : fill0)
m[s] = fill_argument(static_param_shapes.at(s), 0);
for(auto&& s : fill1)
m[s] = fill_argument(static_param_shapes.at(s), 1);
fill_param_map(m, static_param_shapes, t, offload);
return m;
}
};
...@@ -353,13 +436,18 @@ struct compiler
{"--exhaustive-tune"},
ap.help("Exhaustively search for best tuning parameters for kernels"),
ap.set_value(true));
ap(co.split_single_dyn_dim,
{"--split-single-dyn-dim"},
ap.help("If there is a single non-fixed dynamic dimension in the model, then split to "
"static submodules"),
ap.set_value(true));
ap(quantize, {"--fp16"}, ap.help("Quantize for fp16"), ap.set_value(precision::fp16)); ap(quantize, {"--fp16"}, ap.help("Quantize for fp16"), ap.set_value(precision::fp16));
ap(quantize, {"--int8"}, ap.help("Quantize for int8"), ap.set_value(precision::int8)); ap(quantize, {"--int8"}, ap.help("Quantize for int8"), ap.set_value(precision::int8));
} }
auto params(const program& p) auto params(const program& p)
{ {
return parameters.generate(p, ct.get_target(), co.offload_copy); return parameters.generate(p, ct.get_target(), co.offload_copy, l.batch);
} }
program compile() program compile()
...@@ -432,7 +520,7 @@ struct verify : command<verify>
std::cout << p << std::endl;
auto t = c.ct.get_target();
auto m = c.parameters.generate(p, t, true, c.l.batch);
if(per_instruction)
{
...
...@@ -39,36 +39,25 @@ auto get_hash(const T& x)
return std::hash<T>{}(x);
}
parameter_map fill_param_map(parameter_map& m,
const std::unordered_map<std::string, shape>& param_shapes,
const target& t,
bool offload)
{
for(auto&& x : param_shapes)
{
argument& arg = m[x.first];
if(arg.empty())
{
assert(not x.second.dynamic());
arg = generate_argument(x.second, get_hash(x.first));
}
if(not offload)
arg = t.copy_to(arg);
}
return m;
}
parameter_map fill_param_map(parameter_map& m, const program& p, bool gpu)
{
for(auto&& x : p.get_parameter_shapes())
{
argument& arg = m[x.first];
if(arg.empty())
arg = generate_argument(x.second, get_hash(x.first));
#ifdef HAVE_GPU
if(gpu)
arg = gpu::to_gpu(arg);
#else
(void)gpu;
#endif
}
return m;
}
parameter_map create_param_map(const program& p, const target& t, bool offload)
{
parameter_map m;
...
...@@ -30,8 +30,10 @@ namespace migraphx {
namespace driver {
inline namespace MIGRAPHX_INLINE_NS {
parameter_map fill_param_map(parameter_map& m,
const std::unordered_map<std::string, shape>& param_shapes,
const target& t,
bool offload = false);
parameter_map create_param_map(const program& p, const target& t, bool offload = false);
parameter_map fill_param_map(parameter_map& m, const program& p, bool gpu);
...
...@@ -189,19 +189,19 @@ struct shape
/*!
* Minimum lengths for dynamic shape.
* lens() for static shape.
*/
std::vector<std::size_t> min_lens() const;
/*!
* Maximum lengths for dynamic shape.
* lens() for static shape.
*/
std::vector<std::size_t> max_lens() const;
/*!
* Optimum lengths for dynamic shape.
* Empty for static shape.
*/
std::vector<std::set<std::size_t>> opt_lens() const;
...@@ -259,6 +259,9 @@ struct shape
// convert the shape to an equivalent dynamic shape with empty optimals
shape to_dynamic() const;
// convert the shape to a static one setting any non-fixed dynamic_dimensions to x
shape to_static(std::size_t x) const;
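// e.g. (illustrative) {float_type, {{1,10},{3,3}}}.to_static(4) yields {float_type, {4, 3}}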
friend bool operator==(const shape& x, const shape& y);
friend bool operator!=(const shape& x, const shape& y);
friend std::ostream& operator<<(std::ostream& os, const shape& x);
...
...@@ -331,7 +331,8 @@ std::vector<argument> generic_eval(const module* mod,
MIGRAPHX_THROW("Parameter not found: " + param_name); MIGRAPHX_THROW("Parameter not found: " + param_name);
auto param = params[param_name]; auto param = params[param_name];
// TODO: may want to check correct number of dimensions and/or was within bounds // TODO: may want to check correct number of dimensions and/or was within bounds
if(not ins->get_shape().dynamic() and param.get_shape() != ins->get_shape()) if(not ins->get_shape().any_of_dynamic() and
param.get_shape() != ins->get_shape())
{
MIGRAPHX_THROW("Incorrect shape {" + to_string(param.get_shape()) +
"} for parameter: " + param_name +
...
...@@ -481,6 +481,15 @@ shape shape::with_type(type_t t) const
shape shape::to_dynamic() const
{
if(not sub_shapes().empty())
{
std::vector<shape> subs;
std::transform(sub_shapes().cbegin(),
sub_shapes().cend(),
std::back_inserter(subs),
[](auto s) { return s.to_dynamic(); });
return {subs};
}
if(this->dynamic())
{
return *this;
...@@ -488,6 +497,30 @@ shape shape::to_dynamic() const
return {type(), lens(), lens(), {}};
}
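// e.g. (illustrative) to_dynamic() on a static {float_type, {2,3}} yields
// {float_type, {{2,2},{3,3}}} with empty optimals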
shape shape::to_static(std::size_t x) const
{
if(not sub_shapes().empty())
{
std::vector<shape> subs;
std::transform(sub_shapes().cbegin(),
sub_shapes().cend(),
std::back_inserter(subs),
[&](auto s) { return s.to_static(x); });
return {subs};
}
if(not this->dynamic())
{
return *this;
}
auto static_lens = this->max_lens();
std::transform(static_lens.begin(),
static_lens.end(),
this->dyn_dims().cbegin(),
static_lens.begin(),
[&](auto sl, auto dd) { return dd.is_fixed() ? sl : x; });
return {type(), static_lens};
}
std::size_t shape::element_space() const { return impl->element_space(); }
std::string shape::type_string() const { return name(this->type()); }
...
...@@ -189,8 +189,20 @@ argument register_on_gpu(const argument& arg)
argument to_gpu(const argument& arg, bool host)
{
argument result;
arg.visit(
[&](auto x) {
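// plain (non-tuple) argument: copy its raw bytes to device memory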
auto p = write_to_gpu(arg.data(), arg.get_shape().bytes(), host); auto p = write_to_gpu(arg.data(), arg.get_shape().bytes(), host);
result = {x.get_shape(), p};
},
[&](const auto& xs) {
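// tuple argument: recursively copy each sub-argument to the GPU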
std::vector<argument> args;
std::transform(xs.begin(), xs.end(), std::back_inserter(args), [&](auto x) {
return to_gpu(x, host);
});
result = argument{args};
});
return result;
}
argument from_gpu(const argument& arg)
...
...@@ -27,7 +27,7 @@
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/target.hpp>
TEST_CASE(tuple_from_gpu)
{
migraphx::shape s1{migraphx::shape::float_type, {2, 3}};
migraphx::shape s2{migraphx::shape::int32_type, {2, 4}};
...@@ -47,4 +47,23 @@ TEST_CASE(tuple_to_from_gpu)
EXPECT(result2 == p2_data);
}
TEST_CASE(tuple_to_gpu)
{
migraphx::shape s1{migraphx::shape::float_type, {2, 3}};
migraphx::shape s2{migraphx::shape::int32_type, {2, 4}};
std::vector<float> p1_data = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};
std::vector<int> p2_data = {1, 2, 3, 4, 5, 6, 7, 8};
auto p1 = migraphx::argument{s1, p1_data.data()};
auto p2 = migraphx::argument{s2, p2_data.data()};
auto p_gpu = migraphx::gpu::to_gpu(migraphx::argument({p1, p2}));
auto p_host = migraphx::gpu::from_gpu(p_gpu);
std::vector<migraphx::argument> results = p_host.get_sub_objects();
std::vector<float> result1;
results[0].visit([&](auto output) { result1.assign(output.begin(), output.end()); });
std::vector<int> result2;
results[1].visit([&](auto output) { result2.assign(output.begin(), output.end()); });
EXPECT(result1 == p1_data);
EXPECT(result2 == p2_data);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
...@@ -336,6 +336,49 @@ TEST_CASE(test_shape_dyn_to_dynamic)
EXPECT(s0 == s1);
}
TEST_CASE(test_shape_subshapes_to_dynamic)
{
std::vector<migraphx::shape> sub_shapes0 = {};
sub_shapes0.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
sub_shapes0.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
migraphx::shape s0{sub_shapes0};
migraphx::shape s1 = s0.to_dynamic();
std::vector<migraphx::shape> sub_shapes1 = {};
sub_shapes1.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
sub_shapes1.push_back(migraphx::shape{migraphx::shape::float_type, {{3, 3}, {4, 4}, {5, 5}}});
migraphx::shape s2{sub_shapes1};
EXPECT(s1 == s2);
}
TEST_CASE(test_shape_dyn_to_static)
{
migraphx::shape s0{migraphx::shape::float_type, {{1, 1}, {2, 2}, {2, 10}, {2, 10}}};
migraphx::shape s1 = s0.to_static(4);
migraphx::shape s2{migraphx::shape::float_type, {1, 2, 4, 4}};
EXPECT(s1 == s2);
}
TEST_CASE(test_shape_static_to_static)
{
migraphx::shape s0{migraphx::shape::float_type, {1, 2, 4, 4}};
migraphx::shape s1 = s0.to_static(8);
EXPECT(s0 == s1);
}
TEST_CASE(test_shape_subshapes_to_static)
{
std::vector<migraphx::shape> sub_shapes0 = {};
sub_shapes0.push_back(migraphx::shape{migraphx::shape::float_type, {{1, 4}, {4, 4}}});
sub_shapes0.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
migraphx::shape s0{sub_shapes0};
migraphx::shape s1 = s0.to_static(3);
std::vector<migraphx::shape> sub_shapes1 = {};
sub_shapes1.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4}});
sub_shapes1.push_back(migraphx::shape{migraphx::shape::float_type, {3, 4, 5}});
migraphx::shape s2{sub_shapes1};
EXPECT(s1 == s2);
}
TEST_CASE(test_shape_overlap)
{
migraphx::shape s{migraphx::shape::float_type, {2, 2, 3}, {6, 3, 2}};
...