"src/lib/vscode:/vscode.git/clone" did not exist on "671c9d63cbc31fdf6fdd57a815c99027c1e657b2"
Commit c4b1102e authored by charlie

Merge branch 'dyn_model_test' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents 5fc48e77 31065c7d
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_inference : verify_program<test_batchnorm_inference>
{
    const size_t width    = 3;
    const size_t height   = 3;
    const size_t channels = 3;
    const size_t batches  = 4;

    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
        migraphx::shape vars{migraphx::shape::float_type, {channels}};
        auto x        = mm->add_parameter("x", s);
        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
        mm->add_instruction(
            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
        return p;
    }
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_batchnorm_inference_2 : verify_program<test_batchnorm_inference_2>
{
    const size_t width    = 14;
    const size_t height   = 14;
    const size_t channels = 256;
    const size_t batches  = 1;

    migraphx::program create_program() const
    {
        migraphx::program p;
        auto* mm = p.get_main_module();
        migraphx::shape s{migraphx::shape::float_type, {batches, channels, height, width}};
        migraphx::shape vars{migraphx::shape::float_type, {channels}};
        auto x        = mm->add_parameter("x", s);
        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
        mm->add_instruction(
            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
        return p;
    }
};
@@ -26,6 +26,8 @@
 #include <migraphx/program.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
+#include <migraphx/instruction.hpp>
+#include <migraphx/common.hpp>
 
 struct test_conv_bn : verify_program<test_conv_bn>
 {
@@ -37,19 +39,38 @@ struct test_conv_bn : verify_program<test_conv_bn>
         migraphx::shape xs{migraphx::shape::float_type, {1, 3, 224, 224}};
         migraphx::shape ws{migraphx::shape::float_type, {64, 3, 7, 7}};
         migraphx::shape vars{migraphx::shape::float_type, {64}};
         auto x = mm->add_parameter("x", xs);
         auto w = mm->add_parameter("w", ws);
+
+        // non-symmetrical tiling
         auto conv = mm->add_instruction(
             migraphx::make_op("convolution",
                               {{"padding", {3, 3}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
             x,
             w);
         auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
         auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
         auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
         auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        mm->add_instruction(
-            migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
+
+        auto rt  = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+        auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+
+        auto usq_scale =
+            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+        auto usq_bias =
+            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+        auto usq_mean =
+            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+        auto usq_var =
+            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
+
+        auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {conv, usq_mean});
+        auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+        auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+        auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+        auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+        add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
         return p;
     }
 };
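
This decomposition, repeated across the tests below, replaces the removed batch_norm_inference operator with plain elementwise ops. With the per-channel statistics unsqueezed so they broadcast over an NCHW tensor, the inserted instructions compute ordinary inference-mode batch normalization:

    y = \text{scale} \cdot \frac{x - \text{mean}}{\sqrt{\text{variance} + \epsilon}} + \text{bias}, \qquad \epsilon = 10^{-5}

The square root is spelled pow(variance + eps, 0.5) via the rt = 0.5 literal, and add_common_op inserts the broadcast instructions each binary op needs.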
@@ -26,21 +26,38 @@
 #include <migraphx/program.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
+#include <migraphx/instruction.hpp>
+#include <migraphx/common.hpp>
 
 struct test_conv_bn_add : verify_program<test_conv_bn_add>
 {
-    static migraphx::instruction_ref add_bn(migraphx::module& m,
-                                            migraphx::instruction_ref x,
-                                            std::size_t channels,
-                                            std::size_t seed = 1)
+    static migraphx::instruction_ref add_bn(migraphx::module& m, migraphx::instruction_ref x)
     {
-        migraphx::shape vars{migraphx::shape::float_type, {channels}};
-        auto scale = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + seed)));
-        auto bias = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + seed)));
-        auto mean = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + seed)));
-        auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + seed)));
-        return m.add_instruction(
-            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
+        auto bn_lens = x->get_shape().lens();
+        auto c_len   = bn_lens.at(1);
+
+        migraphx::shape vars{migraphx::shape::float_type, {c_len}};
+        auto scale    = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + c_len)));
+        auto bias     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + c_len)));
+        auto mean     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + c_len)));
+        auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + c_len)));
+
+        auto rt  = m.add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+        auto eps = m.add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+
+        auto usq_scale =
+            m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+        auto usq_bias = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+        auto usq_mean = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+        auto usq_var =
+            m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
+
+        auto numer   = add_common_op(m, migraphx::make_op("sub"), {x, usq_mean});
+        auto var_eps = add_common_op(m, migraphx::make_op("add"), {usq_var, eps});
+        auto denom   = add_common_op(m, migraphx::make_op("pow"), {var_eps, rt});
+        auto div0    = add_common_op(m, migraphx::make_op("div"), {numer, denom});
+        auto r0      = add_common_op(m, migraphx::make_op("mul"), {div0, usq_scale});
+        return add_common_op(m, migraphx::make_op("add"), {r0, usq_bias});
     }
 
     migraphx::program create_program() const
@@ -57,10 +74,10 @@ struct test_conv_bn_add : verify_program<test_conv_bn_add>
             {migraphx::shape::float_type, {ochannels, ichannels, 1, 1}}, 2));
         auto relu1 = mm->add_instruction(migraphx::make_op("relu"), x);
         auto conv1 = mm->add_instruction(migraphx::make_op("convolution"), relu1, w);
-        auto bn1   = add_bn(*mm, conv1, ochannels, 1);
+        auto bn1   = add_bn(*mm, conv1);
         auto relu2 = mm->add_instruction(migraphx::make_op("relu"), y);
         auto conv2 = mm->add_instruction(migraphx::make_op("convolution"), relu2, v);
-        auto bn2   = add_bn(*mm, conv2, ochannels, 1);
+        auto bn2   = add_bn(*mm, conv2);
         auto sum   = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
         mm->add_instruction(migraphx::make_op("relu"), sum);
         return p;
...
@@ -27,6 +27,8 @@
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
 #include <migraphx/op/common.hpp>
+#include <migraphx/instruction.hpp>
+#include <migraphx/common.hpp>
 
 struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
 {
@@ -49,8 +51,26 @@ struct test_conv_bn_relu_pooling : verify_program<test_conv_bn_relu_pooling>
         auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
         auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
         auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        auto bn = mm->add_instruction(
-            migraphx::make_op("batch_norm_inference"), conv, scale, bias, mean, variance);
+
+        auto rt  = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+        auto eps = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+
+        auto usq_scale =
+            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+        auto usq_bias =
+            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+        auto usq_mean =
+            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+        auto usq_var =
+            mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
+
+        auto numer   = add_common_op(*mm, migraphx::make_op("sub"), {conv, usq_mean});
+        auto var_eps = add_common_op(*mm, migraphx::make_op("add"), {usq_var, eps});
+        auto denom   = add_common_op(*mm, migraphx::make_op("pow"), {var_eps, rt});
+        auto div0    = add_common_op(*mm, migraphx::make_op("div"), {numer, denom});
+        auto r0      = add_common_op(*mm, migraphx::make_op("mul"), {div0, usq_scale});
+        auto bn      = add_common_op(*mm, migraphx::make_op("add"), {r0, usq_bias});
+
         auto relu = mm->add_instruction(migraphx::make_op("relu"), bn);
         mm->add_instruction(migraphx::make_op("pooling",
                                               {{"mode", migraphx::op::pooling_mode::average},
...
@@ -27,22 +27,40 @@
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
 #include <migraphx/op/common.hpp>
+#include <migraphx/instruction.hpp>
+#include <migraphx/common.hpp>
 
 struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
 {
-    static migraphx::instruction_ref
-    add_bn(migraphx::program& p, migraphx::instruction_ref x, std::size_t channels)
+    static migraphx::instruction_ref add_bn(migraphx::module& m, migraphx::instruction_ref x)
     {
-        auto* mm = p.get_main_module();
-        migraphx::shape vars{migraphx::shape::float_type, {channels}};
-        auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + channels)));
-        auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + channels)));
-        auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + channels)));
-        auto variance =
-            mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + channels)));
-        return mm->add_instruction(
-            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
+        auto bn_lens = x->get_shape().lens();
+        auto c_len   = bn_lens.at(1);
+
+        migraphx::shape vars{migraphx::shape::float_type, {c_len}};
+        auto scale    = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 1 + c_len)));
+        auto bias     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 2 + c_len)));
+        auto mean     = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 3 + c_len)));
+        auto variance = m.add_literal(migraphx::abs(migraphx::generate_literal(vars, 4 + c_len)));
+
+        auto rt  = m.add_literal(migraphx::literal{migraphx::shape::float_type, {0.5}});
+        auto eps = m.add_literal(migraphx::literal{migraphx::shape::float_type, {1e-5f}});
+
+        auto usq_scale =
+            m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), scale);
+        auto usq_bias = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), bias);
+        auto usq_mean = m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), mean);
+        auto usq_var =
+            m.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1, 2}}}), variance);
+
+        auto numer   = add_common_op(m, migraphx::make_op("sub"), {x, usq_mean});
+        auto var_eps = add_common_op(m, migraphx::make_op("add"), {usq_var, eps});
+        auto denom   = add_common_op(m, migraphx::make_op("pow"), {var_eps, rt});
+        auto div0    = add_common_op(m, migraphx::make_op("div"), {numer, denom});
+        auto r0      = add_common_op(m, migraphx::make_op("mul"), {div0, usq_scale});
+        return add_common_op(m, migraphx::make_op("add"), {r0, usq_bias});
     }
 
     migraphx::program create_program() const
     {
         migraphx::program p;
@@ -59,7 +77,7 @@ struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
                               {{"padding", {0, 0}}, {"stride", {1, 1}}, {"dilation", {1, 1}}}),
             x1,
             w1);
-        auto bn1 = add_bn(p, conv1, 2048);
+        auto bn1 = add_bn(*mm, conv1);
         auto x2 = mm->add_parameter("x2", xs2);
         auto w2 = mm->add_parameter("w2", ws2);
         auto conv2 = mm->add_instruction(
@@ -67,7 +85,7 @@ struct test_conv_bn_relu_pooling2 : verify_program<test_conv_bn_relu_pooling2>
                               {{"padding", {0, 0}}, {"stride", {2, 2}}, {"dilation", {1, 1}}}),
             x2,
             w2);
-        auto bn2 = add_bn(p, conv2, 2048);
+        auto bn2 = add_bn(*mm, conv2);
         auto add = mm->add_instruction(migraphx::make_op("add"), bn1, bn2);
         auto relu = mm->add_instruction(migraphx::make_op("relu"), add);
         mm->add_instruction(migraphx::make_op("pooling",
...
@@ -27,14 +27,16 @@
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
 
-struct test_leaky_relu : verify_program<test_leaky_relu>
+struct test_pad_large : verify_program<test_pad_large>
 {
     migraphx::program create_program() const
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
-        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
-        mm->add_instruction(migraphx::make_op("leaky_relu", {{"alpha", 0.41}}), x);
+        migraphx::shape s0{migraphx::shape::float_type, {586, 3, 224, 224}};
+        std::vector<int64_t> pads0 = {0, 0, 1, 1, 0, 0, 1, 1};
+        auto l0 = mm->add_parameter("x", s0);
+        mm->add_instruction(migraphx::make_op("pad", {{"pads", pads0}}), l0);
        return p;
    }
 };
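
For reference, the pads attribute follows the ONNX layout MIGraphX's pad operator uses: one "before" value per dimension followed by one "after" value per dimension. {0, 0, 1, 1, 0, 0, 1, 1} therefore pads only the two spatial axes by one element on each side, taking the {586, 3, 224, 224} input to a {586, 3, 226, 226} output.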
@@ -51,7 +51,7 @@ template struct test_reduce_op_large<migraphx::op::reduce_min, 1, migraphx::shape::float_type>;
 template struct test_reduce_op_large<migraphx::op::reduce_prod, 2, migraphx::shape::float_type>;
 template struct test_reduce_op_large<migraphx::op::reduce_sum, 1, migraphx::shape::float_type>;
 
-struct test_reduce_mean : verify_program<test_reduce_mean>
+struct test_reduce_mean_1 : verify_program<test_reduce_mean_1>
 {
     migraphx::program create_program() const
     {
@@ -63,3 +63,16 @@ struct test_reduce_mean_1 : verify_program<test_reduce_mean_1>
         return p;
     };
 };
+
+struct test_reduce_mean_2 : verify_program<test_reduce_mean_2>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {336, 400}};
+        auto x = mm->add_parameter("x", s);
+        mm->add_instruction(migraphx::op::reduce_mean{{1}}, x);
+        return p;
+    };
+};
@@ -21,47 +21,41 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
 
 #include "verify_program.hpp"
 #include <migraphx/program.hpp>
 #include <migraphx/generate.hpp>
-#include <migraphx/serialize.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/op/reduce_mean.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>
 
-struct test_batchnorm_2d_per_actv : verify_program<test_batchnorm_2d_per_actv>
+/**
+ * @brief test_shape_alloc sets up a situation that could lead to an exception "convolution: Shapes
+ * are not in standard layout" if a "replace_allocate" compiler pass is not followed with
+ * "adjust_allocation". The last transpose instruction generates a shape with a stride of 1 in
+ * the 2nd index, a non-standard layout that should be reallocated by adjust_allocation.
+ */
+struct test_shape_alloc : verify_program<test_shape_alloc>
 {
-    const size_t d1       = 2;
-    const size_t d2       = 4;
-    const size_t channels = 2;
-    const size_t batches  = 3;
-
     migraphx::program create_program() const
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
-        migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2}};
-        migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2}};
-        auto x        = mm->add_parameter("x", s);
-        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
-        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
-        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
-        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        mm->add_instruction(
-            migraphx::make_op(
-                "batch_norm_inference",
-                {{"epsilon", 1.0e-6},
-                 {"momentum", 0.9f},
-                 {"bn_mode",
-                  migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
-            x,
-            scale,
-            bias,
-            mean,
-            variance);
+        auto weights = mm->add_literal(migraphx::generate_literal(
+            migraphx::shape{migraphx::shape::float_type, {11, 8, 1, 1}, {8, 1, 1, 1}}));
+        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 8, 7, 7}});
+        auto transpose1 =
+            mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}),
+                                x); // -> float_type, {1, 7, 7, 8}, {392, 7, 1, 49}
+        auto reduce_ins =
+            mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {1, 2}}}),
+                                transpose1); // -> float_type, {1, 1, 1, 8}, {8, 8, 8, 1}
+        auto transpose2 =
+            mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 3, 1, 2}}}),
+                                reduce_ins); // -> float_type, {1, 8, 1, 1}, {8, 1, 8, 8}
+        auto conv_op = migraphx::make_op("convolution");
+        mm->add_instruction(conv_op, transpose2, weights);
         return p;
     }
 };
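
The shape/stride comments in this test follow from transpose being a view: it permutes lens and strides rather than moving data, so new[i] = old[perm[i]]. A minimal sketch of that rule (the permute helper below is illustrative, not MIGraphX API):

    #include <array>
    #include <cstddef>

    // A transpose permutes dims and strides in place of copying data.
    std::array<std::size_t, 4> permute(const std::array<std::size_t, 4>& v,
                                       const std::array<std::size_t, 4>& perm)
    {
        std::array<std::size_t, 4> out{};
        for(std::size_t i = 0; i < 4; ++i)
            out[i] = v[perm[i]];
        return out;
    }

    // permute({1, 8, 7, 7},    {0, 2, 3, 1}) == {1, 7, 7, 8}    (lens)
    // permute({392, 49, 7, 1}, {0, 2, 3, 1}) == {392, 7, 1, 49} (strides)
    // matching the first transpose's comment; the second transpose likewise
    // turns strides {8, 8, 8, 1} into {8, 1, 8, 8}, putting the stride of 1
    // in the 2nd index -- the non-standard layout the doc comment describes.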
@@ -26,27 +26,18 @@
 #include <migraphx/program.hpp>
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
+#include <migraphx/common.hpp>
 
-struct test_batchnorm_1d : verify_program<test_batchnorm_1d>
+struct test_softmax_large3 : verify_program<test_softmax_large3>
 {
-    const size_t size     = 3;
-    const size_t channels = 3;
-    const size_t batches  = 4;
-
     migraphx::program create_program() const
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
-        migraphx::shape s{migraphx::shape::float_type, {batches, channels, size}};
-        migraphx::shape vars{migraphx::shape::float_type, {channels}};
-        auto x = mm->add_parameter("x", s);
-        auto scale = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
-        auto bias = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
-        auto mean = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
-        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        mm->add_instruction(
-            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
+        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 4}});
+        auto large = mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {100}});
+        auto add = migraphx::add_common_op(*mm, migraphx::make_op("mul"), {x, large});
+        mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), add);
         return p;
     }
 };
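
test_softmax_large3 scales its input by 100, which would overflow expf if softmax were evaluated naively (expf saturates just past e^88 in float). The test appears intended to verify that the implementation uses the shift-invariant form

    \mathrm{softmax}(x)_i = \frac{e^{x_i - \max_j x_j}}{\sum_k e^{x_k - \max_j x_j}}

which is mathematically identical to the naive definition but keeps every exponent non-positive.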
@@ -116,6 +116,9 @@ def main():
     model = migraphx.parse_onnx(model_name, default_dim_value=batch)
 
+    if args.verbose:
+        print(model)
+
     model.compile(migraphx.get_target('gpu'), offload_copy=False)
 
     params = {}
...
@@ -21,5 +21,5 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
 #####################################################################################
-numpy==1.18.5
+numpy==1.21.6
 onnxruntime==1.10.0
@@ -21,7 +21,10 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
 #####################################################################################
-import string, sys, re, runpy
+import string
+import sys
+import re
+import runpy
 from functools import wraps
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
@@ -308,18 +311,39 @@ class Parameter:
         return self.substitute('${type} ${name}', prefix=prefix)
 
     def virtual_output_args(self, prefix: Optional[str] = None) -> List[str]:
-        return [
-            '&{prefix}{n}'.format(prefix=prefix or '', n=n)
-            for t, n in self.cparams
-        ]
+        container_type = self.type.remove_generic().basic().str()
+        decl_list: List[str] = []
+        container = (container_type == "std::vector"
+                     or container_type == "vector")
+        for t, n, in self.cparams:
+            if not decl_list and container:
+                decl_list.append('{prefix}{n}.data()'.format(prefix=prefix
+                                                             or '',
+                                                             n=n))
+            else:
+                decl_list.append('&{prefix}{n}'.format(prefix=prefix or '',
+                                                       n=n))
+        return decl_list
 
     def virtual_output_declarations(self,
                                     prefix: Optional[str] = None) -> List[str]:
-        return [
-            'std::remove_pointer_t<{type}> {prefix}{n};'.format(
-                type=Type(t).str(), prefix=prefix or '', n=n)
-            for t, n in self.cparams
-        ]
+        container_type = self.type.remove_generic().basic().str()
+        container = (container_type == "std::vector"
+                     or container_type == "vector")
+        decl_list: List[str] = []
+        for t, n, in self.cparams:
+            if not decl_list and container:
+                inner_t = self.type.inner_type()
+                if inner_t:
+                    decl_list.append(
+                        'std::array<{inner_t}, 1024> {prefix}{n};'.format(
+                            inner_t=inner_t.str(), prefix=prefix or '', n=n))
+            else:
+                decl_list.append(
+                    'std::remove_pointer_t<{type}> {prefix}{n}'.format(
+                        type=Type(t).str(), prefix=prefix or '', n=n))
+                decl_list[-1] += '=1024;' if container else ';'
+        return decl_list
 
     def virtual_output(self, prefix: Optional[str] = None) -> str:
         write = self.virtual_write
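
To illustrate what the new container branch changes in the generated C++ (all names below are hypothetical, chosen for illustration): for a virtual output of type std::vector<std::size_t>, the generator now declares a fixed 1024-element buffer plus a size variable initialized to that capacity, and passes buffer.data() and &size through the C interface instead of taking addresses of two raw declarations. Roughly:

    #include <array>
    #include <cstddef>
    #include <type_traits>

    // Hypothetical C callback with the shape the generated wrapper targets:
    // it fills the buffer and reports the element count through out_size.
    int fill_values(std::size_t* out, std::size_t* out_size)
    {
        std::size_t n = (*out_size < 3) ? *out_size : 3;
        for(std::size_t i = 0; i < n; ++i)
            out[i] = i;
        *out_size = n;
        return 0;
    }

    int main()
    {
        // What virtual_output_declarations now emits for a vector output:
        std::array<std::size_t, 1024> values;                    // container buffer
        std::remove_pointer_t<std::size_t*> values_size = 1024;  // capacity in, length out
        // ...and what virtual_output_args now passes:
        return fill_values(values.data(), &values_size);
    }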
@@ -694,9 +718,14 @@ def generate_cpp_header() -> str:
                             [c.generate() for c in cpp_classes])
 
-def cwrap(name: str) -> Callable:
+c_type_map: Dict[str, Type] = {}
+
+
+def cwrap(name: str, c_type: Optional[str] = None) -> Callable:
     def with_cwrap(f):
         type_map[name] = f
+        if c_type:
+            c_type_map[name] = Type(c_type)
 
         @wraps(f)
         def decorated(*args, **kwargs):
@@ -917,6 +946,9 @@ def vector_c_wrap(p: Parameter) -> None:
     # Not a generic type
     if not inner:
         return
+    if inner.str() in c_type_map:
+        inner = c_type_map[inner.str()]
     t = inner.add_pointer()
     if p.type.is_reference():
         if p.type.is_const():
@@ -927,6 +959,12 @@ def vector_c_wrap(p: Parameter) -> None:
             p.add_size_param()
             p.bad_param('${name} == nullptr or ${size} == nullptr',
                         'Null pointer')
+        elif p.virtual:
+            p.add_param(t)
+            p.add_size_param()
+            p.bad_param('${name} == nullptr or ${size} == nullptr',
+                        'Null pointer')
+            p.virtual_write = '{${name}.begin(), ${name}.begin()+${size}}; // cppcheck-suppress returnDanglingLifetime'
         else:
             p.add_param(t)
             p.bad_param('${name} == nullptr', 'Null pointer')
@@ -946,7 +984,7 @@ def vector_c_wrap(p: Parameter) -> None:
         p.write = ['std::copy(${result}.begin(), ${result}.end(), ${name})']
 
-@cwrap('std::string')
+@cwrap('std::string', 'char*')
 def string_c_wrap(p: Parameter) -> None:
     t = Type('char*')
     if p.returns:
@@ -1061,9 +1099,9 @@ struct ${ctype} {
 c_api_virtual_impl = Template('''
 ${return_type} ${name}(${params}) const
 {
+    ${output_decls}
     if (${fname} == nullptr)
         throw std::runtime_error("${name} function is missing.");
-    ${output_decls}
     std::array<char, 256> exception_msg;
     exception_msg.front() = '\\0';
     auto api_error_result = ${fname}(${args});
...
@@ -21,6 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
+#include <migraphx/execution_environment.hpp>
 #include <migraphx/migraphx.h>
 #include <migraphx/rank.hpp>
 #include <migraphx/shape.hpp>
@@ -166,6 +167,13 @@ void set_output_names(tf_options& options, std::vector<const char*> names)
     options.output_node_names = std::vector<std::string>(names.begin(), names.end());
 }
 
+std::vector<argument>
+run_async(program& p, const parameter_map& params, void* s, std::string_view name)
+{
+    execution_environment exec_env{any_ptr(s, name), true};
+    return p.eval(params, exec_env);
+}
+
 template <class Value>
 std::vector<const char*> get_names(const std::unordered_map<std::string, Value>& m)
 {
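
A possible usage sketch for the new run_async helper, assuming a HIP stream and using the program/parameter_map aliases already in scope in this file. The "ihipStream_t*" type-name string handed to any_ptr is an assumption, not something this diff shows; verify it against the GPU context before relying on it:

    #include <hip/hip_runtime.h>

    // Sketch (not part of the diff): evaluate on a caller-owned HIP stream.
    std::vector<argument> eval_on_stream(program& p, const parameter_map& params)
    {
        hipStream_t stream = nullptr;
        hipStreamCreate(&stream);
        auto results = run_async(p, params, stream, "ihipStream_t*"); // assumed type name
        hipStreamSynchronize(stream); // outputs are valid only once the stream finishes
        hipStreamDestroy(stream);
        return results;
    }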
@@ -265,11 +273,18 @@ struct experimental_custom_op
 template <class CustomOp>
 struct custom_operation
 {
     template <class Self, class F>
     static auto reflect(Self&, F)
     {
         return pack();
     }
 
+    value attributes() const
+    {
+        return {{"custom_op", true}, {"target", op.runs_on_offload_target() ? "gpu" : "cpu"}};
+    }
+
     CustomOp op;
 
     std::string name() const { return op.xobject.name; }
@@ -284,6 +299,23 @@ struct custom_operation
     {
         return op.compute(std::move(ctx), std::move(output_shape), std::move(inputs));
     }
+
+    std::ptrdiff_t output_alias(std::vector<shape> inputs) const
+    {
+        auto alias_vec = op.output_alias(std::move(inputs));
+        // TODO: For now, only support one output alias
+        if(alias_vec.empty())
+        {
+            return -1;
+        }
+        if(alias_vec.size() > 1)
+        {
+            MIGRAPHX_THROW("Currently, CustomOps in MIGraphX only supports one output_alias");
+        }
+        return alias_vec.front();
+    }
+
+    bool runs_on_offload_target() const { return op.runs_on_offload_target(); }
 };
 
 template <class CustomOp>
...
@@ -26,7 +26,6 @@
 #include <stdlib.h>
 #include <stdbool.h>
 
-// Add new types here
 // clang-format off
 #define MIGRAPHX_SHAPE_VISIT_TYPES(m) \
...
@@ -66,12 +66,21 @@ any_ptr get_queue_context(T&)
 {
     return {};
 }
 
+template <class T>
+void wait_for_context(T&, any_ptr)
+{
+}
+
+template <class T>
+void finish_on_context(T&, any_ptr){}
+
 <%
 interface('context',
           virtual('to_value', returns = 'value', const = True, default = 'to_value_context'),
           virtual('from_value', v = 'const value&', default = 'from_value_context'),
           virtual('get_queue', returns = 'any_ptr', default = 'get_queue_context'),
+          virtual('wait_for', queue = 'any_ptr', returns = 'void', default = 'wait_for_context'),
+          virtual('finish_on', queue = 'any_ptr', returns = 'void', default = 'finish_on_context'),
           virtual('finish', returns = 'void', const = True)) %>
 
 inline void migraphx_to_value(value& v, const context& ctx)
...
@@ -32,6 +32,7 @@
 #include <utility>
 #include <unordered_map>
 #include <migraphx/reflect.hpp>
+#include <migraphx/dyn_output.hpp>
 #include <migraphx/functional.hpp>
 #include <migraphx/streamutils.hpp>
 #include <migraphx/normalize_attributes.hpp>
@@ -95,46 +96,6 @@ bool has_finalize(const operation& x);
 
 #else
 
-struct dyn_output
-{
-    // original shape from the instruction
-    shape ins_shape;
-    // shape computed at eval time using input arguments
-    shape computed_shape;
-};
-
-/**
- * Handle dynamic and static shape at evaluation time.
- * If converted to shape type, returns original ins_shape.
- * If converted to dyn_output type, will compute an output shape using the input arguments.
- */
-template <class F>
-struct compute_output_shape
-{
-    F ins_inputs;
-
-    operator dyn_output() const
-    {
-        return ins_inputs([](const auto& x, shape ins_shape, const std::vector<argument>& inputs) {
-            if(ins_shape.dynamic())
-                return dyn_output{ins_shape, compute_shape(x, to_shapes(inputs))};
-            return dyn_output{ins_shape, ins_shape};
-        });
-    }
-
-    operator shape() const
-    {
-        return ins_inputs(
-            [](const auto&, shape ins_shape, const std::vector<argument>&) { return ins_shape; });
-    }
-};
-
-template <class F>
-compute_output_shape<F> make_compute_output_shape(F f)
-{
-    return {f};
-}
-
 namespace detail {
 namespace operation_operators {
...
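
The dyn_output and compute_output_shape machinery removed here is not deleted outright: the <migraphx/dyn_output.hpp> include added at the top of this header indicates it moved into its own file. For context, the two conversion operators let an operator opt into dynamic shapes purely through the parameter type of its compute overload; a sketch of that assumed pattern (not code from this diff):

    #include <migraphx/argument.hpp>
    #include <migraphx/dyn_output.hpp>
    #include <string>
    #include <vector>

    // Assumed pattern: taking dyn_output instead of shape makes the framework
    // run compute_shape on the real input arguments at eval time.
    struct example_dynamic_op
    {
        std::string name() const { return "example_dynamic_op"; }

        migraphx::argument compute(const migraphx::dyn_output& dyn_out,
                                   const std::vector<migraphx::argument>& args) const
        {
            // computed_shape is static, derived from the inputs even when the
            // instruction's own declared shape is dynamic.
            migraphx::argument result{dyn_out.computed_shape};
            (void)args; // ... fill result from args ...
            return result;
        }
    };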
@@ -57,7 +57,7 @@ echo "Dependencies are installed at $PREFIX"
 rbuild prepare -d $PREFIX -s develop
 
 # install onnx package for unit tests
-pip3 install onnx==1.8.1 numpy==1.18.5 typing==3.7.4 pytest==6.0.1 packaging==16.8
+pip3 install onnx==1.8.1 numpy==1.21.6 typing==3.7.4 pytest==6.0.1 packaging==16.8
 # pin version of protobuf in Python for onnx runtime unit tests
 pip3 install protobuf==3.20.0