Commit 31065c7d authored by charlie

Merge branch 'dyn_squeeze' of github.com:ROCmSoftwarePlatform/AMDMIGraphX into dyn_model_test

parents 6bec381f 6acbd4e4
@@ -27,26 +27,20 @@
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
 
-struct test_batchnorm_1d : verify_program<test_batchnorm_1d>
+struct test_conv_group_add : verify_program<test_conv_group_add>
 {
-    const size_t size     = 3;
-    const size_t channels = 3;
-    const size_t batches  = 4;
-
     migraphx::program create_program() const
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
-        migraphx::shape s{migraphx::shape::float_type, {batches, channels, size}};
-        migraphx::shape vars{migraphx::shape::float_type, {channels}};
-        auto x        = mm->add_parameter("x", s);
-        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
-        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
-        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
-        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        mm->add_instruction(
-            migraphx::make_op("batch_norm_inference"), x, scale, bias, mean, variance);
+        migraphx::shape s{migraphx::shape::float_type, {1, 68, 28, 28}};
+        auto x    = mm->add_parameter("x", s);
+        auto w    = mm->add_parameter("w", {migraphx::shape::float_type, {68, 17, 1, 1}});
+        auto b    = mm->add_parameter("b", {migraphx::shape::float_type, {68}});
+        auto conv = mm->add_instruction(migraphx::make_op("convolution", {{"group", 4}}), x, w);
+        auto bb   = mm->add_instruction(
+            migraphx::make_op("broadcast", {{"axis", 1}, {"out_lens", {1, 68, 28, 28}}}), b);
+        mm->add_instruction(migraphx::make_op("add"), conv, bb);
         return p;
     }
 };
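Note on the shapes in test_conv_group_add: with a grouped convolution, the weight tensor's second dimension is the input channel count divided by the group count, so 68 input channels with group = 4 give the {68, 17, 1, 1} weights above. A quick hypothetical sanity check (not part of the diff):

```cpp
#include <cassert>

int main()
{
    const int in_channels = 68, group = 4;
    // Each group convolves in_channels / group input channels.
    assert(in_channels % group == 0);
    assert(in_channels / group == 17); // matches the {68, 17, 1, 1} weight shape
}
```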
@@ -25,42 +25,49 @@
 #include "verify_program.hpp"
 #include <migraphx/program.hpp>
 #include <migraphx/generate.hpp>
-#include <migraphx/serialize.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>
+#include <migraphx/common.hpp>
 
-struct test_batchnorm_1d_per_actv : verify_program<test_batchnorm_1d_per_actv>
+/*
+Checking for y == 0 ? eps : y
+Adding this because HIP fmod sign changes when y = 0, resulting in nan and -nan not being
+consistent between ref and gpu implementations.
+*/
+migraphx::instruction_ref add_epsilon(migraphx::module& m, migraphx::instruction_ref y)
 {
-    const size_t d1       = 5;
-    const size_t channels = 2;
-    const size_t batches  = 3;
+    auto zero = m.add_literal(0.0f);
+    auto eps  = m.add_literal(1e-3f);
+    auto op_y = add_common_op(m, migraphx::make_op("equal"), {y, zero});
+    return add_common_op(m, migraphx::make_op("where"), {op_y, eps, y});
+}
 
+struct test_fmod : verify_program<test_fmod>
+{
     migraphx::program create_program() const
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {64}};
+        auto x        = mm->add_parameter("x", s);
+        auto y        = mm->add_parameter("y", s);
+        auto op_where = add_epsilon(*mm, y);
+        mm->add_instruction(migraphx::make_op("fmod"), x, op_where);
+        return p;
+    }
+};
 
-        migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1}};
-        migraphx::shape vars{migraphx::shape::float_type, {channels, d1}};
-        auto x        = mm->add_parameter("x", s);
-        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
-        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
-        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
-        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        mm->add_instruction(
-            migraphx::make_op(
-                "batch_norm_inference",
-                {{"epsilon", 1.0e-5},
-                 {"momentum", 0.96f},
-                 {"bn_mode",
-                  migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
-            x,
-            scale,
-            bias,
-            mean,
-            variance);
+struct test_mod : verify_program<test_mod>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {64}};
+        auto x        = mm->add_parameter("x", s);
+        auto y        = mm->add_parameter("y", s);
+        auto op_where = add_epsilon(*mm, y);
+        mm->add_instruction(migraphx::make_op("mod"), x, op_where);
         return p;
     }
 };
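The add_epsilon guard sidesteps fmod's behavior at y == 0, where the result is NaN and the sign of that NaN is implementation-defined. A minimal host-side illustration (not part of the diff):

```cpp
#include <cmath>
#include <cstdio>

int main()
{
    // fmod(x, 0) yields NaN; whether it prints as "nan" or "-nan" can differ
    // between implementations, which is exactly what add_epsilon avoids.
    printf("%f\n", std::fmod(5.0f, 0.0f));
}
```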
@@ -29,14 +29,16 @@
 #include <migraphx/op/reduce_mean.hpp>
 
-migraphx::instruction_ref
-add_layernorm(migraphx::module& m, migraphx::instruction_ref x, std::vector<size_t> dims)
+migraphx::instruction_ref add_layernorm(migraphx::module& m,
+                                        migraphx::instruction_ref x,
+                                        std::vector<size_t> dims,
+                                        float eps = 1e-12f)
 {
     auto scale =
         m.add_parameter("scale", migraphx::shape{migraphx::shape::float_type, {dims.back()}});
     auto bias =
         m.add_parameter("bias", migraphx::shape{migraphx::shape::float_type, {dims.back()}});
-    auto epsilon  = m.add_literal(1e-12f);
+    auto epsilon  = m.add_literal(eps);
     auto exponent = m.add_literal(2.0f);
 
     auto mean = m.add_instruction(migraphx::op::reduce_mean({2}), x);
@@ -68,7 +70,7 @@ struct test_layernorm : verify_program<test_layernorm>
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
-        std::vector<size_t> dims = {1, 1, 5};
+        std::vector<size_t> dims = {1, 2, 5};
         auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, dims});
         add_layernorm(*mm, x, dims);
         return p;
@@ -88,6 +90,19 @@ struct test_layernorm2 : verify_program<test_layernorm2>
     }
 };
 
+struct test_layernorm_eps : verify_program<test_layernorm_eps>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        std::vector<size_t> dims = {1, 2, 5};
+        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, dims});
+        add_layernorm(*mm, x, dims, 1e-5f);
+        return p;
+    }
+};
+
 struct test_layernorm_triadd : verify_program<test_layernorm_triadd>
 {
     migraphx::program create_program() const
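For reference, add_layernorm builds layer normalization out of primitive ops; the new parameter makes the epsilon inside the square root configurable instead of fixed at 1e-12 (test_layernorm_eps exercises 1e-5):

```latex
y = \mathrm{scale} \cdot \frac{x - \mathrm{E}[x]}{\sqrt{\mathrm{Var}[x] + \epsilon}} + \mathrm{bias}
```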
...
@@ -27,14 +27,16 @@
 #include <migraphx/generate.hpp>
 #include <migraphx/make_op.hpp>
 
-struct test_elu : verify_program<test_elu>
+struct test_pad_large : verify_program<test_pad_large>
 {
     migraphx::program create_program() const
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
-        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
-        mm->add_instruction(migraphx::make_op("leaky_relu", {{"alpha", 1.0}}), x);
+        migraphx::shape s0{migraphx::shape::float_type, {586, 3, 224, 224}};
+        std::vector<int64_t> pads0 = {0, 0, 1, 1, 0, 0, 1, 1};
+        auto l0 = mm->add_parameter("x", s0);
+        mm->add_instruction(migraphx::make_op("pad", {{"pads", pads0}}), l0);
        return p;
    }
};
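The 8-element pads vector appears to follow the ONNX convention: the first half holds per-dimension pre-pads and the second half post-pads, so {0, 0, 1, 1, 0, 0, 1, 1} pads only the two spatial dimensions by one on each side. A quick hypothetical check of the resulting shape (not part of the diff):

```cpp
#include <cstdio>
#include <vector>

int main()
{
    std::vector<long> lens{586, 3, 224, 224};
    std::vector<long> pads{0, 0, 1, 1, 0, 0, 1, 1}; // pre-pads, then post-pads
    for(size_t i = 0; i < lens.size(); ++i)
        lens[i] += pads[i] + pads[i + lens.size()];
    // Prints {586, 3, 226, 226}
    printf("{%ld, %ld, %ld, %ld}\n", lens[0], lens[1], lens[2], lens[3]);
}
```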
@@ -51,7 +51,7 @@ template struct test_reduce_op_large<migraphx::op::reduce_min, 1, migraphx::shap
 template struct test_reduce_op_large<migraphx::op::reduce_prod, 2, migraphx::shape::float_type>;
 template struct test_reduce_op_large<migraphx::op::reduce_sum, 1, migraphx::shape::float_type>;
 
-struct test_reduce_mean : verify_program<test_reduce_mean>
+struct test_reduce_mean_1 : verify_program<test_reduce_mean_1>
 {
     migraphx::program create_program() const
     {
@@ -63,3 +63,16 @@ struct test_reduce_mean : verify_program<test_reduce_mean>
         return p;
     };
 };
+
+struct test_reduce_mean_2 : verify_program<test_reduce_mean_2>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        migraphx::shape s{migraphx::shape::float_type, {336, 400}};
+        auto x = mm->add_parameter("x", s);
+        mm->add_instruction(migraphx::op::reduce_mean{{1}}, x);
+        return p;
+    };
+};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/op/reduce_mean.hpp>
/**
* @brief test_shape_alloc sets up a situation that could lead to an exception "convolution: Shapes
* are not in standard layout" if a "replace_allocate" compiler pass is not followed with
* "adjust_allocation". The last transpose instruction generates a shape with a stride of 1 in
* the 2nd index, a non-standard layout that should be reallocated by adjust_allocation.
*/
struct test_shape_alloc : verify_program<test_shape_alloc>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto weights = mm->add_literal(migraphx::generate_literal(
migraphx::shape{migraphx::shape::float_type, {11, 8, 1, 1}, {8, 1, 1, 1}}));
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 8, 7, 7}});
auto transpose1 =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 3, 1}}}),
x); // -> float_type, {1, 7, 7, 8}, {392, 7, 1, 49}
auto reduce_ins =
mm->add_instruction(migraphx::make_op("reduce_mean", {{"axes", {1, 2}}}),
transpose1); // -> float_type, {1, 1, 1, 8}, {8, 8, 8, 1}
auto transpose2 =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 3, 1, 2}}}),
reduce_ins); // -> float_type, {1, 8, 1, 1}, {8, 1, 8, 8}
auto conv_op = migraphx::make_op("convolution");
mm->add_instruction(conv_op, transpose2, weights);
return p;
}
};
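The stride bookkeeping in the trailing comments can be reproduced by permuting lens and strides directly; a small hypothetical check (not part of the diff):

```cpp
#include <array>
#include <cstdio>

int main()
{
    // Shape of x: lens {1, 8, 7, 7} with standard strides {392, 49, 7, 1}.
    std::array<int, 4> lens{1, 8, 7, 7}, strides{392, 49, 7, 1};
    std::array<int, 4> perm{0, 2, 3, 1}; // the first transpose's permutation
    for(int i : perm)
        printf("len %d stride %d\n", lens[i], strides[i]);
    // Prints lens {1, 7, 7, 8} with strides {392, 7, 1, 49}: a non-standard
    // layout of the kind adjust_allocation must reallocate.
}
```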
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_slice2 : verify_program<test_slice2>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::float_type, {1, 44, 57, 57}};
auto x = mm->add_parameter("x", {migraphx::shape::float_type, {1, 44, 57, 57}});
auto y = mm->add_parameter("y", {migraphx::shape::float_type, {1, 44, 56, 56}});
auto slice0 = mm->add_instruction(
migraphx::make_op(
"slice",
{{"axes", {0, 2, 3, 1}}, {"starts", {0, 1, 1, 0}}, {"ends", {1, 57, 57, 44}}}),
x);
mm->add_instruction(migraphx::make_op("add"), y, slice0);
return p;
}
};
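Why the add at the end is shape-valid: each sliced axis keeps ends[i] - starts[i] elements, which takes x from {1, 44, 57, 57} to the shape of y. A hypothetical check (not part of the diff):

```cpp
#include <cstdio>
#include <vector>

int main()
{
    std::vector<int> axes{0, 2, 3, 1}, starts{0, 1, 1, 0}, ends{1, 57, 57, 44};
    std::vector<int> lens{1, 44, 57, 57}; // input x
    for(size_t i = 0; i < axes.size(); ++i)
        lens[axes[i]] = ends[i] - starts[i];
    // Prints {1, 44, 56, 56}, matching parameter y
    printf("{%d, %d, %d, %d}\n", lens[0], lens[1], lens[2], lens[3]);
}
```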
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/common.hpp>
struct test_softmax_large1 : verify_program<test_softmax_large1>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 4}});
auto large = mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {10000}});
auto add = migraphx::add_common_op(*mm, migraphx::make_op("add"), {x, large});
mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), add);
return p;
}
};
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/common.hpp>
struct test_softmax_large2 : verify_program<test_softmax_large2>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 4}});
auto large = mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {-10000}});
auto add = migraphx::add_common_op(*mm, migraphx::make_op("add"), {x, large});
mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), add);
return p;
}
};
@@ -21,23 +21,23 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
-#ifndef MIGRAPHX_GUARD_RTGLIB_DEVICE_SOFTMAX_HPP
-#define MIGRAPHX_GUARD_RTGLIB_DEVICE_SOFTMAX_HPP
-
-#include <migraphx/argument.hpp>
-#include <migraphx/config.hpp>
-#include <hip/hip_runtime_api.h>
-
-namespace migraphx {
-inline namespace MIGRAPHX_INLINE_NS {
-namespace gpu {
-namespace device {
-
-void softmax(hipStream_t stream, const argument& result, const argument& arg, int64_t axis);
-
-} // namespace device
-} // namespace gpu
-} // namespace MIGRAPHX_INLINE_NS
-} // namespace migraphx
-
-#endif
+#include "verify_program.hpp"
+#include <migraphx/program.hpp>
+#include <migraphx/generate.hpp>
+#include <migraphx/make_op.hpp>
+#include <migraphx/common.hpp>
+
+struct test_softmax_large3 : verify_program<test_softmax_large3>
+{
+    migraphx::program create_program() const
+    {
+        migraphx::program p;
+        auto* mm = p.get_main_module();
+        auto x = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {2, 4}});
+        auto large = mm->add_literal({migraphx::shape{migraphx::shape::float_type}, {100}});
+        auto add = migraphx::add_common_op(*mm, migraphx::make_op("mul"), {x, large});
+        mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), add);
+        return p;
+    }
+};
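These three tests push softmax inputs far outside the comfortable range of exp; a numerically stable implementation subtracts the row maximum first, since softmax is shift-invariant. A minimal sketch of that trick (not MIGraphX's actual kernel):

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Stable softmax over one row: softmax(x) == softmax(x - max(x)), but the
// shifted form never feeds exp a large positive value, avoiding overflow.
std::vector<float> softmax(std::vector<float> x)
{
    float m   = *std::max_element(x.begin(), x.end());
    float sum = 0.0f;
    for(auto& v : x)
    {
        v = std::exp(v - m);
        sum += v;
    }
    for(auto& v : x)
        v /= sum;
    return x;
}
```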
@@ -25,43 +25,34 @@
 #include "verify_program.hpp"
 #include <migraphx/program.hpp>
 #include <migraphx/generate.hpp>
-#include <migraphx/serialize.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/op/batch_norm_inference.hpp>
+#include <migraphx/apply_alpha_beta.hpp>
 
-struct test_batchnorm_2d_per_actv : verify_program<test_batchnorm_2d_per_actv>
+struct test_unbatched_gemm_1 : verify_program<test_unbatched_gemm_1>
 {
-    const size_t d1       = 2;
-    const size_t d2       = 4;
-    const size_t channels = 2;
-    const size_t batches  = 3;
-
     migraphx::program create_program() const
     {
         migraphx::program p;
         auto* mm = p.get_main_module();
-        migraphx::shape s{migraphx::shape::float_type, {batches, channels, d1, d2}};
-        migraphx::shape vars{migraphx::shape::float_type, {channels, d1, d2}};
-        auto x        = mm->add_parameter("x", s);
-        auto scale    = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 1)));
-        auto bias     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 2)));
-        auto mean     = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 3)));
-        auto variance = mm->add_literal(migraphx::abs(migraphx::generate_literal(vars, 4)));
-        mm->add_instruction(
-            migraphx::make_op(
-                "batch_norm_inference",
-                {{"epsilon", 1.0e-6},
-                 {"momentum", 0.9f},
-                 {"bn_mode",
-                  migraphx::to_value(migraphx::op::batch_norm_inference::per_activation)}}),
-            x,
-            scale,
-            bias,
-            mean,
-            variance);
+        migraphx::shape m1_shape{migraphx::shape::float_type, {2, 32, 64}};
+        migraphx::shape m2_shape{migraphx::shape::float_type, {64, 64}};
+        migraphx::shape m3_shape{migraphx::shape::float_type, {2, 32, 192}};
+        auto l1 = mm->add_parameter("1", m1_shape);
+        auto l2 = mm->add_literal(migraphx::generate_literal(m2_shape));
+        l2 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 64, 64}}}),
+                                 l2);
+        auto l3 = mm->add_literal(migraphx::generate_literal(m2_shape));
+        l3 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 64, 64}}}),
+                                 l3);
+        auto l4 = mm->add_literal(migraphx::generate_literal(m2_shape));
+        l4 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 64, 64}}}),
+                                 l4);
+        auto concat = mm->add_instruction(migraphx::make_op("concat", {{"axis", 2}}), l2, l3, l4);
+        auto l5     = mm->add_parameter("3", m3_shape);
+        float alpha = 1.0f;
+        float beta  = 1.0f;
+        migraphx::add_apply_alpha_beta(
+            *mm, {l1, concat, l5}, migraphx::make_op("dot"), alpha, beta);
         return p;
     }
 };
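Here add_apply_alpha_beta builds the familiar GEMM epilogue around the dot: concatenating three broadcast {2, 64, 64} literals along axis 2 gives B with shape {2, 64, 192}, so with A = l1 ({2, 32, 64}) and C = l5 ({2, 32, 192}) the program computes

```latex
D = \alpha \, (A \cdot B) + \beta \, C, \qquad \alpha = \beta = 1
```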
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/apply_alpha_beta.hpp>
struct test_unbatched_gemm_2 : verify_program<test_unbatched_gemm_2>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape m1_shape{migraphx::shape::float_type, {4, 32, 64}};
migraphx::shape m2_shape{migraphx::shape::float_type, {64, 64}};
auto l1 = mm->add_parameter("1", m1_shape);
auto l2 = mm->add_literal(migraphx::generate_literal(m2_shape));
l2 = mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {4, 64, 64}}}),
l2);
mm->add_instruction(migraphx::make_op("dot"), l1, l2);
return p;
}
};
@@ -116,6 +116,9 @@ def main():
     model = migraphx.parse_onnx(model_name, default_dim_value=batch)
 
+    if args.verbose:
+        print(model)
+
     model.compile(migraphx.get_target('gpu'), offload_copy=False)
 
     params = {}
...
@@ -21,5 +21,5 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
 #####################################################################################
-numpy==1.18.5
+numpy==1.21.6
 onnxruntime==1.10.0
@@ -21,7 +21,10 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 # THE SOFTWARE.
 #####################################################################################
-import string, sys, re, runpy
+import string
+import sys
+import re
+import runpy
 from functools import wraps
 from typing import Any, Callable, Dict, List, Optional, Tuple, Union
@@ -308,18 +311,39 @@ class Parameter:
         return self.substitute('${type} ${name}', prefix=prefix)
 
     def virtual_output_args(self, prefix: Optional[str] = None) -> List[str]:
-        return [
-            '&{prefix}{n}'.format(prefix=prefix or '', n=n)
-            for t, n in self.cparams
-        ]
+        container_type = self.type.remove_generic().basic().str()
+        decl_list: List[str] = []
+        container = (container_type == "std::vector"
+                     or container_type == "vector")
+        for t, n, in self.cparams:
+            if not decl_list and container:
+                decl_list.append('{prefix}{n}.data()'.format(prefix=prefix
+                                                             or '',
+                                                             n=n))
+            else:
+                decl_list.append('&{prefix}{n}'.format(prefix=prefix or '',
+                                                       n=n))
+        return decl_list
 
     def virtual_output_declarations(self,
                                     prefix: Optional[str] = None) -> List[str]:
-        return [
-            'std::remove_pointer_t<{type}> {prefix}{n};'.format(
-                type=Type(t).str(), prefix=prefix or '', n=n)
-            for t, n in self.cparams
-        ]
+        container_type = self.type.remove_generic().basic().str()
+        container = (container_type == "std::vector"
+                     or container_type == "vector")
+        decl_list: List[str] = []
+        for t, n, in self.cparams:
+            if not decl_list and container:
+                inner_t = self.type.inner_type()
+                if inner_t:
+                    decl_list.append(
+                        'std::array<{inner_t}, 1024> {prefix}{n};'.format(
+                            inner_t=inner_t.str(), prefix=prefix or '', n=n))
+            else:
+                decl_list.append(
+                    'std::remove_pointer_t<{type}> {prefix}{n}'.format(
+                        type=Type(t).str(), prefix=prefix or '', n=n))
+                decl_list[-1] += '=1024;' if container else ';'
+        return decl_list
 
     def virtual_output(self, prefix: Optional[str] = None) -> str:
         write = self.virtual_write
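The net effect for a virtual function whose output parameter is a std::vector: the generated C++ now declares a fixed-capacity buffer plus a size variable and passes buffer.data()/&size instead of the address of each cparam. A hypothetical rendering of the emitted code (names invented for illustration):

```cpp
#include <array>
#include <cstddef>
#include <type_traits>

// Plausible shape of the code the generator now emits for a virtual output
// parameter of type std::vector<const char*> named "names":
void example(void (*f)(const char**, size_t*))
{
    std::array<const char*, 1024> names;              // buffer declaration
    std::remove_pointer_t<size_t*> names_size = 1024; // size in/out, capped at 1024
    f(names.data(), &names_size); // virtual_output_args: .data() for the buffer, & for the size
}
```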
@@ -694,9 +718,14 @@ def generate_cpp_header() -> str:
         [c.generate() for c in cpp_classes])
 
+c_type_map: Dict[str, Type] = {}
+
 
-def cwrap(name: str) -> Callable:
+def cwrap(name: str, c_type: Optional[str] = None) -> Callable:
     def with_cwrap(f):
         type_map[name] = f
+        if c_type:
+            c_type_map[name] = Type(c_type)
 
         @wraps(f)
         def decorated(*args, **kwargs):
@@ -917,6 +946,9 @@ def vector_c_wrap(p: Parameter) -> None:
     # Not a generic type
     if not inner:
         return
+    if inner.str() in c_type_map:
+        inner = c_type_map[inner.str()]
     t = inner.add_pointer()
     if p.type.is_reference():
         if p.type.is_const():
@@ -927,6 +959,12 @@ def vector_c_wrap(p: Parameter) -> None:
             p.add_size_param()
             p.bad_param('${name} == nullptr or ${size} == nullptr',
                         'Null pointer')
+        elif p.virtual:
+            p.add_param(t)
+            p.add_size_param()
+            p.bad_param('${name} == nullptr or ${size} == nullptr',
+                        'Null pointer')
+            p.virtual_write = '{${name}.begin(), ${name}.begin()+${size}}; // cppcheck-suppress returnDanglingLifetime'
         else:
             p.add_param(t)
             p.bad_param('${name} == nullptr', 'Null pointer')
@@ -946,7 +984,7 @@ def vector_c_wrap(p: Parameter) -> None:
     p.write = ['std::copy(${result}.begin(), ${result}.end(), ${name})']
 
 
-@cwrap('std::string')
+@cwrap('std::string', 'char*')
 def string_c_wrap(p: Parameter) -> None:
     t = Type('char*')
     if p.returns:
@@ -1061,9 +1099,9 @@ struct ${ctype} {
 c_api_virtual_impl = Template('''
 ${return_type} ${name}(${params}) const
 {
-    ${output_decls}
     if (${fname} == nullptr)
         throw std::runtime_error("${name} function is missing.");
+    ${output_decls}
     std::array<char, 256> exception_msg;
     exception_msg.front() = '\\0';
     auto api_error_result = ${fname}(${args});
...
@@ -21,6 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
+#include <migraphx/execution_environment.hpp>
 #include <migraphx/migraphx.h>
 #include <migraphx/rank.hpp>
 #include <migraphx/shape.hpp>
@@ -166,6 +167,13 @@ void set_output_names(tf_options& options, std::vector<const char*> names)
     options.output_node_names = std::vector<std::string>(names.begin(), names.end());
 }
 
+std::vector<argument>
+run_async(program& p, const parameter_map& params, void* s, std::string_view name)
+{
+    execution_environment exec_env{any_ptr(s, name), true};
+    return p.eval(params, exec_env);
+}
+
 template <class Value>
 std::vector<const char*> get_names(const std::unordered_map<std::string, Value>& m)
 {
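run_async wraps a raw queue pointer in an any_ptr tagged with the queue's type name and asks eval to run without a blocking finish. A hedged usage sketch (the "ihipStream_t" type-name string is an assumption, not verified against the GPU target here):

```cpp
// Hypothetical call site: run an already-compiled program `prog` with a
// filled parameter_map `params` on a caller-owned HIP stream.
std::vector<argument> run_on_stream(program& prog, const parameter_map& params)
{
    hipStream_t stream;
    hipStreamCreate(&stream);
    auto outputs = run_async(prog, params, stream, "ihipStream_t");
    hipStreamSynchronize(stream); // results are valid once the stream drains
    hipStreamDestroy(stream);
    return outputs;
}
```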
@@ -265,11 +273,18 @@ struct experimental_custom_op
 template <class CustomOp>
 struct custom_operation
 {
     template <class Self, class F>
     static auto reflect(Self&, F)
     {
         return pack();
     }
 
+    value attributes() const
+    {
+        return {{"custom_op", true}, {"target", op.runs_on_offload_target() ? "gpu" : "cpu"}};
+    }
+
     CustomOp op;
 
     std::string name() const { return op.xobject.name; }
@@ -284,6 +299,23 @@ struct custom_operation
     {
         return op.compute(std::move(ctx), std::move(output_shape), std::move(inputs));
     }
 
+    std::ptrdiff_t output_alias(std::vector<shape> inputs) const
+    {
+        auto alias_vec = op.output_alias(std::move(inputs));
+        // TODO: For now, only support one output alias
+        if(alias_vec.empty())
+        {
+            return -1;
+        }
+        if(alias_vec.size() > 1)
+        {
+            MIGRAPHX_THROW("Currently, CustomOps in MIGraphX only supports one output_alias");
+        }
+        return alias_vec.front();
+    }
+
+    bool runs_on_offload_target() const { return op.runs_on_offload_target(); }
 };
 
 template <class CustomOp>
...
@@ -26,7 +26,6 @@
 #include <stdlib.h>
 #include <stdbool.h>
 
-// Add new types here
 // clang-format off
 #define MIGRAPHX_SHAPE_VISIT_TYPES(m) \
...
@@ -66,12 +66,21 @@ any_ptr get_queue_context(T&)
 {
     return {};
 }
 
+template <class T>
+void wait_for_context(T&, any_ptr)
+{
+}
+
+template <class T>
+void finish_on_context(T&, any_ptr){}
+
 <%
 interface('context',
     virtual('to_value', returns = 'value', const = True, default = 'to_value_context'),
     virtual('from_value', v = 'const value&', default = 'from_value_context'),
     virtual('get_queue', returns = 'any_ptr', default = 'get_queue_context'),
+    virtual('wait_for', queue = 'any_ptr', returns = 'void', default = 'wait_for_context'),
+    virtual('finish_on', queue = 'any_ptr', returns = 'void', default = 'finish_on_context'),
     virtual('finish', returns = 'void', const = True)) %>
 
 inline void migraphx_to_value(value& v, const context& ctx)
...
@@ -32,6 +32,8 @@
 #include <utility>
 #include <unordered_map>
 #include <migraphx/reflect.hpp>
+#include <migraphx/dyn_output.hpp>
+#include <migraphx/functional.hpp>
 #include <migraphx/streamutils.hpp>
 #include <migraphx/normalize_attributes.hpp>
 #include <migraphx/argument.hpp>
@@ -199,9 +201,12 @@ auto compute_op(rank<1>,
                 context& ctx,
                 const shape& output_shape,
                 const std::vector<argument>& input)
-    -> decltype(x.compute(auto_any_cast(ctx), output_shape, input))
+    -> decltype(x.compute(auto_any_cast(ctx),
+                          make_compute_output_shape(pack(x, output_shape, input)),
+                          input))
 {
-    return x.compute(auto_any_cast(ctx), output_shape, input);
+    return x.compute(
+        auto_any_cast(ctx), make_compute_output_shape(pack(x, output_shape, input)), input);
 }
 
 template <class T>
@@ -220,9 +225,9 @@ compute_op(const T& x, context& ctx, const shape& output_shape, const std::vecto
 template <class T>
 auto compute_op(rank<1>, const T& x, const shape& output_shape, const std::vector<argument>& input)
-    -> decltype(x.compute(output_shape, input))
+    -> decltype(x.compute(make_compute_output_shape(pack(x, output_shape, input)), input))
 {
-    return x.compute(output_shape, input);
+    return x.compute(make_compute_output_shape(pack(x, output_shape, input)), input);
 }
 
 template <class T>
@@ -244,9 +249,11 @@ auto compute_op(rank<1>,
                 const shape& output,
                 const std::vector<argument>& inputs,
                 const std::vector<module_ref>& module_args,
-                F f) -> decltype(x.compute(output, inputs, module_args, f))
+                F f)
+    -> decltype(
+        x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f))
 {
-    return x.compute(output, inputs, module_args, f);
+    return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f);
 }
 
 template <class T, class F>
@@ -278,9 +285,17 @@ auto compute_op(rank<4>,
                 const shape& output,
                 const std::vector<argument>& inputs,
                 const std::vector<module_ref>& module_args,
-                F f) -> decltype(x.compute(auto_any_cast(ctx), output, inputs, module_args, f))
+                F f) -> decltype(x.compute(auto_any_cast(ctx),
+                                           make_compute_output_shape(pack(x, output, inputs)),
+                                           inputs,
+                                           module_args,
+                                           f))
 {
-    return x.compute(auto_any_cast(ctx), output, inputs, module_args, f);
+    return x.compute(auto_any_cast(ctx),
+                     make_compute_output_shape(pack(x, output, inputs)),
+                     inputs,
+                     module_args,
+                     f);
 }
 
 template <class T, class F>
@@ -290,9 +305,11 @@ auto compute_op(rank<3>,
                 const shape& output,
                 const std::vector<argument>& inputs,
                 const std::vector<module_ref>& module_args,
-                F f) -> decltype(x.compute(output, inputs, module_args, f))
+                F f)
+    -> decltype(
+        x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f))
 {
-    return x.compute(output, inputs, module_args, f);
+    return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs, module_args, f);
 }
 
 template <class T, class F>
@@ -302,9 +319,10 @@ auto compute_op(rank<2>,
                 const shape& output,
                 const std::vector<argument>& inputs,
                 const std::vector<module_ref>&,
-                F) -> decltype(x.compute(output, inputs))
+                F)
+    -> decltype(x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs))
 {
-    return x.compute(output, inputs);
+    return x.compute(make_compute_output_shape(pack(x, output, inputs)), inputs);
 }
 
 template <class T, class F>
@@ -314,9 +332,12 @@ auto compute_op(rank<1>,
                 const shape& output,
                 const std::vector<argument>& inputs,
                 const std::vector<module_ref>&,
-                F) -> decltype(x.compute(auto_any_cast(ctx), output, inputs))
+                F) -> decltype(x.compute(auto_any_cast(ctx),
+                                         make_compute_output_shape(pack(x, output, inputs)),
+                                         inputs))
 {
-    return x.compute(auto_any_cast(ctx), output, inputs);
+    return x.compute(
+        auto_any_cast(ctx), make_compute_output_shape(pack(x, output, inputs)), inputs);
 }
 
 template <class T, class F>
@@ -348,7 +369,8 @@ auto is_context_free_op(rank<1>,
                         const T& x,
                         const shape& output_shape,
                         const std::vector<argument>& input)
-    -> decltype(x.compute(output_shape, input), std::true_type{});
+    -> decltype(x.compute(make_compute_output_shape(pack(x, output_shape, input)), input),
+                std::true_type{});
 
 template <class T>
 auto is_context_free_op(rank<0>, const T&, const shape&, const std::vector<argument>&)
@@ -560,7 +582,7 @@ lifetime get_lifetime_op(const T&)
 inline bool operator!=(const operation& x, const operation& y)
 {
-    return !(x == y);
+    return not(x == y);
 }
 
 inline value
...
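Every compute_op overload now routes the static output shape through make_compute_output_shape, which is what lets an operator opt into receiving a dyn_output. A hedged sketch of an op using it (assuming dyn_output exposes a computed_shape member holding the shape evaluated from the actual input shapes, as the new dyn_output.hpp include suggests; the op itself is hypothetical):

```cpp
struct my_dynamic_op // illustrative only, not an op from this commit
{
    std::string name() const { return "my_dynamic_op"; }

    migraphx::shape compute_shape(std::vector<migraphx::shape> inputs) const
    {
        return inputs.front(); // placeholder shape rule
    }

    // Taking migraphx::dyn_output instead of const shape& means the output
    // shape seen here was computed at run time from the real input shapes.
    migraphx::argument compute(migraphx::dyn_output dyn_out,
                               const std::vector<migraphx::argument>& args) const
    {
        migraphx::argument result{dyn_out.computed_shape};
        (void)args; // ... fill result from args ...
        return result;
    }
};
```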
@@ -37,8 +37,10 @@
 #include <migraphx/compile_options.hpp>
 #include <migraphx/argument.hpp>
 #include <migraphx/rank.hpp>
+#include <migraphx/module_ref.hpp>
 #include <migraphx/support_metric.hpp>
 #include <migraphx/instruction_ref.hpp>
+#include <migraphx/supported_segments.hpp>
 
 namespace migraphx {
 inline namespace MIGRAPHX_INLINE_NS {
@@ -64,12 +66,12 @@ struct target
      */
     context get_context() const;
     /**
-     * @brief Check how well an instruction is supported on a target with the given metric
-     * @param ins Instruction to check if it's supported
-     * @param metric Used to define how the return value should be interpreted
-     * @return The value based on the chosen metric. Negative numbers mean unsupported
+     * @brief Get the ranges of instructions that are supported on a target
+     * @param module Module to check for supported instructions
+     * @param metric Used to define how the quality of the support should be measured
+     * @return the supported segments of the graph
      */
-    float is_supported(T&, instruction_ref ins, support_metric m) const;
+    supported_segments target_is_supported(T&, const_module_ref mod, support_metric metric) const;
     /**
      * @brief copy an argument to the current target.
      *
@@ -115,9 +117,9 @@ argument copy_from_target(T&, const argument& arg)
 }
 
 template <class T>
-float target_is_supported(T&, instruction_ref, support_metric)
+supported_segments target_find_supported(T&, const_module_ref, support_metric)
 {
-    return 0;
+    return {};
 }
 
 <%
@@ -125,7 +127,7 @@ interface('target',
     virtual('name', returns='std::string', const=True),
     virtual('get_passes', ctx='context&', options='const compile_options&', returns='std::vector<pass>', const=True),
     virtual('get_context', returns='context', const=True),
-    virtual('is_supported', returns='float', ins='instruction_ref', m='support_metric', const=True, default='target_is_supported'),
+    virtual('find_supported', returns='supported_segments', mod='const_module_ref', m='support_metric', const=True, default='target_find_supported'),
     virtual('copy_to',
         returns = 'argument',
        input = 'const argument&',
...