"test/assets/vscode:/vscode.git/clone" did not exist on "9bd633e3f92f4bbfb6bc2eb46f407018703ef75e"
Commit b2f12dae authored by turneram

Merge remote-tracking branch 'origin/develop' into ck-gemm-int8

parents 05122cc7 ce2adafd
@@ -81,7 +81,7 @@ struct roialign_compiler : compiler<roialign_compiler>
         // coord_trans_mode
         auto ctm = v.at("coordinate_transformation_mode").to<std::string>();
-        float rois_offset = (ctm == "output_half_pixel") ? -0.5f : 0.0f;
+        float rois_offset = (ctm == "half_pixel") ? -0.5f : 0.0f;
         options.params += " -DROIS_OFFSET=" + std::to_string(rois_offset);
         // spatial_scale
...
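For context on this fix: in the ONNX RoiAlign operator, coordinate_transformation_mode "half_pixel" applies a -0.5 pixel shift to the scaled ROI coordinates, while "output_half_pixel" is the legacy mode with no shift, so the offset must be keyed to "half_pixel". A minimal sketch of how the offset enters the coordinate math (function and variable names are illustrative, not the kernel's):

    # Sketch: how ROIS_OFFSET enters the RoiAlign coordinate computation.
    def roi_start(roi_coord, spatial_scale, ctm):
        # "half_pixel" shifts by -0.5; "output_half_pixel" is the legacy, unshifted mode.
        rois_offset = -0.5 if ctm == "half_pixel" else 0.0
        return roi_coord * spatial_scale + rois_offset

    assert roi_start(2.0, 0.5, "half_pixel") == 0.5
    assert roi_start(2.0, 0.5, "output_half_pixel") == 1.0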
@@ -682,11 +682,12 @@ struct mlir_program
         MIGRAPHX_THROW("Failed setting tuning key: " + *str);
     }

-    tuning_config get_tuning_config() MIGRAPHX_TIDY_CONST
+    tuning_config get_tuning_config(bool exhaustive) MIGRAPHX_TIDY_CONST
     {
         tuning_config tc;
         run_high_level_pipeline();
-        auto tuning_mode = RocmlirTuningParamSetKindFull;
+        auto tuning_mode =
+            exhaustive ? RocmlirTuningParamSetKindFull : RocmlirTuningParamSetKindQuick;
         if(enabled(MIGRAPHX_MLIR_TUNE_EXHAUSTIVE{}))
             tuning_mode = RocmlirTuningParamSetKindExhaustive;
         mlir_tuning_space params{mlirRockTuningSpaceCreate(mmodule.get(), tuning_mode)};
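The selection above now has three tiers: a quick tuning space by default, the full space when the caller requests exhaustive tuning, and the MIGRAPHX_MLIR_TUNE_EXHAUSTIVE environment variable overriding both. A small Python sketch of the precedence (the string values stand in for the rocMLIR enum constants):

    import os

    QUICK, FULL, EXHAUSTIVE = "quick", "full", "exhaustive"

    def select_tuning_mode(exhaustive_flag):
        mode = FULL if exhaustive_flag else QUICK
        # The environment variable is checked last, so it wins over the flag.
        if os.environ.get("MIGRAPHX_MLIR_TUNE_EXHAUSTIVE"):
            mode = EXHAUSTIVE
        return mode

    assert select_tuning_mode(False) == "quick"
    assert select_tuning_mode(True) == "full"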
@@ -914,15 +915,17 @@ instruction_ref insert_mlir(module& m,
     return m.insert_instruction(ins, co, refs);
 }

-tuning_config
-get_tuning_config_mlir(const context& migraphx_ctx, module m, const std::vector<shape>& inputs)
+tuning_config get_tuning_config_mlir(const context& migraphx_ctx,
+                                     module m,
+                                     const std::vector<shape>& inputs,
+                                     bool exhaustive)
 {
     adjust_param_shapes(m, inputs);
     mlir_program mp;
     mp.set_gpu_properties(migraphx_ctx);
     mp.parse(m);
-    return mp.get_tuning_config();
+    return mp.get_tuning_config(exhaustive);
 }

 #else
@@ -951,7 +954,7 @@ insert_mlir(module& m, instruction_ref, code_object_op co, const std::vector<ins
     return m.end();
 }

-tuning_config get_tuning_config_mlir(const context&, module, const std::vector<shape>&)
+tuning_config get_tuning_config_mlir(const context&, module, const std::vector<shape>&, bool)
 {
     return {};
 }
...
@@ -21,6 +21,7 @@
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
  */
+#include <migraphx/permutation.hpp>
 #include <migraphx/gpu/prefuse_ops.hpp>
 #include <migraphx/match/layernorm.hpp>
 #include <migraphx/check_shapes.hpp>
@@ -45,40 +46,42 @@ struct layernorm_base
     }

     shape compute_shape(std::vector<shape> inputs, std::vector<module_ref> mods) const
     {
-        std::size_t nargs = 1;
+        std::size_t nargs = N;
         if(not mods.empty())
         {
             auto* pm = mods.front();
-            nargs    = pm->get_parameter_names().size();
+            nargs += pm->get_parameter_names().size() - 1;
         }
-        check_shapes{inputs, static_cast<const Derived&>(*this)}.has(nargs + N);
-        auto s = inputs.at(0);
+        check_shapes{inputs, static_cast<const Derived&>(*this)}.has(nargs);
+        auto s = inputs.front();
         auto t = s.type();
         if(not mods.empty())
             t = mods.front()->get_output_shapes().front().type();
-        if(s.scalar())
-        {
-            return s;
-        }
-        else if(s.broadcasted())
-        {
-            return {t, s.lens()};
-        }
-        else
-        {
-            return s.with_lens(t, s.lens());
-        }
+
+        // Scalar output if all inputs are scalar
+        if(inputs.front().elements() == 1 and
+           all_of(inputs, [](const auto& ss) { return ss.scalar(); }))
+            return inputs.front();
+        auto l_s = shape::from_permutation(
+            t, s.lens(), find_permutation(std::vector<shape>(inputs.begin(), inputs.begin() + N)));
+        // just prelayernorm or preadd_layernorm
+        if(nargs <= N)
+            return l_s;
+        // else, layernorm + pointwise fusion, preserve layout of fused op
+        std::vector<shape> lp_s(inputs.begin() + N, inputs.end());
+        lp_s.insert(lp_s.begin(), l_s);
+        return shape::from_permutation(t, s.lens(), find_permutation(lp_s));
     }
 };

-struct layernorm : layernorm_base<layernorm, 0>
+struct layernorm : layernorm_base<layernorm, 1>
 {
     std::string name() const { return "gpu::prelayernorm"; }
 };
 MIGRAPHX_REGISTER_OP(layernorm);

-struct add_layernorm : layernorm_base<add_layernorm, 1>
+struct add_layernorm : layernorm_base<add_layernorm, 2>
 {
     std::string name() const { return "gpu::preadd_layernorm"; }
 };
...
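Two things change in compute_shape above: the expected argument count becomes N layernorm inputs plus the fused pointwise module's parameters minus one (assuming one module parameter is fed internally by the layernorm result rather than by an instruction input), and the output layout is derived from the inputs' actual strides instead of defaulting to a standard layout. A rough Python model of the argument accounting under that assumption:

    # Rough model of the nargs accounting in layernorm_base<Derived, N>.
    def expected_nargs(n, fused_module_params=None):
        nargs = n  # inputs consumed by the layernorm itself
        if fused_module_params is not None:
            # one module parameter is produced internally; the rest are extra inputs
            nargs += fused_module_params - 1
        return nargs

    assert expected_nargs(1) == 1      # gpu::prelayernorm, no fusion
    assert expected_nargs(2) == 2      # gpu::preadd_layernorm, no fusion
    assert expected_nargs(2, 3) == 4   # preadd_layernorm fused with a 3-parameter pointwise module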
-ae74a517b62baa6d973e46b5b51ac9a640512c46
+377f959c69e9f213cd4a8c71a5e80162a412989a
(New binary ONNX test models added: constant_no_attributes_test.onnx, constant_value_int_test.onnx, and constant_value_ints_test.onnx; raw protobuf bytes omitted.)
@@ -825,6 +825,76 @@ def constant_test():
     return ([node], [], [y])


+@onnx_test()
+def constant_value_float_test():
+    node = onnx.helper.make_node('Constant',
+                                 inputs=[],
+                                 outputs=[],
+                                 value_float=[1.0])
+
+    return ([node], [], [])
+
+
+@onnx_test()
+def constant_value_floats_test():
+    node = onnx.helper.make_node('Constant',
+                                 inputs=[],
+                                 outputs=[],
+                                 value_floats=[1.0, 2.0, 3.0])
+
+    return ([node], [], [])
+
+
+@onnx_test()
+def constant_value_int_test():
+    node = onnx.helper.make_node('Constant',
+                                 inputs=[],
+                                 outputs=[],
+                                 value_int=[1])
+
+    return ([node], [], [])
+
+
+@onnx_test()
+def constant_value_ints_test():
+    node = onnx.helper.make_node('Constant',
+                                 inputs=[],
+                                 outputs=[],
+                                 value_ints=[1, 2, 3])
+
+    return ([node], [], [])
+
+
+@onnx_test()
+def constant_no_attributes_test():
+    node = onnx.helper.make_node('Constant', inputs=[], outputs=[])
+
+    return ([node], [], [])
+
+
+@onnx_test()
+def constant_multiple_attributes_test():
+    x = np.array([0, 1, 2])
+    node = onnx.helper.make_node('Constant',
+                                 inputs=[],
+                                 outputs=[],
+                                 value_floats=[1.0, 2.0],
+                                 value_ints=[1, 2],
+                                 value=onnx.helper.make_tensor(
+                                     name='const_tensor',
+                                     data_type=TensorProto.FLOAT,
+                                     dims=x.shape,
+                                     vals=x.flatten().astype(float)))
+
+    return ([node], [], [])
+
+
 @onnx_test()
 def constant_fill_test():
     value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
...
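These tests exercise the ONNX rule that a Constant node must carry exactly one of its value attributes (value, value_float, value_floats, value_int, value_ints, ...); zero attributes and multiple attributes are both malformed models, which is why the parser tests below expect a throw for those two cases. A quick check with the onnx helper:

    # A Constant node is only well formed with exactly one value* attribute.
    import onnx

    good = onnx.helper.make_node('Constant', inputs=[], outputs=['y'],
                                 value_ints=[1, 2, 3])
    assert [a.name for a in good.attribute] == ['value_ints']

    bad = onnx.helper.make_node('Constant', inputs=[], outputs=['y'],
                                value_floats=[1.0, 2.0], value_ints=[1, 2])
    assert len(bad.attribute) == 2  # malformed per the ONNX spec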
@@ -930,6 +930,58 @@ TEST_CASE(constant_test)
     EXPECT(p == prog);
 }

+TEST_CASE(constant_value_float_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type, {1}}, {1.0f}});
+
+    auto prog = optimize_onnx("constant_value_float_test.onnx");
+    EXPECT(p == prog);
+}
+
+TEST_CASE(constant_value_floats_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    mm->add_literal(
+        migraphx::literal{migraphx::shape{migraphx::shape::float_type, {3}}, {1.0f, 2.0f, 3.0f}});
+
+    auto prog = optimize_onnx("constant_value_floats_test.onnx");
+    EXPECT(p == prog);
+}
+
+TEST_CASE(constant_value_int_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {1}}, {1}});
+
+    auto prog = optimize_onnx("constant_value_int_test.onnx");
+    EXPECT(p == prog);
+}
+
+TEST_CASE(constant_value_ints_test)
+{
+    migraphx::program p;
+    auto* mm = p.get_main_module();
+    mm->add_literal(
+        migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {3}}, {1, 2, 3}});
+
+    auto prog = optimize_onnx("constant_value_ints_test.onnx");
+    EXPECT(p == prog);
+}
+
+TEST_CASE(constant_no_attributes_test)
+{
+    EXPECT(test::throws([&] { optimize_onnx("constant_no_attributes_test.onnx"); }));
+}
+
+TEST_CASE(constant_multiple_attributes_test)
+{
+    EXPECT(test::throws([&] { optimize_onnx("constant_multiple_attributes_test.onnx"); }));
+}
+
 TEST_CASE(constant_fill_test)
 {
     migraphx::program p;
@@ -5899,7 +5951,13 @@ TEST_CASE(roialign_default_test)
     auto rois = mm->add_parameter("rois", srois);
     auto bi   = mm->add_parameter("batch_ind", sbi);
-    auto r    = mm->add_instruction(migraphx::make_op("roialign"), x, rois, bi);
+    // Due to the onnx model using opset 12, the coordinate_transformation_mode should be set to
+    // output_half_pixel
+    auto r = mm->add_instruction(
+        migraphx::make_op("roialign", {{"coordinate_transformation_mode", "output_half_pixel"}}),
+        x,
+        rois,
+        bi);
     mm->add_return({r});

     auto prog = migraphx::parse_onnx("roialign_default_test.onnx");
...
@@ -890,6 +890,50 @@ TEST_CASE(flatten_dyn_axis4)
                  input);
 }

+TEST_CASE(fill_static_int)
+{
+    migraphx::shape default_value{migraphx::shape::int64_type, {1}, {0}};
+    migraphx::shape data{migraphx::shape::int64_type, {3, 4, 4}};
+    expect_shape(migraphx::shape{migraphx::shape::int64_type, {3, 4, 4}},
+                 migraphx::make_op("fill"),
+                 default_value,
+                 data);
+}
+
+TEST_CASE(fill_static_float)
+{
+    migraphx::shape default_value{migraphx::shape::float_type, {1}, {0}};
+    migraphx::shape data{migraphx::shape::float_type, {4, 8}};
+    expect_shape(migraphx::shape{migraphx::shape::float_type, {4, 8}},
+                 migraphx::make_op("fill"),
+                 default_value,
+                 data);
+}
+
+TEST_CASE(fill_dyn_int)
+{
+    migraphx::shape default_value{migraphx::shape::int64_type, {1}, {0}};
+    migraphx::shape data{migraphx::shape::int64_type,
+                         {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}};
+    expect_shape(migraphx::shape{migraphx::shape::int64_type,
+                                 {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}},
+                 migraphx::make_op("fill"),
+                 default_value,
+                 data);
+}
+
+TEST_CASE(fill_dyn_float)
+{
+    migraphx::shape default_value{migraphx::shape::float_type, {1}, {0}};
+    migraphx::shape data{migraphx::shape::float_type,
+                         {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}};
+    expect_shape(migraphx::shape{migraphx::shape::float_type,
+                                 {{1, 4}, {4, 8, {4, 6, 8}}, {4, 8, {4, 6, 8}}}},
+                 migraphx::make_op("fill"),
+                 default_value,
+                 data);
+}
+
 TEST_CASE(gather)
 {
     {
...
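The fill tests encode a simple contract: the output shape equals the data argument's shape (including dynamic dimension ranges) and its type, with the scalar default_value broadcast into every element. As a numpy analogy for the static case (assuming fill behaves like numpy.full; the helper name is hypothetical):

    import numpy as np

    # Hypothetical numpy analogue of migraphx "fill" on static shapes.
    def fill(default_value, data):
        return np.full(data.shape, default_value, dtype=data.dtype)

    data = np.empty((3, 4, 4), dtype=np.int64)
    out = fill(np.int64(0), data)
    assert out.shape == (3, 4, 4) and out.dtype == np.int64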
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <migraphx/literal.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/module.hpp>
#include <migraphx/optimize_module.hpp>
#include <migraphx/pass_manager.hpp>
#include <migraphx/serialize.hpp>
#include <test.hpp>
void run_pass(migraphx::module& m) { migraphx::run_passes(m, {migraphx::optimize_module{}}); }
TEST_CASE(broadcast_transpose_inner_broadcast)
{
// first optimizes broadcast+transpose to just a broadcast,
// then finds inner broadcast to become mul+broadcast
migraphx::module m1;
{
auto l1 = m1.add_parameter("x", {migraphx::shape::float_type, {1}, {0}});
auto l2 = m1.add_parameter("y", {migraphx::shape::float_type, {1}, {0}});
auto mb1 =
m1.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 2, 3}}}), l1);
auto mb2 =
m1.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 3, 2}}}), l2);
auto t1 =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), mb1);
auto mul = m1.add_instruction(migraphx::make_op("mul"), mb2, t1);
m1.add_return({mul});
}
run_pass(m1);
migraphx::module m2;
{
auto l1 = m2.add_parameter("x", {migraphx::shape::float_type, {1}, {0}});
auto l2 = m2.add_parameter("y", {migraphx::shape::float_type, {1}, {0}});
auto mul = m2.add_instruction(migraphx::make_op("mul"), l2, l1);
auto mb =
m2.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 3, 2}}}), mul);
m2.add_return({mb});
}
EXPECT(m1 == m2);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
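The rewrite this test checks is value-preserving: broadcasting a scalar and then transposing equals broadcasting directly to the transposed dimensions, and multiplying two scalar broadcasts equals one broadcast of the scalar product. A numpy check of the before/after modules, using the shapes from the test (the scalar values are arbitrary):

    import numpy as np

    x = np.array([2.0], dtype=np.float32)
    y = np.array([3.0], dtype=np.float32)

    # m1: broadcast x to (2, 2, 3), transpose to (2, 3, 2), multiply by broadcast y
    before = np.broadcast_to(y, (2, 3, 2)) * \
        np.transpose(np.broadcast_to(x, (2, 2, 3)), (0, 2, 1))

    # m2: multiply the scalars first, then broadcast once
    after = np.broadcast_to(y * x, (2, 3, 2))

    assert np.array_equal(before, after)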
@@ -108,6 +108,34 @@ def disabled_tests_onnx_1_12_0(backend_test):
     backend_test.exclude(r'test_scatter_elements_with_duplicate_indices_cpu')


+def disabled_tests_onnx_1_13_0(backend_test):
+    # The following tests fail due to the CastLike operator being unsupported
+    backend_test.exclude(r'test_elu_default_expanded_ver18_cpu')
+    backend_test.exclude(r'test_elu_example_expanded_ver18_cpu')
+    backend_test.exclude(r'test_elu_expanded_ver18_cpu')
+    backend_test.exclude(r'test_hardsigmoid_default_expanded_ver18_cpu')
+    backend_test.exclude(r'test_hardsigmoid_example_expanded_ver18_cpu')
+    backend_test.exclude(r'test_hardsigmoid_expanded_ver18_cpu')
+    backend_test.exclude(r'test_leakyrelu_default_expanded_cpu')
+    backend_test.exclude(r'test_leakyrelu_example_expanded_cpu')
+    backend_test.exclude(r'test_leakyrelu_expanded_cpu')
+    backend_test.exclude(r'test_selu_default_expanded_ver18_cpu')
+    backend_test.exclude(r'test_selu_example_expanded_ver18_cpu')
+    backend_test.exclude(r'test_selu_expanded_ver18_cpu')
+    backend_test.exclude(r'test_thresholdedrelu_default_expanded_ver18_cpu')
+    backend_test.exclude(r'test_thresholdedrelu_example_expanded_ver18_cpu')
+    backend_test.exclude(r'test_thresholdedrelu_expanded_ver18_cpu')
+    backend_test.exclude(r'test_relu_expanded_ver18_cpu')
+    backend_test.exclude(r'test_softsign_example_expanded_ver18_cpu')
+    backend_test.exclude(r'test_softsign_expanded_ver18_cpu')
+
+
+def disabled_tests_onnx_1_14_0(backend_test):
+    # The following tests fail due to the CastLike operator being unsupported
+    backend_test.exclude(r'test_softplus_example_expanded_ver18_cpu')
+    backend_test.exclude(r'test_softplus_expanded_ver18_cpu')
+
+
 def create_backend_test(testname=None, target_device=None):
     if target_device is not None:
         c2.set_device(target_device)
@@ -334,6 +362,12 @@ def create_backend_test(testname=None, target_device=None):
     if version.parse(onnx.__version__) >= version.parse("1.12.0"):
         disabled_tests_onnx_1_12_0(backend_test)

+    if version.parse(onnx.__version__) >= version.parse("1.13.0"):
+        disabled_tests_onnx_1_13_0(backend_test)
+
+    if version.parse(onnx.__version__) >= version.parse("1.14.0"):
+        disabled_tests_onnx_1_14_0(backend_test)
+
     # import all test cases at global scope to make
     # them visible to python.unittest.
...
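The gating above uses packaging.version rather than raw string comparison because version strings do not order correctly lexicographically:

    from packaging import version

    assert version.parse("1.13.0") >= version.parse("1.9.0")  # semantic: correct
    assert not ("1.13.0" >= "1.9.0")  # lexicographic: "1" < "9" gives the wrong answer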
@@ -24,7 +24,7 @@
 #include <migraphx/instruction.hpp>
 #include <migraphx/literal.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/onnx.hpp>
+#include <migraphx/program.hpp>
 #include <migraphx/register_target.hpp>
 #include <migraphx/verify.hpp>
...
@@ -24,7 +24,7 @@
 #include <migraphx/instruction.hpp>
 #include <migraphx/literal.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/onnx.hpp>
+#include <migraphx/program.hpp>
 #include <migraphx/register_target.hpp>
 #include <migraphx/verify.hpp>
...
@@ -24,7 +24,7 @@
 #include <migraphx/instruction.hpp>
 #include <migraphx/literal.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/onnx.hpp>
+#include <migraphx/program.hpp>
 #include <migraphx/register_target.hpp>
 #include <migraphx/verify.hpp>
...
@@ -24,7 +24,7 @@
 #include <migraphx/instruction.hpp>
 #include <migraphx/literal.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/onnx.hpp>
+#include <migraphx/program.hpp>
 #include <migraphx/quantization.hpp>
 #include <migraphx/register_target.hpp>
 #include <migraphx/verify.hpp>
...
@@ -24,7 +24,7 @@
 #include <migraphx/instruction.hpp>
 #include <migraphx/literal.hpp>
 #include <migraphx/make_op.hpp>
-#include <migraphx/onnx.hpp>
+#include <migraphx/program.hpp>
 #include <migraphx/register_target.hpp>
 #include <migraphx/verify.hpp>
...