Unverified commit 913ae362 authored by Chris Austen, committed by GitHub

Merge branch 'develop' into optimize

parents f1e16656 b8c8d09b
......@@ -383,9 +383,9 @@ struct ref_gemm
std::string name() const { return "ref::dot"; }
shape compute_shape(const std::vector<shape>& inputs) const { return op.compute_shape(inputs); }
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
argument compute(context&, const dyn_output& dyn_out, std::vector<argument> args) const
{
argument result{output_shape};
argument result{dyn_out.computed_shape};
migemm(result, args[0], args[1], 1.0f, 0.0f);
return result;
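This and the softmax hunk below follow one mechanical migration: compute() now receives a dyn_output instead of a pre-resolved output shape, and allocates its result from dyn_out.computed_shape, the output shape resolved for this particular invocation (possibly from dynamic input dimensions). A minimal sketch of the pattern as it would sit in this same file, using a hypothetical ref_relu op that is not part of this diff:

// Hypothetical reference op sketching the dyn_output migration above.
// Only the compute() signature and the result allocation change; name(),
// compute_shape(), and the actual math are untouched.
struct ref_relu
{
    std::string name() const { return "ref::relu"; }
    shape compute_shape(const std::vector<shape>& inputs) const { return inputs.front(); }
    argument compute(context&, const dyn_output& dyn_out, std::vector<argument> args) const
    {
        // dyn_out.computed_shape is the shape computed for this call
        argument result{dyn_out.computed_shape};
        // ... elementwise max(x, 0) from args[0] into result ...
        return result;
    }
};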
......@@ -449,10 +449,10 @@ struct ref_softmax : auto_register_op<ref_softmax<Op>>
{
return op.normalize_compute_shape(inputs);
}
argument compute(context&, const shape& output_shape, std::vector<argument> args) const
argument compute(context&, const dyn_output& dyn_out, std::vector<argument> args) const
{
argument result{output_shape};
auto batch_lens = output_shape.lens();
argument result{dyn_out.computed_shape};
auto batch_lens = dyn_out.computed_shape.lens();
int64_t tuned_axis = tune_axis(args[0].get_shape().lens().size(), op.axis, op.name());
std::size_t n_dims = batch_lens[tuned_axis];
batch_lens[tuned_axis] = 1;
......@@ -475,7 +475,7 @@ struct ref_softmax : auto_register_op<ref_softmax<Op>>
for(std::size_t j = 0; j < n_dims; ++j)
{
idx[tuned_axis] = j;
std::size_t index = output_shape.index(idx);
std::size_t index = dyn_out.computed_shape.index(idx);
output[index] = std::exp(input[index] - batch_max[i]);
}
......
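The softmax hunk leans on tune_axis to turn the user-facing axis attribute into a usable index. A sketch of its assumed behavior (normalize a possibly negative axis into [0, n_dims) and fail loudly otherwise); the real helper lives elsewhere in MIGraphX and may differ in detail:

#include <cstddef>
#include <cstdint>
#include <stdexcept>
#include <string>
// Assumed semantics of tune_axis as used above: axis = -1 on a 4-D tensor
// becomes 3; out-of-range axes raise an error naming the offending op.
inline int64_t tune_axis_sketch(std::size_t n_dims, int64_t axis, const std::string& op_name)
{
    auto n = static_cast<int64_t>(n_dims);
    if(axis < 0)
        axis += n;
    if(axis < 0 or axis >= n)
        throw std::runtime_error(op_name + ": axis out of range");
    return axis;
}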
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <test.hpp>
#include <migraphx/argument.hpp>
#include <migraphx/gpu/hip.hpp>
#include <migraphx/gpu/target.hpp>
TEST_CASE(tuple_to_from_gpu)
{
migraphx::shape s1{migraphx::shape::float_type, {2, 3}};
migraphx::shape s2{migraphx::shape::int32_type, {2, 4}};
std::vector<float> p1_data = {1.1, 2.2, 3.3, 4.4, 5.5, 6.6};
std::vector<int> p2_data = {1, 2, 3, 4, 5, 6, 7, 8};
auto p1 = migraphx::argument{s1, p1_data.data()};
auto p2 = migraphx::argument{s2, p2_data.data()};
auto p1_gpu = migraphx::gpu::to_gpu(p1);
auto p2_gpu = migraphx::gpu::to_gpu(p2);
auto p_tuple = migraphx::gpu::from_gpu(migraphx::argument({p1_gpu, p2_gpu}));
std::vector<migraphx::argument> results = p_tuple.get_sub_objects();
std::vector<float> result1;
results[0].visit([&](auto output) { result1.assign(output.begin(), output.end()); });
std::vector<int> result2;
results[1].visit([&](auto output) { result2.assign(output.begin(), output.end()); });
EXPECT(result1 == p1_data);
EXPECT(result2 == p2_data);
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
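Two idioms here recur throughout these tests. migraphx::argument({p1_gpu, p2_gpu}) builds a tuple argument from sub-arguments, which from_gpu round-trips as a unit and get_sub_objects() unpacks on the host. And visit() dispatches on the argument's runtime element type, handing the lambda a typed tensor view so one lambda covers float and int32 alike. A small self-contained helper showing the visit() idiom:

#include <vector>
#include <migraphx/argument.hpp>
// visit() passes a typed view that iterates in logical (shape) order, so
// the same lambda copies out host values for any element type.
inline std::vector<float> to_host_floats(const migraphx::argument& a)
{
    std::vector<float> host;
    a.visit([&](auto view) { host.assign(view.begin(), view.end()); });
    return host;
}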
......@@ -140,7 +140,7 @@ TEST_CASE(conv)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
func.func @main(%arg0: tensor<2x8x3x3xf32>, %arg1: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.convolution(%arg1, %arg0) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
return %0 : tensor<1x2x2x2xf32>
}
......@@ -163,7 +163,7 @@ TEST_CASE(conv_add_relu)
{
const std::string mlir_output = R"__migraphx__(
module {
func.func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {kernel = "mixr"} {
func.func @main(%arg0: tensor<1x2x2x2xf32>, %arg1: tensor<2x8x3x3xf32>, %arg2: tensor<1x8x4x4xf32>) -> tensor<1x2x2x2xf32> attributes {arch = "", kernel = "mixr"} {
%0 = migraphx.convolution(%arg2, %arg1) {dilation = [1, 1], group = 1 : i64, padding = [0, 0, 0, 0], padding_mode = 0 : i64, stride = [1, 1]} : (tensor<1x8x4x4xf32>, tensor<2x8x3x3xf32>) -> tensor<1x2x2x2xf32>
%1 = migraphx.add(%0, %arg0) : (tensor<1x2x2x2xf32>, tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
%2 = migraphx.relu(%1) : (tensor<1x2x2x2xf32>) -> tensor<1x2x2x2xf32>
......
......@@ -21,28 +21,31 @@
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#ifndef MIGRAPHX_GUARD_RTGLIB_INT_DIVIDE_HPP
#define MIGRAPHX_GUARD_RTGLIB_INT_DIVIDE_HPP
#include <migraphx/config.hpp>
#include <cmath>
namespace migraphx {
inline namespace MIGRAPHX_INLINE_NS {
template <class R, class T, class U>
R floor_divide(T x, U y)
{
    return R(std::floor(double(x) / double(y)));
}
template <class R, class T, class U>
R ceil_divide(T x, U y)
{
    return R(std::ceil(double(x) / double(y)));
}
} // namespace MIGRAPHX_INLINE_NS
} // namespace migraphx
#endif

#include <migraphx/instruction.hpp>
#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
#include "test.hpp"
TEST_CASE(check_undefined)
{
    migraphx::module m;
    auto und = m.add_instruction(migraphx::make_op("undefined"));
    auto cov = m.add_instruction(
        migraphx::make_op("convert", {{"target_type", migraphx::shape::half_type}}), und);
    auto abs = m.add_instruction(migraphx::make_op("abs"), cov);
    migraphx::shape xs{migraphx::shape::float_type, {2, 3}};
    std::vector<float> datax = {1, 2, 3, 4, 5, 6};
    auto lit = m.add_literal(migraphx::literal(xs, datax));
    auto mul = m.add_instruction(migraphx::make_op("mul"), lit, lit);
    EXPECT(und->is_undefined());
    EXPECT(cov->is_undefined());
    EXPECT(abs->is_undefined());
    EXPECT(not lit->is_undefined());
    EXPECT(not mul->is_undefined());
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
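Read together, the expectations encode a propagation rule, inferred here from the test itself rather than from separate documentation: an instruction is undefined when it is the undefined op or when everything it consumes is undefined, and a single defined input, such as a literal, breaks the chain. A standalone sketch of that assumed rule:

#include <algorithm>
#include <string>
#include <vector>
// Assumed semantics behind is_undefined(), inferred from the EXPECTs above;
// the real implementation may differ.
inline bool is_undefined_sketch(const std::string& op_name,
                                const std::vector<bool>& inputs_undefined)
{
    if(op_name == "undefined")
        return true;
    if(inputs_undefined.empty())
        return false; // literals and parameters have no inputs and are defined
    return std::all_of(
        inputs_undefined.begin(), inputs_undefined.end(), [](bool u) { return u; });
}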
......@@ -49,6 +49,25 @@ TEST_CASE(literal_test)
EXPECT(l4.empty());
}
TEST_CASE(literal_nstd_shape_vector)
{
migraphx::shape nstd_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
std::vector<float> data(12);
std::iota(data.begin(), data.end(), 0);
auto l0 = migraphx::literal{nstd_shape, data};
// check data buffer is read in correctly
std::vector<float> expected_buffer = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
const auto* start = reinterpret_cast<const float*>(l0.data());
std::vector<float> l0_data{start, start + 12};
EXPECT(l0_data == expected_buffer);
// check that visit(), which reads through a tensor view, returns the data in logical order
std::vector<float> results_vector(12);
l0.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(results_vector == data);
}
TEST_CASE(literal_os1)
{
migraphx::literal l{1};
......
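The stride arithmetic behind literal_nstd_shape_vector above is easy to verify by hand: for shape {1, 3, 2, 2} with strides {12, 1, 6, 3}, logical index (n, c, h, w) maps to buffer offset n*12 + c*1 + h*6 + w*3, so writing 0..11 in logical order scatters them into exactly the buffer order the test expects, while a tensor-view read (visit) walks them back out in logical order. A self-contained check of that mapping:

#include <cstddef>
#include <vector>
// Reproduce expected_buffer from the test above: place iota values 0..11 at
// the offsets implied by the non-standard strides {12, 1, 6, 3} (n == 0
// throughout, so the stride 12 never contributes).
inline std::vector<float> scatter_by_strides()
{
    std::vector<float> buffer(12);
    float value = 0;
    for(std::size_t c = 0; c < 3; ++c)
        for(std::size_t h = 0; h < 2; ++h)
            for(std::size_t w = 0; w < 2; ++w)
                buffer[c * 1 + h * 6 + w * 3] = value++;
    return buffer; // {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11}
}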
(Binary ONNX protobuf added: external_constant_test.onnx, not rendered. Recoverable fields: a Constant node whose tensor data is stored externally, with location external_constant_test.weight, offset 48, length 24.)
......@@ -181,6 +181,24 @@ TEST_CASE(argmax_test)
EXPECT(p == prog);
}
TEST_CASE(argmax_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"x",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {5, 5, 0}, {6, 6, 0}}});
auto ins = mm->add_instruction(migraphx::make_op("argmax", {{"axis", 2}}), l0);
auto ret = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {2}}}), ins);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = parse_onnx("argmax_dyn_test.onnx", options);
EXPECT(p == prog);
}
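This and the remaining dynamic-shape tests share one notation: each dimension is a triple, read here as {min, max, opt} (an assumption consistent with every use in this file), where min == max pins the size and opt = 0 declares no optimal-size hint. options.default_dyn_dim_value supplies that triple for every dimension the ONNX model leaves symbolic. Spelled out:

#include <migraphx/shape.hpp>
#include <migraphx/onnx.hpp>
// The argmax parameter above, annotated dimension by dimension.
inline migraphx::onnx_options dyn_batch_options()
{
    migraphx::shape dyn_x{migraphx::shape::float_type,
                          {{1, 4, 0},   // batch: anywhere from 1 to 4 at run time
                           {4, 4, 0},   // fixed at 4
                           {5, 5, 0},   // fixed at 5
                           {6, 6, 0}}}; // fixed at 6
    (void)dyn_x;
    // parse_onnx substitutes the default triple for each symbolic ONNX dimension:
    migraphx::onnx_options options;
    options.default_dyn_dim_value = {1, 4, 0};
    return options;
}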
TEST_CASE(argmin_test)
{
migraphx::program p;
......@@ -273,6 +291,51 @@ TEST_CASE(averagepool_3d_test)
EXPECT(p == prog);
}
TEST_CASE(averagepool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0, 0, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 3, 3}}}),
l0);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = migraphx::parse_onnx("averagepool_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(averagepool_dyn_autopad_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_autopad_error_test.onnx", options); }));
}
TEST_CASE(averagepool_dyn_asym_padding_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_asym_padding_error_test.onnx", options); }));
}
TEST_CASE(averagepool_dyn_cip_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_cip_error_test.onnx", options); }));
}
TEST_CASE(averagepool_notset_test)
{
migraphx::program p;
......@@ -1755,6 +1818,16 @@ migraphx::program create_external_data_prog()
return p;
}
TEST_CASE(external_constant_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{{migraphx::shape::int64_type, {3}}, {0, 1, 2}});
auto prog = optimize_onnx("external_constant_test.onnx");
EXPECT(p == prog);
}
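The metadata recoverable from the binary file earlier in this diff lines up with this test: the constant's data lives in external_constant_test.weight at offset 48 with length 24, and 24 bytes is exactly what the {3}-element int64 literal {0, 1, 2} built here requires. As a one-line sanity check:

#include <cstdint>
// length 24 in the external-data record == 3 int64 values, matching the
// int64_type shape {3} expected by external_constant_test.
static_assert(3 * sizeof(int64_t) == 24, "external blob holds exactly 3 int64s");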
TEST_CASE(external_data_test)
{
migraphx::program p = create_external_data_prog();
......@@ -1914,6 +1987,23 @@ TEST_CASE(flatten_nonstd_test)
EXPECT(p == prog);
}
TEST_CASE(flatten_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
auto ret = mm->add_instruction(migraphx::make_op("flatten", {{"axis", 2}}), c0);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = parse_onnx("flatten_dyn_test.onnx", options);
EXPECT(p == prog);
}
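Note the contiguous inserted ahead of flatten; the squeeze/unsqueeze test later in this diff wraps each reshape-like op the same way. A plausible rationale, stated here as an assumption: a dynamically shaped input may arrive with non-standard strides, and flatten's index math wants a packed, standard layout, so the parser normalizes first. The pattern, condensed into a helper:

#include <migraphx/program.hpp>
#include <migraphx/make_op.hpp>
// Assumed rationale: force a packed, standard-strided buffer before an op
// that reinterprets the dimensions.
inline migraphx::instruction_ref flatten_packed(migraphx::module& m, migraphx::instruction_ref x)
{
    auto packed = m.add_instruction(migraphx::make_op("contiguous"), x);
    return m.add_instruction(migraphx::make_op("flatten", {{"axis", 2}}), packed);
}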
TEST_CASE(floor_test)
{
migraphx::program p;
......@@ -2144,6 +2234,28 @@ TEST_CASE(globalavgpool_test)
EXPECT(p == prog);
}
TEST_CASE(globalavgpool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"lengths", {16, 16}},
{"padding", {0, 0, 0, 0}}}),
input);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = parse_onnx("globalavgpool_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(globallppool_test)
{
migraphx::program p;
......@@ -2161,6 +2273,29 @@ TEST_CASE(globallppool_test)
EXPECT(p == prog);
}
TEST_CASE(globallppool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {16, 32, 0}, {16, 32, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::lpnorm},
{"dyn_global", true},
{"padding", {0, 0, 0, 0}},
{"lengths", {}}}),
input);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {16, 32, 0};
auto prog = migraphx::parse_onnx("globallppool_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(globalmaxpool_test)
{
migraphx::program p;
......@@ -2178,6 +2313,28 @@ TEST_CASE(globalmaxpool_test)
EXPECT(p == prog);
}
TEST_CASE(globalmaxpool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"lengths", {32, 32}},
{"padding", {0, 0, 0, 0}}}),
input);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = parse_onnx("globalmaxpool_dyn_test.onnx", options);
EXPECT(p == prog);
}
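The three global-pooling tests differ in one telling way. When the spatial extents are pinned ({16, 16, 0} or {32, 32, 0}), the parser bakes the window into lengths at parse time; when they are genuine ranges ({16, 32, 0} in the lppool case), it emits dyn_global = true with empty lengths, deferring the window to the run-time input shape. Side by side, with attribute values copied from the tests above:

#include <migraphx/make_op.hpp>
#include <migraphx/op/common.hpp> // migraphx::op::pooling_mode (header assumed)
// Pinned spatial dims: the window is known at parse time.
inline auto static_global_pool()
{
    return migraphx::make_op("pooling",
                             {{"mode", migraphx::op::pooling_mode::max},
                              {"lengths", {32, 32}},
                              {"padding", {0, 0, 0, 0}}});
}
// Ranged spatial dims: the window is derived from the actual input at run time.
inline auto dynamic_global_pool()
{
    return migraphx::make_op("pooling",
                             {{"mode", migraphx::op::pooling_mode::lpnorm},
                              {"dyn_global", true},
                              {"lengths", {}},
                              {"padding", {0, 0, 0, 0}}});
}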
TEST_CASE(greater_test)
{
migraphx::program p;
......@@ -5492,6 +5649,23 @@ TEST_CASE(softmax_nonstd_input_test)
EXPECT(p == prog);
}
TEST_CASE(softmax_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {4, 4, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), l0);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = migraphx::parse_onnx("softmax_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(softplus_test)
{
migraphx::program p;
......@@ -5690,6 +5864,29 @@ TEST_CASE(squeeze_unsqueeze_test)
EXPECT(p == prog);
}
TEST_CASE(squeeze_unsqueeze_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<int64_t> squeeze_axes{0, 2, 3, 5};
std::vector<int64_t> unsqueeze_axes{0, 1, 3, 5};
auto l0 = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type,
{{1, 1, 0}, {1, 4, 0}, {1, 1, 0}, {1, 1, 0}, {1, 4, 0}, {1, 1, 0}}});
auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
auto l1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", squeeze_axes}}), c0);
auto c1 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
auto ret = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", unsqueeze_axes}}), c1);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = parse_onnx("squeeze_unsqueeze_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(squeeze_axes_input_test)
{
migraphx::program p;
......@@ -5973,6 +6170,24 @@ TEST_CASE(transpose_test)
EXPECT(p == prog);
}
TEST_CASE(transpose_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
std::vector<int64_t> perm{0, 3, 1, 2};
auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
mm->add_return({t0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(topk_attrk_test)
{
migraphx::program p;
......