"docs/en_US/networkmorphismTuner.md" did not exist on "c76dcce59f1ad463e853871b88ec2d556f8a3939"
Commit 6be3baa1 authored by Alan Turner

Merge

parents 3d4fb6ae 214b313f
@@ -33,38 +33,6 @@
namespace migraphx {
template <class T>
struct implicit_conversion_op
{
T x;
template <index_int N, class U>
constexpr operator vec<U, N>() const
{
if constexpr(vec_size<T>() == 0)
{
return x;
}
else
{
static_assert(vec_size<T>() == N, "Vector size mismatch");
return __builtin_convertvector(x, vec<U, N>);
}
}
template <class U>
constexpr operator U() const
{
return x;
}
};
template <class T>
constexpr implicit_conversion_op<T> implicit_conversion(T x)
{
return {x};
}
template <class F, class T, class... Ts>
__device__ void pointwise_tensor(index idx, F f, T out, Ts... xs)
{
......
@@ -185,5 +185,37 @@ constexpr auto vec_reduce(T x, Op op)
}
}
template <class T>
struct implicit_conversion_op
{
T x;
template <index_int N, class U>
constexpr operator vec<U, N>() const
{
if constexpr(vec_size<T>() == 0)
{
return x;
}
else
{
static_assert(vec_size<T>() == N, "Vector size mismatch");
return __builtin_convertvector(x, vec<U, N>);
}
}
template <class U>
constexpr operator U() const
{
return x;
}
};
template <class T>
constexpr implicit_conversion_op<T> implicit_conversion(T x)
{
return {x};
}
} // namespace migraphx
#endif // MIGRAPHX_GUARD_KERNELS_VEC_HPP
@@ -51,17 +51,20 @@ struct layernorm_base
}
check_shapes{inputs, static_cast<const Derived&>(*this)}.has(nargs + N);
auto s = inputs.at(0);
auto t = s.type();
if(not mods.empty())
t = mods.front()->get_output_shapes().front().type();
if(s.scalar())
{
return s;
}
else if(s.broadcasted())
{
return {s.type(), s.lens()};
return {t, s.lens()};
}
else
{
return s.with_lens(s.lens());
return s.with_lens(t, s.lens());
}
}
};
......
@@ -148,8 +148,6 @@ std::vector<pass> target::get_passes(migraphx::context& gctx, const compile_opti
dead_code_elimination{},
pack_int8_args{},
dead_code_elimination{},
adjust_allocation{gpu_allocation_model{}},
dead_code_elimination{},
fuse_ops{&ctx, options.fast_math},
dead_code_elimination{},
replace_allocate{gpu_allocation_model{}, options.offload_copy},
......
@@ -49,6 +49,25 @@ TEST_CASE(literal_test)
EXPECT(l4.empty());
}
TEST_CASE(literal_nstd_shape_vector)
{
migraphx::shape nstd_shape{migraphx::shape::float_type, {1, 3, 2, 2}, {12, 1, 6, 3}};
std::vector<float> data(12);
std::iota(data.begin(), data.end(), 0);
auto l0 = migraphx::literal{nstd_shape, data};
// check data buffer is read in correctly
std::vector<float> expected_buffer = {0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11};
const auto* start = reinterpret_cast<const float*>(l0.data());
std::vector<float> l0_data{start, start + 12};
EXPECT(l0_data == expected_buffer);
// check that using visit() (that uses a tensor view) gives data in correct order
std::vector<float> results_vector(12);
l0.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(results_vector == data);
}
TEST_CASE(literal_os1)
{
migraphx::literal l{1};
......
@@ -238,6 +238,64 @@ def averagepool_3d_test():
return ([node], [x], [out])
@onnx_test
def averagepool_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[None, 3, 5, 5, 5])
out = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[None, 3, 3, 3, 3])
node = onnx.helper.make_node('AveragePool',
inputs=['0'],
outputs=['1'],
kernel_shape=[3, 3, 3])
return ([node], [x], [out])
@onnx_test
def averagepool_dyn_autopad_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 5, 5])
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_LOWER')
return ([node], [x], [y])
@onnx_test
def averagepool_dyn_asym_padding_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 3, 3])
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
strides=[2, 2],
pads=[0, 0, 1, 1])
return ([node], [x], [y])
@onnx_test
def averagepool_dyn_cip_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 1, 1])
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
count_include_pad=1)
return ([node], [x], [y])
@onnx_test
def averagepool_notset_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 1, 5, 5])
@@ -2107,6 +2165,21 @@ def globalavgpool_test():
return ([node], [x], [y])
@onnx_test
def globalavgpool_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[None, 3, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 1, 1])
node = onnx.helper.make_node(
'GlobalAveragePool',
inputs=['0'],
outputs=['1'],
)
return ([node], [x], [y])
@onnx_test
def globallppool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
@@ -2121,6 +2194,21 @@ def globallppool_test():
return ([node], [x], [y])
@onnx_test
def globallppool_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, None, None])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
node = onnx.helper.make_node(
'GlobalLpPool',
inputs=['0'],
outputs=['1'],
)
return ([node], [x], [y])
@onnx_test
def globalmaxpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
@@ -2135,6 +2223,21 @@ def globalmaxpool_test():
return ([node], [x], [y])
@onnx_test
def globalmaxpool_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[None, 3, 32, 32])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 1, 1])
node = onnx.helper.make_node(
'GlobalMaxPool',
inputs=['0'],
outputs=['1'],
)
return ([node], [x], [y])
@onnx_test
def greater_test():
ax1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
@@ -6315,6 +6418,21 @@ def transpose_test():
return ([node], [x], [y])
@onnx_test
def transpose_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 2, 3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [None, 3, 2, 2])
node = onnx.helper.make_node(
'Transpose',
perm=[0, 3, 1, 2],
inputs=['0'],
outputs=['1'],
)
return ([node], [x], [y])
@onnx_test
def transpose_gather_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
......
@@ -273,6 +273,51 @@ TEST_CASE(averagepool_3d_test)
EXPECT(p == prog);
}
TEST_CASE(averagepool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0",
{migraphx::shape::float_type, {{1, 4, 0}, {3, 3, 0}, {5, 5, 0}, {5, 5, 0}, {5, 5, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0, 0, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 3, 3}}}),
l0);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = migraphx::parse_onnx("averagepool_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(averagepool_dyn_autopad_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_autopad_error_test.onnx", options); }));
}
TEST_CASE(averagepool_dyn_asym_padding_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_asym_padding_error_test.onnx", options); }));
}
TEST_CASE(averagepool_dyn_cip_error_test)
{
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_cip_error_test.onnx", options); }));
}
TEST_CASE(averagepool_notset_test)
{
migraphx::program p;
@@ -2144,6 +2189,28 @@ TEST_CASE(globalavgpool_test)
EXPECT(p == prog);
}
TEST_CASE(globalavgpool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 0}, {16, 16, 0}, {16, 16, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"lengths", {16, 16}},
{"padding", {0, 0, 0, 0}}}),
input);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = parse_onnx("globalavgpool_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(globallppool_test)
{
migraphx::program p;
@@ -2161,6 +2228,29 @@ TEST_CASE(globallppool_test)
EXPECT(p == prog);
}
TEST_CASE(globallppool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 1, 0}, {3, 3, 0}, {16, 32, 0}, {16, 32, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::lpnorm},
{"dyn_global", true},
{"padding", {0, 0, 0, 0}},
{"lengths", {}}}),
input);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {16, 32, 0};
auto prog = migraphx::parse_onnx("globallppool_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(globalmaxpool_test)
{
migraphx::program p;
@@ -2178,6 +2268,28 @@ TEST_CASE(globalmaxpool_test)
EXPECT(p == prog);
}
TEST_CASE(globalmaxpool_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input =
mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 0}, {32, 32, 0}, {32, 32, 0}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"lengths", {32, 32}},
{"padding", {0, 0, 0, 0}}}),
input);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = parse_onnx("globalmaxpool_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(greater_test)
{
migraphx::program p;
@@ -5973,6 +6085,24 @@ TEST_CASE(transpose_test)
EXPECT(p == prog);
}
TEST_CASE(transpose_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter(
"0",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}, {2, 2, 0}, {3, 3, 0}}});
std::vector<int64_t> perm{0, 3, 1, 2};
auto t0 = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", perm}}), input);
mm->add_return({t0});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = migraphx::parse_onnx("transpose_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(topk_attrk_test)
{
migraphx::program p;
......
@@ -1549,16 +1549,76 @@ TEST_CASE(nms_shape)
score_thres_s);
}
TEST_CASE(pooling_shape)
TEST_CASE(pooling_shape0)
{
migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
throws_shape(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {1}},
{"stride", {0}},
{"lengths", {1}}}),
input);
}
TEST_CASE(pooling_shape1)
{
migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
migraphx::shape output{migraphx::shape::float_type, {4, 3, 1, 1}};
expect_shape(output,
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0}},
{"stride", {3, 3}},
{"lengths", {1, 1}}}),
input);
}
TEST_CASE(pooling_shape2)
{
migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
migraphx::shape output{migraphx::shape::float_type, {4, 3, 2, 2}};
expect_shape(output,
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0}},
{"stride", {3, 3}},
{"lengths", {1, 1}},
{"ceil_mode", true}}),
input);
}
TEST_CASE(pooling_shape3)
{
migraphx::shape input{migraphx::shape::float_type, {4, 3, 3, 3}};
migraphx::shape output{migraphx::shape::float_type, {4, 3, 3, 3}};
expect_shape(output,
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {2, 2}},
{"stride", {3, 3}},
{"lengths", {3, 3}},
{"ceil_mode", true}}),
input);
}
TEST_CASE(pooling_dyn_shape0)
{
migraphx::shape input{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
throws_shape(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {1}},
{"stride", {0}},
{"lengths", {1}}}),
input);
}
TEST_CASE(pooling_dyn_shape1)
{
migraphx::shape input{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 3}, {3, 3, 3}, {3, 3, 0}}};
migraphx::shape output{migraphx::shape::float_type,
{{1, 4, 0}, {3, 3, 3}, {1, 1, 1}, {1, 1, 0}}};
expect_shape(output,
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
@@ -1566,9 +1626,15 @@ TEST_CASE(pooling_shape)
{"stride", {3, 3}},
{"lengths", {1, 1}}}),
input);
}
migraphx::shape output1{migraphx::shape::float_type, {4, 3, 2, 2}};
expect_shape(output1,
TEST_CASE(pooling_dyn_shape2)
{
migraphx::shape input{migraphx::shape::float_type,
{{1, 4, 0}, {5, 5, 0}, {3, 3, 3}, {3, 3, 0}}};
migraphx::shape output{migraphx::shape::float_type,
{{1, 4, 0}, {5, 5, 0}, {2, 2, 2}, {2, 2, 0}}};
expect_shape(output,
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0}},
@@ -1578,6 +1644,37 @@
input);
}
TEST_CASE(pooling_dyn_shape3)
{
migraphx::shape input{migraphx::shape::float_type,
{{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
migraphx::shape output{migraphx::shape::float_type,
{{4, 4, 0}, {3, 3, 0}, {2, 4, 3}, {2, 4, 3}}};
expect_shape(output,
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {0, 0}},
{"stride", {3, 3}},
{"lengths", {1, 1}}}),
input);
}
TEST_CASE(pooling_dyn_shape4)
{
migraphx::shape input{migraphx::shape::float_type,
{{4, 4, 0}, {3, 3, 0}, {4, 12, 8}, {4, 12, 8}}};
migraphx::shape output{migraphx::shape::float_type,
{{4, 4, 0}, {3, 3, 0}, {3, 6, 4}, {3, 6, 4}}};
expect_shape(output,
migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::max},
{"padding", {2, 2}},
{"stride", {3, 3}},
{"lengths", {3, 3}},
{"ceil_mode", true}}),
input);
}
TEST_CASE(prefix_scan_sum)
{
{
@@ -2226,6 +2323,28 @@ TEST_CASE(transpose_shape)
throws_shape(migraphx::make_op("transpose", {{"permutation", {1, 2}}}), input);
}
TEST_CASE(transpose_dyn_shape0)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {2, 2, 0}}};
migraphx::shape output{migraphx::shape::float_type, {{2, 2, 0}, {1, 4, 0}}};
expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1}}}), input);
expect_shape(output, migraphx::make_op("transpose", {{"permutation", {1, 0}}}), input);
}
TEST_CASE(transpose_dyn_shape1)
{
migraphx::shape input{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {4, 4, 0}}};
migraphx::shape output{migraphx::shape::float_type, {{4, 4, 0}, {4, 4, 0}, {1, 4, 0}}};
expect_shape(input, migraphx::make_op("transpose", {{"permutation", {0, 1, 2}}}), input);
expect_shape(output, migraphx::make_op("transpose", {{"permutation", {2, 1, 0}}}), input);
}
TEST_CASE(transpose_axes_error)
{
migraphx::shape input{migraphx::shape::float_type, {2, 2}};
throws_shape(migraphx::make_op("transpose", {{"permutation", {1}}}), input);
}
TEST_CASE(step_test)
{
migraphx::shape s1{migraphx::shape::float_type, {1, 2, 4}};
......