Commit 2f268bc2 authored by Paul

Merge branch 'develop' into mlir-c

parents f75c5a38 aa7ff911
[Two binary ONNX test files added: scatter_mul_test.onnx and scatter_none_test.onnx. Each defines a single ScatterElements node with inputs data, indices, and update, output y, an axis attribute, and the reduction attribute set to "mul" and "none" respectively; the raw protobuf bytes are not reproduced here.]
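For reference, test models like these can be generated with the onnx Python helpers. The sketch below is only an illustration of how such a ScatterElements model with a reduction attribute might be produced; the tensor shapes, axis value, and opset are assumptions, and this is not the repository's actual generator script.

import onnx
from onnx import helper, TensorProto

def make_scatter_model(reduction, file_name):
    # One ScatterElements node: y = scatter(data, indices, update) with a reduction mode.
    node = helper.make_node(
        "ScatterElements",
        inputs=["data", "indices", "update"],
        outputs=["y"],
        axis=0,                # assumed axis; the real files may use a different value
        reduction=reduction)   # "none", "add", or "mul" (opset 16+)
    graph = helper.make_graph(
        [node],
        "scatter_{}_test".format(reduction),
        [helper.make_tensor_value_info("data", TensorProto.FLOAT, [3, 3]),
         helper.make_tensor_value_info("indices", TensorProto.INT64, [2, 3]),
         helper.make_tensor_value_info("update", TensorProto.FLOAT, [2, 3])],
        [helper.make_tensor_value_info("y", TensorProto.FLOAT, [3, 3])])
    model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 16)])
    onnx.save(model, file_name)

make_scatter_model("mul", "scatter_mul_test.onnx")
make_scatter_model("none", "scatter_none_test.onnx")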
@@ -581,6 +581,33 @@ TEST_CASE(mean_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(mean_integral_test)
{
migraphx::program p = migraphx::parse_onnx("mean_integral_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape s{migraphx::shape::int32_type, {2, 2, 2}};
const int num_elms = 8;
const int num_data = 10;
const std::vector<int> scalars{1, 5, 14, 2, 6, 21, 101, 0, -4, -11};
std::vector<std::vector<int>> data;
std::transform(scalars.begin(), scalars.end(), std::back_inserter(data), [&](const auto i) {
return std::vector<int>(num_elms, i);
});
migraphx::parameter_map pp;
for(std::size_t i = 0; i < num_data; ++i)
pp[std::to_string(i)] = migraphx::argument(s, data[i].data());
auto result = p.eval(pp).back();
std::vector<double> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
const auto mean = std::accumulate(scalars.begin(), scalars.end(), 0) / num_data;
std::vector<int> gold(num_elms, mean);
EXPECT(migraphx::verify_range(result_vector, gold));
}
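The gold value follows from integer arithmetic: the ten scalars sum to 135, and dividing by num_data with truncating integer division gives 13, so every element of the 2x2x2 output is 13. A quick check in plain Python (illustrative only):

scalars = [1, 5, 14, 2, 6, 21, 101, 0, -4, -11]
assert sum(scalars) == 135
assert sum(scalars) // len(scalars) == 13  # matches the C++ integer division above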
TEST_CASE(nonzero_test)
{
migraphx::program p = migraphx::parse_onnx("nonzero_dynamic_test.onnx");
@@ -698,6 +725,69 @@ TEST_CASE(resize_upsample_pf_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(reversesequence_4D_verify_test)
{
migraphx::program p = migraphx::parse_onnx("reversesequence_4D_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape xs{migraphx::shape::float_type, {2, 2, 2, 2}};
std::vector<float> x_data = {
0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0};
migraphx::parameter_map param_map;
param_map["x"] = migraphx::argument(xs, x_data.data());
auto result = p.eval(param_map).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {
8.0, 9.0, 10.0, 11.0, 4.0, 5.0, 6.0, 7.0, 0.0, 1.0, 2.0, 3.0, 12.0, 13.0, 14.0, 15.0};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(reversesequence_batch_verify_test)
{
migraphx::program p = migraphx::parse_onnx("reversesequence_batch_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape xs{migraphx::shape::float_type, {4, 4}};
std::vector<float> x_data = {
0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0};
migraphx::parameter_map param_map;
param_map["x"] = migraphx::argument(xs, x_data.data());
auto result = p.eval(param_map).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {
0.0, 1.0, 2.0, 3.0, 5.0, 4.0, 6.0, 7.0, 10.0, 9.0, 8.0, 11.0, 15.0, 14.0, 13.0, 12.0};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(reversesequence_time_verify_test)
{
migraphx::program p = migraphx::parse_onnx("reversesequence_time_test.onnx");
p.compile(migraphx::ref::target{});
migraphx::shape xs{migraphx::shape::float_type, {4, 4}};
std::vector<float> x_data = {
0.0, 4.0, 8.0, 12.0, 1.0, 5.0, 9.0, 13.0, 2.0, 6.0, 10.0, 14.0, 3.0, 7.0, 11.0, 15.0};
migraphx::parameter_map param_map;
param_map["x"] = migraphx::argument(xs, x_data.data());
auto result = p.eval(param_map).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {
3.0, 6.0, 9.0, 12.0, 2.0, 5.0, 8.0, 13.0, 1.0, 4.0, 10.0, 14.0, 0.0, 7.0, 11.0, 15.0};
EXPECT(migraphx::verify_range(result_vector, gold));
}
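The batch and time golds above can be reproduced with a small NumPy reference for ReverseSequence. The sequence lengths are not visible in this diff (they live in the generated .onnx files), so the values used below, [1, 2, 3, 4] for the batch-major case and [4, 3, 2, 1] for the time-major case, are assumptions inferred from the expected outputs; the 4D case is not reproduced here.

import numpy as np

def reverse_sequence(x, sequence_lens, batch_axis, time_axis):
    # For each batch entry b, reverse its first sequence_lens[b] steps along time_axis.
    y = x.copy()
    for b, n in enumerate(sequence_lens):
        idx = [slice(None)] * x.ndim
        idx[batch_axis] = b                      # integer index drops the batch axis
        idx[time_axis] = slice(0, n)
        flip_axis = time_axis if time_axis < batch_axis else time_axis - 1
        y[tuple(idx)] = np.flip(x[tuple(idx)], axis=flip_axis)
    return y

x = np.arange(16, dtype=np.float32).reshape(4, 4)
print(reverse_sequence(x, [1, 2, 3, 4], batch_axis=0, time_axis=1))    # batch-major gold
print(reverse_sequence(x.T, [4, 3, 2, 1], batch_axis=1, time_axis=0))  # time-major gold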
TEST_CASE(selu_test)
{
migraphx::program p = migraphx::parse_onnx("selu_test.onnx");
......
@@ -178,6 +178,7 @@ def create_backend_test(testname=None, target_device=None):
backend_test.include(r'.*test_reduce.*')
backend_test.include(r'.*test_ReLU*')
backend_test.include(r'.*test_relu.*')
#backend_test.include(r'.*test_reversesequence.*')
backend_test.include(r'.*test_RoiAlign*')
backend_test.include(r'.*test_roialign.*')
backend_test.include(r'.*test_scatter.*')
@@ -267,9 +268,6 @@ def create_backend_test(testname=None, target_device=None):
backend_test.exclude(r'test_expand_shape_model2_cpu')
backend_test.exclude(r'test_expand_shape_model3_cpu')
backend_test.exclude(r'test_expand_shape_model4_cpu')
backend_test.exclude(r'test_gathernd_example_float32_cpu')
backend_test.exclude(r'test_gathernd_example_int32_batch_dim1_cpu')
backend_test.exclude(r'test_gathernd_example_int32_cpu')
backend_test.exclude(r'test_identity_sequence_cpu')
backend_test.exclude(r'test_maxpool_2d_uint8_cpu')
backend_test.exclude(r'test_negative_log_likelihood_loss_*')
......
import migraphx
import migraphx, array, sys
def create_buffer(t, data, shape):
a = array.array(t, data)
m = memoryview(a.tobytes())
return m.cast(t, shape)
def test_add_op():
p = migraphx.program()
mm = p.get_main_module()
param_shape = migraphx.shape(lens=[3, 3], type="float")
x = mm.add_parameter("x", param_shape)
y = mm.add_parameter("y", param_shape)
x = mm.add_literal(create_buffer('f', [1.0] * 9, (3, 3)))
y = mm.add_literal(create_buffer('f', [2.0] * 9, (3, 3)))
add_op = mm.add_instruction(migraphx.op("add"), [x, y])
mm.add_return([add_op])
p.compile(migraphx.get_target("ref"))
params = {}
params["x"] = migraphx.generate_argument(param_shape)
params["y"] = migraphx.generate_argument(param_shape)
output = p.run(params)[-1].tolist()
assert output == [
a + b for a, b in zip(params["x"].tolist(), params["y"].tolist())
]
assert output == list([3.0] * 9)
def test_if_then_else():
......@@ -60,5 +61,6 @@ def test_if_then_else():
if __name__ == "__main__":
test_add_op()
if sys.version_info >= (3, 0):
test_add_op()
test_if_then_else()
import migraphx, sys
try:
import numpy as np
except:
sys.exit()
def test_add_op():
p = migraphx.program()
mm = p.get_main_module()
x = mm.add_literal(np.ones((3, 3), dtype='float32'))
y = mm.add_literal(2 * np.ones((3, 3), dtype='float32'))
add_op = mm.add_instruction(migraphx.op("add"), [x, y])
mm.add_return([add_op])
p.compile(migraphx.get_target("ref"))
params = {}
output = p.run(params)[-1].tolist()
assert output == list(3 * np.ones((9), dtype='float32'))
if __name__ == "__main__":
test_add_op()
@@ -109,6 +109,29 @@ TEST_CASE(transposed1)
EXPECT(eshapes == rshapes);
}
TEST_CASE(non_packed_empty1)
{
std::vector<migraphx::shape> ishapes = {make_shape({1, 12}, {589824, 64})};
std::vector<migraphx::shape> eshapes = {make_shape({12}, {64})};
auto rshapes = migraphx::reduce_dims(ishapes);
EXPECT(eshapes == rshapes);
}
TEST_CASE(non_packed_empty2)
{
std::vector<migraphx::shape> ishapes = {make_shape({12, 1}, {64, 589824})};
std::vector<migraphx::shape> eshapes = {make_shape({12}, {64})};
auto rshapes = migraphx::reduce_dims(ishapes);
EXPECT(eshapes == rshapes);
}
TEST_CASE(single_dim)
{
std::vector<migraphx::shape> ishapes = {make_shape({1}, {1})};
auto rshapes = migraphx::reduce_dims(ishapes);
EXPECT(ishapes == rshapes);
}
TEST_CASE(empty)
{
auto rshapes = migraphx::reduce_dims({});
......
@@ -1653,6 +1653,203 @@ TEST_CASE(gather_test)
}
}
TEST_CASE(gathernd_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2}};
std::vector<float> data_vec(2 * 2);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{0, 0, 1, 1};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{0, 3};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 1}};
std::vector<float> data_vec(2 * 2);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{1, 0};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{2, 3, 0, 1};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2, 1}};
std::vector<float> data_vec(2 * 3 * 1);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{1, 0, 0, 1};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{3, 4, 5, 0, 1, 2, 0, 1, 2, 3, 4, 5};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 2, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2, 2}};
std::vector<float> data_vec(2 * 3 * 2 * 3);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{0, 0, 0, 1, 0, 0, 0, 1};
const int batch_dims = 1;
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(
migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{0, 1, 2, 3, 4, 5, 18, 19, 20, 21, 22, 23};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 3, 2}};
std::vector<float> data_vec(2 * 3 * 1 * 3);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{0, 0, 0, 1, 0, 2, 0, 2, 0, 1, 0, 0};
const int batch_dims = 2;
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(
migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{0, 4, 8, 11, 13, 15};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
// k > r - batch_dims: the last dimension of indices exceeds rank(data) - batch_dims, so this should throw
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 3, 3}};
std::vector<float> data_vec(2 * 3 * 1 * 3);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec(2 * 3 * 3, 0);
const int batch_dims = 2;
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
EXPECT(test::throws([&] {
mm->add_instruction(
migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), data, indices);
}));
}
}
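The expected outputs above can be reproduced with a small NumPy sketch of the ONNX GatherND semantics; this is a reference illustration rather than MIGraphX's implementation:

import numpy as np

def gathernd_ref(data, indices, batch_dims=0):
    k = indices.shape[-1]
    assert k <= data.ndim - batch_dims  # otherwise the op is ill-formed, as in the throwing case above
    batch_shape = data.shape[:batch_dims]
    out_shape = batch_shape + indices.shape[batch_dims:-1] + data.shape[batch_dims + k:]
    out = []
    for b in (np.ndindex(*batch_shape) if batch_dims else [()]):
        d, idx = data[b], indices[b].reshape(-1, k)
        # Each k-tuple in indices picks a slice (or element) of the matching batch of data.
        out.append(np.stack([d[tuple(i)] for i in idx]))
    return np.stack(out).reshape(out_shape)

data = np.arange(2 * 3 * 2 * 3, dtype=np.float32).reshape(2, 3, 2, 3)
indices = np.array([0, 0, 0, 1, 0, 0, 0, 1]).reshape(2, 2, 2)
print(gathernd_ref(data, indices, batch_dims=1).flatten())  # 0..5 followed by 18..23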
TEST_CASE(gathernd_negative_index_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 1, 1}};
std::vector<float> data_vec(2 * 2);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{-1, 0};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> res_data{};
std::vector<float> gold{2, 3, 0, 1};
result.visit([&](auto output) { res_data.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(res_data, gold));
}
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 1, 1}};
std::vector<float> data_vec(2 * 2);
std::iota(data_vec.begin(), data_vec.end(), 0);
std::vector<int64_t> indices_vec{-3, 0};
auto data = mm->add_literal(migraphx::literal{ds, data_vec});
auto indices = mm->add_literal(migraphx::literal{is, indices_vec});
mm->add_instruction(migraphx::make_op("gathernd"), data, indices);
p.compile(migraphx::ref::target{});
EXPECT(test::throws([&] { p.eval({}); }));
}
}
TEST_CASE(globalavgpool_test)
{
migraphx::program p;
@@ -1674,6 +1871,28 @@ TEST_CASE(globalavgpool_test)
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(globallppool_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto s = migraphx::shape{migraphx::shape::float_type, {1, 3, 2, 2}};
auto op = migraphx::op::pooling{migraphx::op::pooling_mode::lpnorm};
auto lens = s.lens();
op.lengths = {lens[2], lens[3]};
op.lp_order = 2;
std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
auto l0 = mm->add_literal(migraphx::literal{s, data});
mm->add_instruction(op, l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector(3);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{0.5477225575051662, 1.307669683062202, 0.9327379053088815};
EXPECT(migraphx::verify_range(results_vector, gold));
}
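The gold values here are the L2 norms of each channel's four spatial elements, e.g. sqrt(0.3^2 + 0.2^2 + 0.4^2 + 0.1^2) = sqrt(0.30) ≈ 0.5477 for the first channel. A one-line NumPy check (illustrative only):

import numpy as np
x = np.array([0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6])
print(np.linalg.norm(x.reshape(3, 4), axis=1))  # [0.5477... 1.3076... 0.9327...]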
TEST_CASE(globalmaxpool_test)
{
migraphx::program p;
@@ -2504,6 +2723,63 @@ TEST_CASE(logsoftmax_test_axis_3)
EXPECT(migraphx::verify_range(results_vector, s));
}
TEST_CASE(lppool_test)
{
// L1 norm test
{
migraphx::program p;
auto* mm = p.get_main_module();
auto s = migraphx::shape{migraphx::shape::float_type, {1, 3, 4}};
auto op = migraphx::op::pooling{migraphx::op::pooling_mode::lpnorm};
op.lengths = {2};
op.padding = {0};
op.stride = {1};
op.lp_order = 1;
std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
auto l0 = mm->add_literal(migraphx::literal{s, data});
mm->add_instruction(op, l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{0.5, 0.6, 0.5, 1.3, 1.4, 1.0, 0.8, 0.8, 0.7};
EXPECT(migraphx::verify_range(results_vector, gold));
}
// L2 norm test
{
migraphx::program p;
auto* mm = p.get_main_module();
auto s = migraphx::shape{migraphx::shape::float_type, {1, 3, 4}};
auto op = migraphx::op::pooling{migraphx::op::pooling_mode::lpnorm};
op.lengths = {2};
op.padding = {0};
op.stride = {1};
op.lp_order = 2;
std::vector<float> data{0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6};
auto l0 = mm->add_literal(migraphx::literal{s, data});
mm->add_instruction(op, l0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold{0.36055512754639896,
0.447213595499958,
0.4123105625617661,
0.9433981132056605,
1.0295630140987,
0.9055385138137417,
0.7071067811865475,
0.7071067811865475,
0.6082762530298219};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
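Both golds follow from a window of length 2 with stride 1 applied per channel: the L1 case sums absolute values in each window (|0.3| + |0.2| = 0.5, ...) and the L2 case takes the square root of the sum of squares (sqrt(0.09 + 0.04) ≈ 0.3606, ...). A NumPy sketch of that reference computation (not the MIGraphX kernel, and ignoring padding since it is zero here):

import numpy as np

def lppool_1d(x, length, stride, p):
    n, c, w = x.shape
    out_w = (w - length) // stride + 1
    out = np.zeros((n, c, out_w))
    for i in range(out_w):
        window = x[:, :, i * stride:i * stride + length]
        out[:, :, i] = np.sum(np.abs(window) ** p, axis=2) ** (1.0 / p)
    return out

x = np.array([0.3, 0.2, 0.4, 0.1, 0.8, 0.5, 0.9, 0.1, 0.1, 0.7, 0.1, 0.6]).reshape(1, 3, 4)
print(lppool_1d(x, length=2, stride=1, p=1))  # L1 gold: 0.5 0.6 0.5 / 1.3 1.4 1.0 / 0.8 0.8 0.7
print(lppool_1d(x, length=2, stride=1, p=2))  # matches the L2 gold above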
TEST_CASE(lrn_test)
{
migraphx::program p;
@@ -4179,25 +4455,35 @@ TEST_CASE(rsqrt_test)
EXPECT(migraphx::verify_range(results_vector, gold));
}
TEST_CASE(scatter_test)
// reduction_mode: "scatter_none", "scatter_add", "scatter_mul"
migraphx::program create_scatter_program(const std::string& reduction_mode, int axis)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sd{migraphx::shape::float_type, {3, 3}};
std::vector<float> vd(sd.elements(), 0.0f);
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sd{migraphx::shape::float_type, {3, 3}};
std::vector<float> vd(sd.elements(), 0.0f);
migraphx::shape si{migraphx::shape::int32_type, {2, 3}};
std::vector<int> vi = {1, 0, 2, 0, 2, 1};
migraphx::shape si{migraphx::shape::int32_type, {2, 3}};
std::vector<int> vi = {1, 0, 2, 0, 2, 1};
migraphx::shape su{migraphx::shape::float_type, {2, 3}};
std::vector<float> vu = {1.0, 1.1, 1.2, 2.0, 2.1, 2.2};
migraphx::shape su{migraphx::shape::float_type, {2, 3}};
std::vector<float> vu = {1.0, 1.1, 1.2, 2.0, 2.1, 2.2};
auto ld = mm->add_literal(migraphx::literal{sd, vd});
auto li = mm->add_literal(migraphx::literal{si, vi});
auto lu = mm->add_literal(migraphx::literal{su, vu});
auto r = mm->add_instruction(migraphx::make_op("scatter", {{"axis", 0}}), ld, li, lu);
mm->add_return({r});
auto ld = mm->add_literal(migraphx::literal{sd, vd});
auto li = mm->add_literal(migraphx::literal{si, vi});
auto lu = mm->add_literal(migraphx::literal{su, vu});
// scatter_none, formerly the scatter op
auto r = mm->add_instruction(migraphx::make_op(reduction_mode, {{"axis", axis}}), ld, li, lu);
mm->add_return({r});
return p;
}
TEST_CASE(scatter_ax0_test)
{
// this tests what used to be the only scatter op, now split into three sub-ops,
// each of which has its own test case below
{
migraphx::program p = create_scatter_program("scatter_none", 0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
@@ -4205,24 +4491,13 @@ TEST_CASE(scatter_test)
std::vector<float> gold = {2.0, 1.1, 0.0, 1.0, 0.0, 2.2, 0.0, 2.1, 1.2};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(scatter_ax_neg_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sd{migraphx::shape::float_type, {3, 3}};
std::vector<float> vd(sd.elements(), 0.0f);
migraphx::shape si{migraphx::shape::int32_type, {2, 3}};
std::vector<int> vi = {1, 0, -1, 0, 2, -2};
migraphx::program p = create_scatter_program("scatter_none", -2);
migraphx::shape su{migraphx::shape::float_type, {2, 3}};
std::vector<float> vu = {1.0, 1.1, 1.2, 2.0, 2.1, 2.2};
auto ld = mm->add_literal(migraphx::literal{sd, vd});
auto li = mm->add_literal(migraphx::literal{si, vi});
auto lu = mm->add_literal(migraphx::literal{su, vu});
auto r = mm->add_instruction(migraphx::make_op("scatter", {{"axis", -2}}), ld, li, lu);
mm->add_return({r});
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
@@ -4230,30 +4505,177 @@ TEST_CASE(scatter_test)
std::vector<float> gold = {2.0, 1.1, 0.0, 1.0, 0.0, 2.2, 0.0, 2.1, 1.2};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
TEST_CASE(scatter_ax1_test)
{
{
migraphx::program p = create_scatter_program("scatter_none", 1);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.1, 1.0, 1.2, 2.0, 2.2, 2.1, 0.0, 0.0, 0.0};
EXPECT(migraphx::verify_range(results_vector, gold));
}
}
// similar to create_scatter_program but with different tensor values
// reduction_mode: "scatter_none", "scatter_add", "scatter_mul"
migraphx::program create_scatter_program2(const std::string& reduction_mode, int axis)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sd{migraphx::shape::float_type, {1, 5}};
std::vector<float> vd({1., 2., 3., 4., 5.});
migraphx::shape si{migraphx::shape::int32_type, {1, 2}};
std::vector<int> vi = {1, 3};
migraphx::shape su{migraphx::shape::float_type, {1, 2}};
std::vector<float> vu = {1.1, 2.1};
auto ld = mm->add_literal(migraphx::literal{sd, vd});
auto li = mm->add_literal(migraphx::literal{si, vi});
auto lu = mm->add_literal(migraphx::literal{su, vu});
auto r = mm->add_instruction(migraphx::make_op(reduction_mode, {{"axis", axis}}), ld, li, lu);
mm->add_return({r});
return p;
}
TEST_CASE(scatter_reduction1_test)
{
{
// Test sub-ops for the three reduction values scatter_none, scatter_add, scatter_mul
migraphx::program p = create_scatter_program2("scatter_none", 1);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold_none = {1.0, 1.1, 3.0, 2.1, 5.0};
EXPECT(migraphx::verify_range(results_vector, gold_none));
}
}
TEST_CASE(scatter_reduction2_test)
{
{
migraphx::program p = create_scatter_program2("scatter_mul", 1);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold_mul = {1.0, 2.2, 3.0, 8.4, 5.0};
EXPECT(migraphx::verify_range(results_vector, gold_mul));
}
}
TEST_CASE(scatter_reduction3_test)
{
{
migraphx::program p = create_scatter_program2("scatter_add", 1);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold_add = {1.0, 3.1, 3.0, 6.1, 5.0};
EXPECT(migraphx::verify_range(results_vector, gold_add));
}
}
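The three golds above can be reproduced with a small NumPy sketch of scatter along an axis with the none/add/mul reduction modes; this is a reference illustration, not the MIGraphX operators:

import numpy as np

def scatter_ref(data, indices, updates, axis, reduction="none"):
    out = data.astype(float)
    for pos in np.ndindex(*indices.shape):
        tgt = list(pos)
        tgt[axis] = indices[pos]   # a negative axis also works via Python's negative indexing
        tgt = tuple(tgt)
        if reduction == "add":
            out[tgt] += updates[pos]
        elif reduction == "mul":
            out[tgt] *= updates[pos]
        else:                      # "none": plain overwrite
            out[tgt] = updates[pos]
    return out

data = np.array([[1., 2., 3., 4., 5.]])
indices = np.array([[1, 3]])
updates = np.array([[1.1, 2.1]])
print(scatter_ref(data, indices, updates, axis=1, reduction="none"))  # [[1.  1.1 3.  2.1 5. ]]
print(scatter_ref(data, indices, updates, axis=1, reduction="mul"))   # [[1.  2.2 3.  8.4 5. ]]
print(scatter_ref(data, indices, updates, axis=1, reduction="add"))   # [[1.  3.1 3.  6.1 5. ]]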
TEST_CASE(scatter_reduction_3x3_test)
{
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sd{migraphx::shape::float_type, {3, 3}};
std::vector<float> vd(sd.elements(), 0.0f);
std::vector<float> vd(sd.elements(), 3.0f);
migraphx::shape si{migraphx::shape::int32_type, {2, 3}};
std::vector<int> vi = {1, 0, 2, 0, 2, 1};
migraphx::shape su{migraphx::shape::float_type, {2, 3}};
std::vector<float> vu = {1.0, 1.1, 1.2, 2.0, 2.1, 2.2};
std::vector<float> vu = {1.0, 1.1, 1.2, 7.0, 7.1, 7.2};
auto ld = mm->add_literal(migraphx::literal{sd, vd});
auto li = mm->add_literal(migraphx::literal{si, vi});
auto lu = mm->add_literal(migraphx::literal{su, vu});
auto r = mm->add_instruction(migraphx::make_op("scatter", {{"axis", 1}}), ld, li, lu);
auto r = mm->add_instruction(migraphx::make_op("scatter_add", {{"axis", 1}}), ld, li, lu);
mm->add_return({r});
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold = {1.1, 1.0, 1.2, 2.0, 2.2, 2.1, 0.0, 0.0, 0.0};
EXPECT(migraphx::verify_range(results_vector, gold));
std::vector<float> gold_a2 = {4.1, 4.0, 4.2, 10.0, 10.2, 10.1, 3.0, 3.0, 3.0};
EXPECT(migraphx::verify_range(results_vector, gold_a2));
}
}
// create a test scatter program with a 3x3 tensor;
// su and si are transposed relative to the previous case
migraphx::program create_scatter_program_3x3(const std::string& reduction_mode, int axis)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape sd{migraphx::shape::float_type, {3, 3}};
std::vector<float> vd(sd.elements(), 3.0f);
migraphx::shape si{migraphx::shape::int32_type, {3, 2}};
std::vector<int> vi = {1, 0, 0, 2, 2, 1};
migraphx::shape su{migraphx::shape::float_type, {3, 2}};
std::vector<float> vu = {1.0, 7.0, 1.1, 7.1, 1.2, 7.2};
auto ld = mm->add_literal(migraphx::literal{sd, vd});
auto li = mm->add_literal(migraphx::literal{si, vi});
auto lu = mm->add_literal(migraphx::literal{su, vu});
auto r = mm->add_instruction(migraphx::make_op(reduction_mode, {{"axis", axis}}), ld, li, lu);
mm->add_return({r});
return p;
}
TEST_CASE(scatter_reduction_3x3_xpose1_test)
{
// test on the vertical (0) axis; su and si are transposed relative to the previous case
{
migraphx::program p = create_scatter_program_3x3("scatter_none", 0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold_none2 = {1.1, 7.0, 3.0, 1.0, 7.2, 3.0, 1.2, 7.1, 3.0};
EXPECT(migraphx::verify_range(results_vector, gold_none2));
}
}
TEST_CASE(scatter_reduction_3x3_xpose2_test)
{
// test on vertical (0) axis.
{
migraphx::program p = create_scatter_program_3x3("scatter_add", 0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold_a3 = {4.1, 10.0, 3.0, 4.0, 10.2, 3.0, 4.2, 10.1, 3.0};
EXPECT(migraphx::verify_range(results_vector, gold_a3));
}
}
TEST_CASE(scatter_reduction_3x3_xpose3_test)
{
{
migraphx::program p = create_scatter_program_3x3("scatter_mul", 0);
p.compile(migraphx::ref::target{});
auto result = p.eval({}).back();
std::vector<float> results_vector;
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
std::vector<float> gold_mul2 = {3.3, 21.0, 3.0, 3.0, 21.6, 3.0, 3.6, 21.3, 3.0};
EXPECT(migraphx::verify_range(results_vector, gold_mul2));
}
}
......
@@ -1173,12 +1173,6 @@ TEST_CASE(gru_forward_args)
0.3852, -0.1170, -0.2937, 0.2979, -0.1357, 0.4257, 0.3884, -0.2916, 0.1071, 0.0934,
0.3645, -0.4310, -0.3480, 0.0702, -0.1558};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
std::vector<float> bias_data{
0.0560, 0.0310, -0.1669, -0.0781, 0.1793, -0.1758, 0.3173, -0.1650, -0.3732, 0.2946,
-0.0912, 0.3118, 0.1391, 0.2755, 0.2695, -0.1059, -0.2357, 0.3629, -0.2534, -0.0494,
0.0556, 0.0881, -0.2592, -0.2213, 0.2310, -0.4044, 0.1801, 0.1438, 0.3108, -0.3607};
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
std::vector<float> input{-0.8432,
-0.9887,
@@ -1199,9 +1193,6 @@ TEST_CASE(gru_forward_args)
-1.0536,
-0.2529};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
std::vector<float> ih_data{
-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
float clip = 0.0f;
// 3 args
@@ -1242,6 +1233,11 @@ TEST_CASE(gru_forward_args)
// 4 args (bias is used)
{
std::vector<float> bias_data{
0.0560, 0.0310, -0.1669, -0.0781, 0.1793, -0.1758, 0.3173, -0.1650, -0.3732, 0.2946,
-0.0912, 0.3118, 0.1391, 0.2755, 0.2695, -0.1059, -0.2357, 0.3629, -0.2534, -0.0494,
0.0556, 0.0881, -0.2592, -0.2213, 0.2310, -0.4044, 0.1801, 0.1438, 0.3108, -0.3607};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input});
@@ -1280,6 +1276,9 @@ TEST_CASE(gru_forward_args)
// 4 args (ih is used)
{
std::vector<float> ih_data{
-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input});
@@ -2210,15 +2209,6 @@ TEST_CASE(gru_bidirectional_args)
0.4101, 0.2641, -0.4110, -0.1681, 0.3582, -0.2089, 0.0852, 0.0963, 0.3866, 0.1955,
-0.2174, 0.1996, -0.2252, 0.1748, 0.1833, -0.3155, 0.2567, -0.4387, 0.3402, 0.0599};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
std::vector<float> bias_data{
-0.1582, -0.0826, 0.4008, 0.0118, 0.2511, 0.1900, -0.2838, 0.2549, -0.2484, 0.2363,
-0.4083, -0.0295, -0.1161, 0.1211, 0.2509, -0.1414, -0.2628, -0.2992, 0.1517, 0.1817,
-0.2783, 0.3183, -0.1629, -0.3108, -0.3418, 0.0411, 0.2203, 0.2187, -0.2990, -0.0416,
0.0209, -0.1024, 0.4443, -0.4420, -0.0330, -0.3591, -0.2990, 0.2167, 0.1395, 0.2317,
0.1318, 0.1909, -0.3615, 0.1953, -0.2582, -0.2217, 0.3723, 0.1458, 0.2630, -0.0377,
0.1754, 0.0800, -0.3964, -0.3247, 0.4219, -0.0900, 0.3553, 0.2614, -0.1298, -0.1124};
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
std::vector<float> input{-0.8432,
-0.9887,
@@ -2239,11 +2229,6 @@ TEST_CASE(gru_bidirectional_args)
-1.0536,
-0.2529};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
std::vector<float> ih_data{-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348,
0.4178, 1.0175, 0.9212, -0.0468, 0.5691, -0.0882, 0.8340,
0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
float clip = 0.0f;
// 3 args
@@ -2288,6 +2273,15 @@ TEST_CASE(gru_bidirectional_args)
// 4 args (bias is used)
{
std::vector<float> bias_data{
-0.1582, -0.0826, 0.4008, 0.0118, 0.2511, 0.1900, -0.2838, 0.2549, -0.2484,
0.2363, -0.4083, -0.0295, -0.1161, 0.1211, 0.2509, -0.1414, -0.2628, -0.2992,
0.1517, 0.1817, -0.2783, 0.3183, -0.1629, -0.3108, -0.3418, 0.0411, 0.2203,
0.2187, -0.2990, -0.0416, 0.0209, -0.1024, 0.4443, -0.4420, -0.0330, -0.3591,
-0.2990, 0.2167, 0.1395, 0.2317, 0.1318, 0.1909, -0.3615, 0.1953, -0.2582,
-0.2217, 0.3723, 0.1458, 0.2630, -0.0377, 0.1754, 0.0800, -0.3964, -0.3247,
0.4219, -0.0900, 0.3553, 0.2614, -0.1298, -0.1124};
migraphx::shape b_shape{migraphx::shape::float_type, {num_dirct, 6 * hidden_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input});
@@ -2330,6 +2324,10 @@ TEST_CASE(gru_bidirectional_args)
// 4 args (ih is used)
{
std::vector<float> ih_data{-0.0468, 0.5691, -0.0882, 0.8340, 0.1483, -0.3902, -0.5348,
0.4178, 1.0175, 0.9212, -0.0468, 0.5691, -0.0882, 0.8340,
0.1483, -0.3902, -0.5348, 0.4178, 1.0175, 0.9212};
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input});
@@ -4186,7 +4184,6 @@ TEST_CASE(lstm_bidirectional_var_seq_lens)
-0.83699064, 0.49162736, -0.8271, -0.5683, 0.4562,
-1.2545, 1.2729, -0.4082, -0.4392, -0.9406,
0.7794, 1.8194, -0.5811, 0.2166};
std::vector<int> sl_data{1, 2, 3};
float clip = 0.0f;
migraphx::shape in_shape{migraphx::shape::float_type, {seq_len, batch_size, input_size}};
@@ -4196,10 +4193,11 @@ TEST_CASE(lstm_bidirectional_var_seq_lens)
migraphx::shape ih_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::shape ic_shape{migraphx::shape::float_type, {num_dirct, batch_size, hidden_size}};
migraphx::shape pph_shape{migraphx::shape::float_type, {num_dirct, 3 * hidden_size}};
migraphx::shape sl_shape{migraphx::shape::int32_type, {batch_size}};
// concatenation of hidden states as program output
{
std::vector<int> sl_data{1, 2, 3};
migraphx::shape sl_shape{migraphx::shape::int32_type, {batch_size}};
migraphx::program p;
auto* mm = p.get_main_module();
auto seq = mm->add_literal(migraphx::literal{in_shape, input_data});
......
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
#include <migraphx/apply_alpha_beta.hpp>
struct gemm_add : verify_program<gemm_add>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape m1_shape{migraphx::shape::float_type, {1, 2, 3}};
migraphx::shape m2_shape{migraphx::shape::float_type, {1, 3, 4}};
migraphx::shape m3_shape{migraphx::shape::float_type, {1, 2, 4}};
auto l1 = mm->add_parameter("1", m1_shape);
auto l2 = mm->add_parameter("2", m2_shape);
auto l3 = mm->add_parameter("3", m3_shape);
auto dot = mm->add_instruction(migraphx::make_op("dot"), l1, l2);
mm->add_instruction(migraphx::make_op("add"), dot, l3);
return p;
}
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/op/quant_convolution.hpp>
struct quant_conv_int8x4_default : verify_program<quant_conv_int8x4_default>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape a_shape{migraphx::shape::int8_type, {16, 16, 4, 4}};
auto pa = mm->add_parameter("a", a_shape);
migraphx::shape c_shape{migraphx::shape::int8_type, {16, 16, 3, 3}};
auto pc = mm->add_parameter("c", c_shape);
mm->add_instruction(
migraphx::op::quant_convolution{{{0, 0}}, {{1, 1}}, {{1, 1}}, migraphx::op::same},
pa,
pc);
return p;
}
};
@@ -129,7 +129,6 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
auto_print::set_terminate_handler(name);
if(migraphx::enabled(MIGRAPHX_DUMP_TEST{}))
migraphx::save(p, name + ".mxr");
std::vector<std::pair<std::string, result_future>> results;
std::vector<std::string> target_names;
for(const auto& tname : migraphx::get_targets())
{
@@ -145,6 +144,7 @@ void run_verify::verify(const std::string& name, const migraphx::program& p) con
}
if(not target_names.empty())
{
std::vector<std::pair<std::string, result_future>> results;
migraphx::parameter_map m;
for(auto&& x : p.get_parameter_shapes())
{
......
@@ -12,7 +12,6 @@ struct test_conv_bias_clipped_relu : verify_program<test_conv_bias_clipped_relu>
{
migraphx::program p;
auto* mm = p.get_main_module();
std::vector<size_t> input_lens{4, 3, 3, 3};
auto input =
mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {4, 3, 3, 3}});
auto weights =
......
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_gathernd_batch_dims_1 : verify_program<test_gathernd_batch_dims_1>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 2, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 3, 2}};
std::vector<int64_t> indices{1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0};
auto a0 = mm->add_parameter("data", ds);
auto a1 = mm->add_literal(migraphx::literal{is, indices});
int batch_dims = 1;
mm->add_instruction(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), a0, a1);
return p;
}
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_gathernd_batch_dims_2 : verify_program<test_gathernd_batch_dims_2>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 3, 1, 3}};
migraphx::shape is{migraphx::shape::int64_type, {2, 3, 2}};
std::vector<int64_t> indices{0, 0, 0, 1, 0, 2, 0, 2, 0, 1, 0, 0};
auto a0 = mm->add_parameter("data", ds);
auto a1 = mm->add_literal(migraphx::literal{is, indices});
int batch_dims = 2;
mm->add_instruction(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), a0, a1);
return p;
}
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_gathernd_default : verify_program<test_gathernd_default>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 2}};
std::vector<int64_t> indices{0, 0, 1, 1};
auto a0 = mm->add_parameter("data", ds);
auto a1 = mm->add_literal(migraphx::literal{is, indices});
mm->add_instruction(migraphx::make_op("gathernd"), a0, a1);
return p;
}
};
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct test_gathernd_negative_indices : verify_program<test_gathernd_negative_indices>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ds{migraphx::shape::float_type, {2, 2}};
migraphx::shape is{migraphx::shape::int64_type, {2, 1, 1}};
std::vector<int64_t> indices{-1, 0};
auto a0 = mm->add_parameter("data", ds);
auto a1 = mm->add_literal(migraphx::literal{is, indices});
int batch_dims = 1;
mm->add_instruction(migraphx::make_op("gathernd", {{"batch_dims", batch_dims}}), a0, a1);
return p;
}
};
@@ -17,7 +17,7 @@ struct test_reduce_op_large : verify_program<test_reduce_op_large<Op, Axis, T>>
auto* mm = p.get_main_module();
migraphx::shape s{T, {3, 1026, 4, 3}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(Op{{1}}, x);
mm->add_instruction(Op{{Axis}}, x);
return p;
};
};
......
@@ -15,13 +15,14 @@ struct test_reduce_op_small : verify_program<test_reduce_op_small<Op, Axis, T>>
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{T, {3, 4, 8, 8}};
migraphx::shape s{T, {3, 4, 2, 2}};
auto x = mm->add_parameter("x", s);
mm->add_instruction(Op{{1}}, x);
mm->add_instruction(Op{{Axis}}, x);
return p;
};
};
template struct test_reduce_op_small<migraphx::op::reduce_sum, 1, migraphx::shape::float_type>;
template struct test_reduce_op_small<migraphx::op::reduce_sum, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_mean, 2, migraphx::shape::int32_type>;
template struct test_reduce_op_small<migraphx::op::reduce_max, 2, migraphx::shape::int32_type>;
......
@@ -18,7 +18,7 @@ struct test_scatter0 : verify_program<test_scatter0>
auto pd = mm->add_parameter("data", sd);
auto li = mm->add_literal(migraphx::literal{si, vi});
auto pu = mm->add_parameter("update", su);
auto r = mm->add_instruction(migraphx::make_op("scatter", {{"axis", -1}}), pd, li, pu);
auto r = mm->add_instruction(migraphx::make_op("scatter_none", {{"axis", -1}}), pd, li, pu);
mm->add_return({r});
return p;
......