Commit 3eaa0969 authored by Alan Turner

Merge remote-tracking branch 'origin/bert-opt' into HEAD

parents e3d0c287 22aa9c5e
@@ -2055,6 +2055,40 @@ def gather_test():
return ([node], [x, i], [y])
@onnx_test()
def gather_scalar_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
i = helper.make_tensor_value_info('indices', TensorProto.INT32, [])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [4, 5, 6])
node = onnx.helper.make_node(
'Gather',
inputs=['data', 'indices'],
outputs=['y'],
axis=1,
)
return ([node], [x, i], [y])
@onnx_test()
def gather_dyn_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT,
[None, 4, 5, 6])
i = helper.make_tensor_value_info('indices', TensorProto.INT32,
[None, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node(
'Gather',
inputs=['data', 'indices'],
outputs=['y'],
axis=1,
)
return ([node], [x, i], [y])
@onnx_test()
def gather_elements_axis0_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4])
@@ -2217,7 +2251,7 @@ def gemm_dyn_outer_test():
@onnx_test()
def gemm_dyn_C_error():
def gemm_dyn_bias_test():
A = helper.make_tensor_value_info('A', TensorProto.FLOAT, [8, None])
B = helper.make_tensor_value_info('B', TensorProto.FLOAT, [8, 7])
C = helper.make_tensor_value_info('C', TensorProto.FLOAT, [1, 7])
@@ -6773,6 +6807,92 @@ def transpose_gather_test():
return ([td, ti, node], [x, i], [y])
@onnx_test()
def trilu_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node(
'Trilu',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test()
def trilu_batch_diff_k_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 2, 3])
k = np.array([2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 2, 3])
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node(
'Trilu',
inputs=['x', 'k'],
outputs=['y'],
)
return ([node], [x], [y], [k_tensor])
@onnx_test()
def trilu_lower_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
node = onnx.helper.make_node('Trilu', inputs=['x'], outputs=['y'], upper=0)
return ([node], [x], [y])
@onnx_test()
def trilu_neg_k_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
k = np.array([-1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node('Trilu', inputs=['x', 'k'], outputs=['y'])
return ([node], [x], [y], [k_tensor])
@onnx_test()
def trilu_out_k_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4])
k = np.array([5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node('Trilu', inputs=['x', 'k'], outputs=['y'])
return ([node], [x], [y], [k_tensor])
@onnx_test()
def trilu_row_one_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1, 4])
k = np.array([1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 4])
k_tensor = helper.make_tensor(name='k',
data_type=TensorProto.INT64,
dims=k.shape,
vals=k.astype(np.int64))
node = onnx.helper.make_node(
'Trilu',
inputs=['x', 'k'],
outputs=['y'],
)
return ([node], [x], [y], [k_tensor])
@onnx_test()
def undefined_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
@@ -2048,6 +2048,46 @@ TEST_CASE(gather_test)
EXPECT(p == prog);
}
TEST_CASE(gather_scalar_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
std::vector<size_t> idims{1};
auto l1 =
mm->add_parameter("indices", migraphx::shape{migraphx::shape::int32_type, idims, {0}});
int axis = 1;
mm->add_instruction(migraphx::make_op("gather", {{"axis", axis}}), l0, l1);
auto prog = optimize_onnx("gather_scalar_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(gather_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"data",
migraphx::shape{migraphx::shape::float_type, {{1, 4, 0}, {4, 4, 0}, {5, 5, 0}, {6, 6, 0}}});
auto l1 = mm->add_parameter(
"indices",
migraphx::shape{migraphx::shape::int32_type, {{1, 4, 0}, {3, 3, 0}, {4, 4, 0}, {5, 5, 0}}});
auto cont_l0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
auto cont_l1 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
int axis = 1;
auto gather_op = migraphx::make_op("gather", {{"axis", axis}});
auto ret = mm->add_instruction(gather_op, cont_l0, cont_l1);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
auto prog = parse_onnx("gather_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(gather_elements_axis0_test)
{
migraphx::program p;
@@ -2278,11 +2318,24 @@ TEST_CASE(gemm_dyn_outer_test)
EXPECT(p == prog);
}
TEST_CASE(gemm_dyn_C_error)
TEST_CASE(gemm_dyn_bias_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x0 =
mm->add_parameter("A", migraphx::shape{migraphx::shape::float_type, {{8, 8}, {1, 10}}});
auto x1 = mm->add_parameter("B", migraphx::shape{migraphx::shape::float_type, {8, 7}});
auto x2 = mm->add_parameter("C", migraphx::shape{migraphx::shape::float_type, {1, 7}});
auto x0_t = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0}}}), x0);
auto dot = mm->add_instruction(migraphx::make_op("dot"), x0_t, x1);
auto x2_b = mm->add_instruction(migraphx::make_op("multibroadcast"), x2, dot);
auto ret = mm->add_instruction(migraphx::make_op("add"), dot, x2_b);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4, 0};
EXPECT(test::throws([&] { migraphx::parse_onnx("gemm_dyn_C_error.onnx", options); }));
options.default_dyn_dim_value = {1, 10};
auto prog = parse_onnx("gemm_dyn_bias_test.onnx", options);
EXPECT(p == prog);
}
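The parsed program above amounts to a Gemm with transA = 1, alpha = beta = 1 (the defaults), and a bias broadcast over the rows of the product. A minimal numpy sketch of the same computation, with the dynamic dimension of A fixed to an arbitrary value for illustration:

import numpy as np

# illustrative shapes: A's dynamic dimension {1, 10} is fixed to 4 here
A = np.random.rand(8, 4).astype(np.float32)   # parameter "A", transposed by the parser
B = np.random.rand(8, 7).astype(np.float32)   # parameter "B"
C = np.random.rand(1, 7).astype(np.float32)   # bias "C", multibroadcast to (4, 7)

Y = A.T @ B + C
print(Y.shape)    # (4, 7)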
TEST_CASE(gemm_rank_error)
@@ -6539,6 +6592,11 @@ TEST_CASE(transpose_gather_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(trilu_neg_k_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("trilu_neg_k_test.onnx"); }));
}
TEST_CASE(undefined_test)
{
migraphx::program p;
Binary ONNX model files added: trilu_batch_diff_k_test.onnx, trilu_neg_k_test.onnx, trilu_out_k_test.onnx, trilu_row_one_test.onnx, trilu_test.onnx
@@ -1458,4 +1458,73 @@ TEST_CASE(where_test)
EXPECT(migraphx::verify_range(result_vector, gold));
}
std::vector<float> gen_trilu_test(const migraphx::shape& s, const migraphx::program& p)
{
// input data filled with values 1 to nelements
std::vector<float> x_data(s.elements());
std::iota(x_data.begin(), x_data.end(), 1);
migraphx::parameter_map pp;
pp["x"] = migraphx::argument(s, x_data.data());
auto result = p.eval(pp).back();
std::vector<float> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
return result_vector;
}
TEST_CASE(trilu_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {1, 2, 3, 4, 0, 6, 7, 8, 0, 0, 11, 12};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(trilu_batch_diff_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_batch_diff_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {2, 2, 3}}, p);
std::vector<float> gold = {0, 0, 3, 0, 0, 0, 0, 0, 9, 0, 0, 0};
EXPECT(migraphx::verify_range(result_vector, gold));
}
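For reference, the gold vectors in the two upper-triangular cases above follow numpy's triu semantics on the 1..n data that gen_trilu_test feeds in. A minimal sketch (assuming numpy; np_trilu_gold is an illustrative helper, not part of the test suite):

import numpy as np

def np_trilu_gold(shape, k=0):
    # fill with 1..n to mirror gen_trilu_test, then keep the upper triangle of the last two axes
    x = np.arange(1, np.prod(shape) + 1, dtype=np.float32).reshape(shape)
    return np.triu(x, k=k).flatten()

print(np_trilu_gold((3, 4)))               # 1 2 3 4 0 6 7 8 0 0 11 12
print(np_trilu_gold((2, 2, 3), k=2))       # 0 0 3 0 0 0 0 0 9 0 0 0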
TEST_CASE(trilu_lower_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_lower_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold = {0, 0, 0, 0, 5, 0, 0, 0, 9, 10, 0, 0};
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(trilu_out_k_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_out_k_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {3, 4}}, p);
std::vector<float> gold(12, 0);
EXPECT(migraphx::verify_range(result_vector, gold));
}
TEST_CASE(trilu_row_one_test)
{
migraphx::program p = migraphx::parse_onnx("trilu_row_one_test.onnx");
std::vector<float> result_vector = gen_trilu_test({migraphx::shape::float_type, {1, 4}}, p);
std::vector<float> gold = {0, 2, 3, 4};
EXPECT(migraphx::verify_range(result_vector, gold));
}
int main(int argc, const char* argv[]) { test::run(argc, argv); }
@@ -831,6 +831,77 @@ TEST_CASE(gather)
}
}
TEST_CASE(gather_dyn0)
{
// Insert dynamic index into dynamic shape
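    // dynamic dimensions below are written as {min, max, optimal} triples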
migraphx::shape input{migraphx::shape::float_type,
{{2, 3, 2}, {3, 4, 3}, {6, 9, 7}, {12, 14, 13}}};
migraphx::shape indices{migraphx::shape::int32_type, {{2, 7, 3}, {3, 3, 0}}};
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type,
{{2, 3, 2}, {2, 7, 3}, {3, 3, 0}, {6, 9, 7}, {12, 14, 13}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
TEST_CASE(gather_dyn1)
{
// Insert static index into dynamic shape
migraphx::shape input{migraphx::shape::float_type,
{{2, 3, 2}, {3, 4, 3}, {6, 9, 7}, {12, 14, 13}}};
migraphx::shape indices{migraphx::shape::int32_type, {2, 3}};
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type,
{{2, 3, 2}, {2, 2, 0}, {3, 3, 0}, {6, 9, 7}, {12, 14, 13}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
TEST_CASE(gather_dyn2)
{
// Insert scalar (static) index into dynamic shape
migraphx::shape input{migraphx::shape::float_type,
{{2, 3, 2}, {3, 4, 3}, {6, 9, 7}, {12, 14, 13}}};
std::vector<std::size_t> mins;
std::vector<std::size_t> maxes;
std::vector<std::size_t> opts;
migraphx::shape indices{migraphx::shape::int32_type, mins, maxes, opts};
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type, {{2, 3, 2}, {6, 9, 7}, {12, 14, 13}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
TEST_CASE(gather_dyn3)
{
// Insert dynamic index into static shape, axis 1
migraphx::shape input{migraphx::shape::float_type, {2, 3, 6, 12}};
migraphx::shape indices{migraphx::shape::int32_type, {{2, 3, 2}, {3, 4, 3}}};
int axis = 1;
expect_shape(migraphx::shape{migraphx::shape::float_type,
{{2, 2, 0}, {2, 3, 2}, {3, 4, 3}, {6, 6, 0}, {12, 12, 0}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
TEST_CASE(gather_dyn4)
{
// Insert dynamic index into static shape, axis 0
migraphx::shape input{migraphx::shape::float_type, {2, 3, 6, 12}};
migraphx::shape indices{migraphx::shape::int32_type, {{2, 3, 2}, {3, 4, 3}}};
int axis = 0;
expect_shape(migraphx::shape{migraphx::shape::float_type,
{{2, 3, 2}, {3, 4, 3}, {3, 3, 0}, {6, 6, 0}, {12, 12, 0}}},
migraphx::make_op("gather", {{"axis", axis}}),
input,
indices);
}
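These shape tests all follow the gather rule that the output shape is the data shape with the axis dimension replaced by the dimensions of the indices. A static numpy analogue of gather_dyn3 (a sketch; dynamic {min, max, optimal} dimensions have no direct numpy counterpart):

import numpy as np

data = np.zeros((2, 3, 6, 12), dtype=np.float32)
indices = np.zeros((2, 3), dtype=np.int64)

# axis-1 gather: the size-3 axis is replaced by the (2, 3) indices shape
print(np.take(data, indices, axis=1).shape)    # (2, 2, 3, 6, 12)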
TEST_CASE(get_tuple_elem_test)
{
migraphx::shape s0{migraphx::shape::bool_type, {1, 1}};
@@ -2524,6 +2524,78 @@ TEST_CASE(gather_test)
}
}
TEST_CASE(gather_dyn_test0)
{
// Dynamic data, static indices
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int32_type, {{2, 5, 0}, {3, 3, 0}}};
auto x = mm->add_parameter("x", s);
std::vector<int> indices{1, 2};
migraphx::shape s_ind{migraphx::shape::int32_type, {1, 2}};
auto ind = mm->add_parameter("indices", s_ind);
mm->add_instruction(migraphx::make_op("gather", {{"axis", 1}}), x, ind);
migraphx::shape sresult{migraphx::shape::int32_type, {{2, 5, 0}, {1, 1, 0}, {2, 2, 0}}};
EXPECT(p.get_output_shapes().back() == sresult);
p.compile(migraphx::ref::target{});
migraphx::shape input_fixed_shape{migraphx::shape::int32_type, {2, 3}};
migraphx::shape input_indices{migraphx::shape::int32_type, {1, 2}};
migraphx::parameter_map params;
std::vector<int> data(2 * 3);
std::iota(data.begin(), data.end(), 0);
params["x"] = migraphx::argument(input_fixed_shape, data.data());
params["indices"] = migraphx::argument(input_indices, indices.data());
auto result = p.eval(params).back();
std::vector<int> gold = {1, 2, 4, 5};
std::vector<int> results_vector(2 * 1 * 2);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, gold));
migraphx::shape sfinal{migraphx::shape::int32_type, {2, 1, 2}};
EXPECT(result.get_shape() == sfinal);
}
TEST_CASE(gather_dyn_test1)
{
// Dynamic data, dynamic indices
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape s{migraphx::shape::int32_type, {{2, 5, 0}, {4, 4, 0}}};
auto x = mm->add_parameter("x", s);
migraphx::shape s_ind{migraphx::shape::int32_type, {{1, 8, 7}, {2, 3, 3}}};
auto ind = mm->add_parameter("indices", s_ind);
mm->add_instruction(migraphx::make_op("gather", {{"axis", 0}}), x, ind);
migraphx::shape sresult{migraphx::shape::int32_type, {{1, 8, 7}, {2, 3, 3}, {4, 4, 0}}};
EXPECT(p.get_output_shapes().back() == sresult);
p.compile(migraphx::ref::target{});
migraphx::shape input_fixed_shape{migraphx::shape::int32_type, {3, 4}};
migraphx::shape input_indices_shape{migraphx::shape::int32_type, {1, 2}};
std::vector<int> indices{2, 0};
migraphx::parameter_map params;
std::vector<int> data(3 * 4);
std::iota(data.begin(), data.end(), 0);
params["x"] = migraphx::argument(input_fixed_shape, data.data());
params["indices"] = migraphx::argument(input_indices_shape, indices.data());
auto result = p.eval(params).back();
std::vector<int> gold = {8, 9, 10, 11, 0, 1, 2, 3};
std::vector<int> results_vector(1 * 2 * 4);
result.visit([&](auto output) { results_vector.assign(output.begin(), output.end()); });
EXPECT(migraphx::verify_range(results_vector, gold));
migraphx::shape sfinal{migraphx::shape::int32_type, {1, 2, 4}};
EXPECT(result.get_shape() == sfinal);
}
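The gold values in both dynamic gather ref tests can be reproduced with numpy's take on the fixed shapes used at evaluation time; a minimal sketch (assuming numpy):

import numpy as np

# gather_dyn_test0: data 2x3 filled 0..5, indices [[1, 2]], axis 1
print(np.take(np.arange(6).reshape(2, 3), np.array([[1, 2]]), axis=1).flatten())   # 1 2 4 5

# gather_dyn_test1: data 3x4 filled 0..11, indices [[2, 0]], axis 0
print(np.take(np.arange(12).reshape(3, 4), np.array([[2, 0]]), axis=0).flatten())  # 8 9 10 11 0 1 2 3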
TEST_CASE(gathernd_test)
{
{
@@ -26,5 +26,8 @@ pip3 install -r requirements.txt
# Add newer cmake to the path
export PATH="/opt/cmake/bin:$PATH"
export CXXFLAGS="-D__HIP_PLATFORM_HCC__=1 -w"
./build.sh --config Release --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --test --use_migraphx
# pip3 install /code/onnxruntime/build/Linux/Release/dist/*.whl
./build.sh --config Release --cmake_extra_defines CMAKE_HIP_COMPILER=/opt/rocm/llvm/bin/clang++ --update --build --parallel --cmake_extra_defines ONNXRUNTIME_VERSION=$(cat ./VERSION_NUMBER) --skip_tests --rocm_home /opt/rocm --use_migraphx --migraphx_home /opt/rocm --rocm_version=`cat /opt/rocm/.info/version-dev`
cd build/Linux/Release
# Add test launcher for onnxrt tests
../../../tools/ci_build/github/pai/migraphx_test_launcher.sh