Unverified commit 514bd678, authored by Artur Wojcik, committed by GitHub

Merge branch 'develop' into sqlite3_windows

parents bc7adab1 f8bf7bd3
...@@ -5151,6 +5151,223 @@ def qlinearadd_bcast_test():
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearconv_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 7, 7])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.00369204697])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [132])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 1, 1], [0])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [0.00172794575])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [255])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.00162681262])
zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [123])
out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 7, 7])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
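As a point of reference (not part of the generated model), the arithmetic this test exercises can be sketched in NumPy, assuming the usual QLinearConv contract of dequantize, convolve, requantize; with a 1x1x1x1 kernel the convolution collapses to an elementwise scale. The helper name is illustrative only:

import numpy as np

def qlinearconv_1x1_ref(x_q, sc_x, zp_x, w_q, sc_w, zp_w, sc_y, zp_y):
    # dequantize the uint8 input and the single 1x1 weight
    x_fp = (x_q.astype(np.float32) - zp_x) * sc_x
    w_fp = (np.float32(w_q) - zp_w) * sc_w
    # a 1x1x1x1 kernel makes the convolution an elementwise multiply
    y_fp = x_fp * w_fp
    # requantize with the output scale and zero point, saturating to uint8
    return np.clip(np.round(y_fp / sc_y) + zp_y, 0, 255).astype(np.uint8)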
@onnx_test()
def qlinearconv_pad_1_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [0])
out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 5, 5])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[1, 1, 1, 1],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearconv_pad_0_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 1, 3, 3])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[0, 0, 0, 0],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearconv_scale_1D_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor(
'3', TensorProto.UINT8, [2, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [2], [1.0, 0.5])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [2], [0, 0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 2, 3, 3])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[0, 0, 0, 0],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearglobalavgpool_test():
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 3, 4, 4])
sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
z_pt_x = helper.make_tensor('X_zero_point', TensorProto.UINT8, [], [128])
y = helper.make_tensor_value_info('Y', TensorProto.UINT8, [1, 3, 1, 1])
sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.025])
z_pt_y = helper.make_tensor('Y_zero_point', TensorProto.UINT8, [], [64])
n = onnx.helper.make_node(
'QLinearGlobalAveragePool',
inputs=['X', 'X_scale', 'X_zero_point', 'Y_scale', 'Y_zero_point'],
outputs=['Y'],
channels_last=0,
)
return ([n], [x], [y], [sc_x, z_pt_x, sc_y, z_pt_y])
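A minimal NumPy sketch of the computation this node represents (illustrative only, assuming the contrib op's dequantize, global average over H and W, requantize behavior for channels_last=0):

import numpy as np

def qlinear_global_avgpool_ref(x_q, sc_x, zp_x, sc_y, zp_y):
    # dequantize the NCHW uint8 input
    x_fp = (x_q.astype(np.float32) - zp_x) * sc_x
    # global average pooling reduces each channel's H x W plane to one value
    y_fp = x_fp.mean(axis=(2, 3), keepdims=True)
    # requantize with the output scale and zero point
    return np.clip(np.round(y_fp / sc_y) + zp_y, 0, 255).astype(np.uint8)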
@onnx_test()
def qlinearmatmul_1D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [8])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[128])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearmatmul_2D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [1, 8])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8, 1])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[128])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1, 1])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearmatmul_3D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [2, 2, 4])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.0066])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [],
[113])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [2, 4, 3])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.00705])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[114])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.0107])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [],
[118])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [2, 2, 3])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
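The three QLinearMatMul generators above all follow the same dequantize, matmul, requantize pattern; a hedged NumPy sketch (not part of the test files, the helper name is illustrative):

import numpy as np

def qlinearmatmul_ref(a_q, sc_a, zp_a, b_q, sc_b, zp_b, sc_c, zp_c):
    # dequantize both operands
    a_fp = (a_q.astype(np.float32) - zp_a) * sc_a
    b_fp = (b_q.astype(np.float32) - zp_b) * sc_b
    # np.matmul covers the 1D, 2D, and batched 3D cases
    # (for 1D x 1D it yields a scalar rather than a shape-[1] tensor)
    c_fp = np.matmul(a_fp, b_fp)
    # requantize the result
    return np.clip(np.round(c_fp / sc_c) + zp_c, 0, 255).astype(np.uint8)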
@onnx_test()
def quantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
...
...@@ -4904,6 +4904,218 @@ TEST_CASE(qlinearadd_test)
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearconv_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("X", {migraphx::shape::uint8_type, {1, 1, 7, 7}});
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00369204697}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {132}});
auto w = mm->add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::uint8_type, {1, 1, 1, 1}}, {0}});
auto sc_w = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00172794575}});
auto z_pt_w = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {255}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00162681262}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {123}});
auto scale_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), sc_x);
auto z_pt_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto scale_w_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), sc_w);
auto z_pt_w_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), z_pt_w);
auto fp_w =
mm->add_instruction(migraphx::make_op("dequantizelinear"), w, scale_w_bcast, z_pt_w_bcast);
auto fp_y = mm->add_instruction(migraphx::make_op("convolution"), fp_x, fp_w);
auto scale_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), sc_y);
auto z_pt_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearconv_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearglobalavgpool_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("X", {migraphx::shape::uint8_type, {1, 3, 4, 4}});
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {128}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.025}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {64}});
auto scale_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 4, 4}}}), sc_x);
auto z_pt_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 4, 4}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y =
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"lengths", {4, 4}}}),
fp_x);
auto scale_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 1, 1}}}), sc_y);
auto z_pt_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 1, 1}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearglobalavgpool_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearmatmul_1D_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto a = mm->add_parameter("A", {migraphx::shape::uint8_type, {8}});
auto b = mm->add_parameter("B", {migraphx::shape::uint8_type, {8}});
auto sc_a = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_a = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {0}});
auto sc_b = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_b = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {128}});
auto sc_c = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_c = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {64}});
auto scale_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8}}}), sc_a);
auto z_pt_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8}}}), z_pt_a);
auto fp_a =
mm->add_instruction(migraphx::make_op("dequantizelinear"), a, scale_a_bcast, z_pt_a_bcast);
auto scale_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8}}}), sc_b);
auto z_pt_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8}}}), z_pt_b);
auto fp_b =
mm->add_instruction(migraphx::make_op("dequantizelinear"), b, scale_b_bcast, z_pt_b_bcast);
auto sq_a = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), fp_a);
auto sq_b = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), fp_b);
auto fp_c = mm->add_instruction(migraphx::make_op("dot"), sq_a, sq_b);
auto sq_c = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), fp_c);
auto scale_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1}}}), sc_c);
auto z_pt_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1}}}), z_pt_c);
auto c =
mm->add_instruction(migraphx::make_op("quantizelinear"), sq_c, scale_c_bcast, z_pt_c_bcast);
mm->add_return({c});
auto prog = migraphx::parse_onnx("qlinearmatmul_1D_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearmatmul_2D_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto a = mm->add_parameter("A", {migraphx::shape::uint8_type, {1, 8}});
auto b = mm->add_parameter("B", {migraphx::shape::uint8_type, {8, 1}});
auto sc_a = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_a = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {0}});
auto sc_b = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_b = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {128}});
auto sc_c = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_c = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {64}});
auto scale_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 8}}}), sc_a);
auto z_pt_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 8}}}), z_pt_a);
auto fp_a =
mm->add_instruction(migraphx::make_op("dequantizelinear"), a, scale_a_bcast, z_pt_a_bcast);
auto scale_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8, 1}}}), sc_b);
auto z_pt_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8, 1}}}), z_pt_b);
auto fp_b =
mm->add_instruction(migraphx::make_op("dequantizelinear"), b, scale_b_bcast, z_pt_b_bcast);
auto fp_c = mm->add_instruction(migraphx::make_op("dot"), fp_a, fp_b);
auto scale_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1}}}), sc_c);
auto z_pt_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1}}}), z_pt_c);
auto c =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_c, scale_c_bcast, z_pt_c_bcast);
mm->add_return({c});
auto prog = migraphx::parse_onnx("qlinearmatmul_2D_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(quantizelinear_test)
{
migraphx::program p;
...
qlinearmatmul_3D_test.onnx (new file): binary ONNX protobuf defining a QLinearMatMul node named qlinearmatmul_3D_test with inputs A, A_scale, A_zero_point, B, B_scale, B_zero_point, C_scale, C_zero_point and output C; raw bytes omitted.
...@@ -1270,7 +1270,7 @@ TEST_CASE(qlinearadd_test)
pp["B"] = migraphx::argument(b, data_b.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64,
...@@ -1318,6 +1318,215 @@ TEST_CASE(qlinearadd_bcast_test)
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_test)
{
// https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
migraphx::program p = migraphx::parse_onnx("qlinearconv_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 7, 7}};
std::vector<uint8_t> x_data = {255, 174, 162, 25, 203, 168, 58, 15, 59, 237, 95, 129, 0,
64, 56, 242, 153, 221, 168, 12, 166, 232, 178, 186, 195, 237,
162, 237, 188, 39, 124, 77, 80, 102, 43, 127, 230, 21, 83,
41, 40, 134, 255, 154, 92, 141, 42, 148, 247};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sx, x_data.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {0, 81, 93, 230, 52, 87, 197, 240, 196, 18, 160, 126, 255,
191, 199, 13, 102, 34, 87, 243, 89, 23, 77, 69, 60, 18,
93, 18, 67, 216, 131, 178, 175, 153, 212, 128, 25, 234, 172,
214, 215, 121, 0, 101, 163, 114, 213, 107, 8};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_pad_0_test)
{
// https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_0_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
std::vector<uint8_t> x_data = {0, 11, 21, 32, 42, 53, 64, 74, 85, 96, 106, 117, 128,
138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sx, x_data.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
// # (1, 1, 3, 3) output tensor
std::vector<int8_t> gold = {-43, -29, -15, 28, 42, 56, 99, 113, 127};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_pad_1_test)
{
// https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
migraphx::program p = migraphx::parse_onnx("qlinearconv_pad_1_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
std::vector<uint8_t> x_data = {0, 11, 21, 32, 42, 53, 64, 74, 85, 96, 106, 117, 128,
138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sx, x_data.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
// # (1, 1, 5, 5) output tensor
std::vector<uint8_t> gold = {19, 33, 43, 52, 38, 52, 85, 99, 113, 80, 99, 156, 170,
184, 128, 146, 227, 241, 255, 175, 113, 175, 184, 194, 132};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearconv_scale_1D_test)
{
// https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
migraphx::program p = migraphx::parse_onnx("qlinearconv_scale_1D_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sx{migraphx::shape::uint8_type, {1, 1, 5, 5}};
std::vector<uint8_t> x_data = {0, 11, 21, 32, 42, 53, 64, 74, 85, 96, 106, 117, 128,
138, 149, 159, 170, 181, 191, 202, 212, 223, 234, 244, 255};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sx, x_data.data());
auto result = p.eval(pp).back();
std::vector<int8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
// # (1, 2, 3, 3) output tensor
std::vector<int8_t> gold = {
-43, -29, -15, 28, 42, 56, 99, 113, 127, -43, -29, -15, 28, 42, 56, 99, 113, 127};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearglobalavgpool_test)
{
// github.com/microsoft/onnxruntime/blob/main/docs/ContribOperators.md
// #com.microsoft.QLinearGlobalAveragePool
migraphx::program p = migraphx::parse_onnx("qlinearglobalavgpool_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape sh_x{migraphx::shape::uint8_type, {1, 3, 4, 4}};
std::vector<uint8_t> data_x = {160, 156, 152, 148, 144, 140, 136, 132, 124, 120, 116, 112,
108, 104, 100, 96, 64, 72, 80, 88, 96, 104, 112, 120,
136, 144, 152, 160, 168, 176, 184, 192, 120, 121, 122, 123,
124, 125, 126, 127, 129, 130, 131, 132, 133, 134, 135, 136};
migraphx::parameter_map pp;
pp["X"] = migraphx::argument(sh_x, data_x.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {64, 64, 64};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
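The gold values can be reproduced by hand: each 4x4 channel of data_x sums to 2048, so its mean is 128, which dequantizes to (128 - 128) * 0.05 = 0.0 and requantizes to round(0.0 / 0.025) + 64 = 64. A quick illustrative NumPy check (not part of the test):

import numpy as np

x = np.array([160, 156, 152, 148, 144, 140, 136, 132, 124, 120, 116, 112,
              108, 104, 100, 96, 64, 72, 80, 88, 96, 104, 112, 120,
              136, 144, 152, 160, 168, 176, 184, 192, 120, 121, 122, 123,
              124, 125, 126, 127, 129, 130, 131, 132, 133, 134, 135, 136],
             dtype=np.uint8).reshape(1, 3, 4, 4)
fp = (x.astype(np.float32) - 128) * 0.05   # dequantize
avg = fp.mean(axis=(2, 3))                 # global average pool per channel
out = np.round(avg / 0.025) + 64           # requantize
print(out)                                 # [[64. 64. 64.]]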
TEST_CASE(qlinearmatmul_1D_test)
{
migraphx::program p = migraphx::parse_onnx("qlinearmatmul_1D_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape a{migraphx::shape::uint8_type, {8}};
std::vector<uint8_t> data_a = {2, 4, 6, 8, 10, 12, 14, 16};
migraphx::shape b{migraphx::shape::uint8_type, {8}};
std::vector<uint8_t> data_b = {126, 130, 124, 132, 122, 134, 120, 136};
migraphx::parameter_map pp;
pp["A"] = migraphx::argument(a, data_a.data());
pp["B"] = migraphx::argument(b, data_b.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {66};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
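The scalar gold value 66 follows directly from the quantization parameters in the generator: A dequantizes to [0.1, ..., 0.8], B to [-0.1, 0.1, -0.2, 0.2, -0.3, 0.3, -0.4, 0.4], their dot product is 0.10, and 0.10 / 0.05 + 64 = 66. An illustrative NumPy check (not part of the test):

import numpy as np

a = (np.array([2, 4, 6, 8, 10, 12, 14, 16], dtype=np.float32) - 0) * 0.05            # dequantized A
b = (np.array([126, 130, 124, 132, 122, 134, 120, 136], dtype=np.float32) - 128) * 0.05  # dequantized B
c = np.dot(a, b)                         # ~0.10 in float
print(int(np.round(c / 0.05) + 64))      # 66, matching the gold output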
TEST_CASE(qlinearmatmul_2D_test)
{
migraphx::program p = migraphx::parse_onnx("qlinearmatmul_2D_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape a{migraphx::shape::uint8_type, {1, 8}};
std::vector<uint8_t> data_a = {2, 4, 6, 8, 10, 12, 14, 16};
migraphx::shape b{migraphx::shape::uint8_type, {8, 1}};
std::vector<uint8_t> data_b = {126, 130, 124, 132, 122, 134, 120, 136};
migraphx::parameter_map pp;
pp["A"] = migraphx::argument(a, data_a.data());
pp["B"] = migraphx::argument(b, data_b.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {66};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(qlinearmatmul_3D_test)
{
// https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearMatMul.html
migraphx::program p = migraphx::parse_onnx("qlinearmatmul_3D_test.onnx");
p.compile(migraphx::make_target("ref"));
migraphx::shape a{migraphx::shape::uint8_type, {2, 2, 4}};
std::vector<uint8_t> data_a = {
208, 236, 0, 238, 3, 214, 255, 29, 208, 236, 0, 238, 3, 214, 255, 29};
migraphx::shape b{migraphx::shape::uint8_type, {2, 4, 3}};
std::vector<uint8_t> data_b = {152, 51, 244, 60, 26, 255, 0, 127, 246, 127, 254, 247,
152, 51, 244, 60, 26, 255, 0, 127, 246, 127, 254, 247};
migraphx::parameter_map pp;
pp["A"] = migraphx::argument(a, data_a.data());
pp["B"] = migraphx::argument(b, data_b.data());
auto result = p.eval(pp).back();
std::vector<uint8_t> result_vector;
result.visit([&](auto output) { result_vector.assign(output.begin(), output.end()); });
std::vector<uint8_t> gold = {168, 115, 255, 1, 66, 151, 168, 115, 255, 1, 66, 151};
EXPECT(migraphx::verify::verify_rms_range(result_vector, gold));
}
TEST_CASE(resize_downsample_f_test)
{
migraphx::program p = migraphx::parse_onnx("resize_downsample_f_test.onnx");
...
...@@ -62,4 +62,41 @@ TEST_CASE(broadcast_transpose_inner_broadcast)
EXPECT(m1 == m2);
}
TEST_CASE(broadcast_transpose_inner_broadcast_generic)
{
// first optimizes broadcast+transpose to unsqueeze+transpose+broadcast,
// then finds inner broadcast to become mul+broadcast
migraphx::module m1;
{
auto l1 = m1.add_parameter("x", {migraphx::shape::float_type, {5, 10}});
auto l2 = m1.add_parameter("y", {migraphx::shape::float_type, {5}});
auto mb1 =
m1.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 5, 10}}}), l1);
auto mb2 =
m1.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 10, 5}}}), l2);
auto t1 =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), mb2);
auto mul = m1.add_instruction(migraphx::make_op("mul"), mb1, t1);
m1.add_return({mul});
}
run_pass(m1);
migraphx::module m2;
{
auto l1 = m2.add_parameter("x", {migraphx::shape::float_type, {5, 10}});
auto l2 = m2.add_parameter("y", {migraphx::shape::float_type, {5}});
auto unsqueeze = m2.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), l2);
auto transpose = m2.add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), unsqueeze);
auto mb1 =
m2.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 5, 10}}}), l1);
auto mb2 = m2.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 5, 10}}}), transpose);
auto mul = m2.add_instruction(migraphx::make_op("mul"), mb1, mb2);
auto mb3 = m2.add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {3, 5, 10}}}), mul);
m2.add_return({mb3});
}
EXPECT(m1 == m2);
}
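A small NumPy sketch (illustrative, not part of the test) showing that the rewritten module m2 computes the same values as the original m1, with the multiply now done at the unbroadcast {1, 5, 10} shape and only the product broadcast to {3, 5, 10}:

import numpy as np

x = np.random.rand(5, 10).astype(np.float32)
y = np.random.rand(5).astype(np.float32)
# m1: broadcast both inputs to the full output shape, transpose y, then multiply
before = np.broadcast_to(x, (3, 5, 10)) * np.broadcast_to(y, (3, 10, 5)).transpose(0, 2, 1)
# m2: unsqueeze/transpose y, multiply at the smaller shape, broadcast the product once
yt = np.broadcast_to(y.reshape(1, 1, 5).transpose(0, 2, 1), (1, 5, 10))
after = np.broadcast_to(np.broadcast_to(x, (1, 5, 10)) * yt, (3, 5, 10))
assert np.allclose(before, after)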
int main(int argc, const char* argv[]) { test::run(argc, argv); }
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
...@@ -23,8 +23,14 @@
#####################################################################################
include(PythonModules)
set(VENV ${CMAKE_BINARY_DIR}/test/py/venv)
set(VENV_ONNX ${CMAKE_BINARY_DIR}/test/py/venv-onnx)
set(REQUIREMENTS ${CMAKE_CURRENT_SOURCE_DIR}/requirements.txt)
set(REQUIREMENTS_ONNX ${CMAKE_CURRENT_SOURCE_DIR}/requirements-onnx.txt)
set(PYTHON_VERSION_TO_DISABLE_ONNX 3.6)
function(add_py_test NAME SCRIPT)
function(add_py_venv_fixture FIXTURE_NAME VIRTUAL_ENV_DIR REQUIREMENTS_FILE)
foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
set (ENV_COMMAND ${CMAKE_COMMAND} -E env
"PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
...@@ -32,28 +38,57 @@ function(add_py_test NAME SCRIPT)
"MALLOC_CHECK_=3"
)
set(PYTHON_EXECUTABLE ${PYTHON_${PYTHON_VERSION}_EXECUTABLE})
add_test(
NAME test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN})
add_custom_target(test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN}
COMMENT "${PYTHON_EXECUTABLE} ${SCRIPT}")
if(NOT TEST py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env)
if (NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
add_test(NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env COMMAND ${PYTHON_EXECUTABLE} -m venv ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION} --clear)
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_initialize_env PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
set(PYTHON_EXECUTABLE ${VIRTUAL_ENV_DIR}/${PYTHON_VERSION}/bin/python)
add_test(
NAME py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env
COMMAND ${PYTHON_EXECUTABLE} -m pip install -r ${REQUIREMENTS_FILE})
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_INIT_VENV)
set_tests_properties(py_${PYTHON_VERSION}_${FIXTURE_NAME}_setup_env PROPERTIES FIXTURES_SETUP ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
endif()
endif()
endforeach()
endfunction()
function(add_py_test NAME SCRIPT FIXTURE_NAME VENV_DIR)
foreach(PYTHON_VERSION ${PYTHON_VERSIONS})
set (ENV_COMMAND ${CMAKE_COMMAND} -E env
"PYTHONPATH=$<TARGET_FILE_DIR:migraphx_pybind_${PYTHON_VERSION}>"
"PYTHONMALLOC=debug"
"MALLOC_CHECK_=3"
)
set(PYTHON_EXECUTABLE ${VENV_DIR}/${PYTHON_VERSION}/bin/python)
if(NOT (${FIXTURE_NAME} STREQUAL "onnx" AND ${PYTHON_VERSION} STREQUAL ${PYTHON_VERSION_TO_DISABLE_ONNX}))
add_test(
NAME test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN})
set_tests_properties(test_py_${PYTHON_VERSION}_${NAME} PROPERTIES FIXTURES_REQUIRED ${FIXTURE_NAME}_${PYTHON_VERSION}_VENV)
add_custom_target(test_py_${PYTHON_VERSION}_${NAME}
COMMAND ${ENV_COMMAND} ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/${SCRIPT} ${ARGN}
COMMENT "${PYTHON_EXECUTABLE} ${SCRIPT}")
endif()
endforeach()
endfunction()
add_dependencies(tests migraphx_py)
add_dependencies(check migraphx_py)
add_py_venv_fixture(common ${VENV} ${REQUIREMENTS})
add_py_venv_fixture(onnx ${VENV_ONNX} ${REQUIREMENTS_ONNX})

add_py_test(ref test_cpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(save_load test_save_load.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(op test_op.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(shape test_shape.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(module_construct test_module_construct.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(literal test_literal.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
if(MIGRAPHX_ENABLE_GPU)
add_py_test(gpu_offload test_gpu_offload.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(gpu test_gpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(array test_array.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(backend onnx_backend_test.py onnx ${VENV_ONNX} WORKING_DIRECTORY ${TEST_ONNX_DIR})
add_py_test(gpu_async test_gpu_async.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR})
endif()
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
onnx==1.14.1
protobuf==3.20.2
numpy==1.21.6
packaging==23.0
pytest==6.0.1
\ No newline at end of file
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#####################################################################################
numpy==1.21.6
\ No newline at end of file
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
...@@ -669,6 +669,23 @@ TEST_CASE(simplify_inner_broadcast_different_broadcasts)
EXPECT(m1 == m2);
}
TEST_CASE(simplify_inner_broadcast_no_common_axis)
{
auto b = migraphx::make_op("multibroadcast", {{"out_lens", {1, 5, 10}}});
migraphx::module m1;
{
auto x = m1.add_parameter("x", {migraphx::shape::int32_type, {5, 10}});
auto y = m1.add_parameter("y", {migraphx::shape::int32_type, {1, 5, 1}});
auto xb = m1.add_instruction(b, x);
auto yb = m1.add_instruction(b, y);
auto sum = m1.add_instruction(migraphx::make_op("add"), xb, yb);
m1.add_instruction(pass_op{}, sum);
}
migraphx::module m2 = m1;
run_pass(m1);
EXPECT(m1 == m2);
}
TEST_CASE(simplify_add_conv1)
{
migraphx::module m;
...
...@@ -67,6 +67,57 @@ migraphx::module make_concat_multibroadcast(const std::vector<size_t>& in_lens,
return m;
}
TEST_CASE(broadcast_transpose)
{
migraphx::module m1;
{
auto l = m1.add_parameter("x", {migraphx::shape::float_type, {5}});
auto mb =
m1.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 3, 5}}}), l);
auto t1 =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {2, 0, 1}}}), mb);
m1.add_return({t1});
}
run_pass(m1);
migraphx::module m2;
{
auto l = m2.add_parameter("x", {migraphx::shape::float_type, {5}});
auto u1 = m2.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), l);
auto t1 =
m2.add_instruction(migraphx::make_op("transpose", {{"permutation", {2, 0, 1}}}), u1);
auto mb =
m2.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {5, 2, 3}}}), t1);
m2.add_return({mb});
}
EXPECT(m1 == m2);
}
TEST_CASE(broadcast_transpose_opt)
{
// extra transpose from transformation will be optimized out
migraphx::module m1;
{
auto l = m1.add_parameter("x", {migraphx::shape::float_type, {5}});
auto mb =
m1.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {2, 3, 5}}}), l);
auto t1 =
m1.add_instruction(migraphx::make_op("transpose", {{"permutation", {1, 0, 2}}}), mb);
m1.add_return({t1});
}
run_pass(m1);
migraphx::module m2;
{
auto l = m2.add_parameter("x", {migraphx::shape::float_type, {5}});
auto u1 = m2.add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0, 1}}}), l);
auto mb =
m2.add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {3, 2, 5}}}), u1);
m2.add_return({mb});
}
EXPECT(m1 == m2);
}
TEST_CASE(broadcast_transpose_scalar)
{
migraphx::module m1;
...
/*
* The MIT License (MIT)
*
* Copyright (c) 2015-2022 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "verify_program.hpp"
#include <migraphx/program.hpp>
#include <migraphx/generate.hpp>
#include <migraphx/make_op.hpp>
struct ck_gemm_softmax_gemm : verify_program<ck_gemm_softmax_gemm>
{
migraphx::program create_program() const
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape m1_shape{migraphx::shape::half_type, {1, 12, 256, 256}};
migraphx::shape m2_shape{migraphx::shape::half_type, {1, 12, 256, 256}};
auto m2_elements = m2_shape.elements();
auto a = mm->add_parameter("1", m1_shape);
auto b = mm->add_parameter("2", m1_shape);
auto b1 = mm->add_parameter("3", m1_shape);
std::vector<float> eights(m2_elements, 0.125);
auto eight = mm->add_literal(migraphx::literal{m2_shape, eights});
std::vector<float> zeros(m2_elements, 0);
auto zero = mm->add_literal(migraphx::literal{m2_shape, zeros});
b = mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 1, 3, 2}}}), b);
auto gemm1 = mm->add_instruction(migraphx::make_op("dot"), a, b);
auto scale = mm->add_instruction(migraphx::make_op("mul"), gemm1, eight);
auto bias = mm->add_instruction(migraphx::make_op("add"), scale, zero);
auto softmax = mm->add_instruction(migraphx::make_op("softmax", {{"axis", -1}}), bias);
mm->add_instruction(migraphx::make_op("dot"), softmax, b1);
return p;
}
};
...@@ -3,7 +3,7 @@
#####################################################################################
# The MIT License (MIT)
#
# Copyright (c) 2015-2023 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
...@@ -51,6 +51,7 @@ else
openmp-extras \
python3-dev \
python3-pip \
python3-venv \
rocblas-dev \
rocm-cmake
fi
...