Commit 8d32c6b8 authored by Paul

Merge branch 'develop' into blas_tuning

parents 23cb7917 f25606f9
[Binary ONNX protobuf test files (content not human-readable): const_of_shape_no_value_attr_test.onnx, constant_no_attributes_test.onnx, constant_value_int_test.onnx, constant_value_ints_test.onnx]
@@ -270,23 +270,26 @@ def averagepool_dyn_test():
node = onnx.helper.make_node('AveragePool',
inputs=['0'],
outputs=['1'],
kernel_shape=[3, 3, 3])
kernel_shape=[3, 3, 3],
strides=[2, 2, 2],
pads=[1, 1, 1, 1, 1, 1])
return ([node], [x], [out])
@onnx_test()
def averagepool_dyn_autopad_error_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [None, 1, 5, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [None, 1, 5, 5])
def averagepool_dyn_autopad_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[None, 3, 5, 5, 5])
out = helper.make_tensor_value_info('1', TensorProto.FLOAT,
[None, 3, 3, 3, 3])
node = onnx.helper.make_node('AveragePool',
inputs=['x'],
outputs=['y'],
kernel_shape=[2, 2],
auto_pad='SAME_LOWER')
return ([node], [x], [y])
inputs=['0'],
outputs=['1'],
kernel_shape=[3, 3, 3],
strides=[2, 2, 2],
auto_pad='SAME_UPPER')
return ([node], [x], [out])
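For reference, ONNX's SAME_UPPER/SAME_LOWER auto_pad rule keeps ceil(input/stride) outputs per spatial axis and splits the required padding between the two ends, with the odd extra element going to the end for SAME_UPPER. A minimal Python sketch (same_upper_pads is a hypothetical helper, not part of this suite):

import math

def same_upper_pads(in_len, kernel, stride):
    # SAME_* keeps ceil(in_len / stride) output elements per axis.
    out_len = math.ceil(in_len / stride)
    total = max((out_len - 1) * stride + kernel - in_len, 0)
    # SAME_UPPER puts the extra pad element (if total is odd) at the end.
    begin = total // 2
    return begin, total - begin

print(same_upper_pads(5, 3, 2))  # (1, 1), as in the test above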
@onnx_test()
@@ -579,6 +582,29 @@ def cast_test():
return ([node], [x], [y])
@onnx_test()
def castlike_test():
input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
target_type = helper.make_tensor_value_info('1', TensorProto.FLOAT, [10])
output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
node = onnx.helper.make_node('CastLike',
inputs=['0', '1'],
outputs=['out'])
return ([node], [input, target_type], [output])
@onnx_test()
def castlike_error_test():
input = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [10])
output = helper.make_tensor_value_info('out', TensorProto.FLOAT, [10])
node = onnx.helper.make_node('CastLike', inputs=['0'], outputs=['out'])
return ([node], [input], [output])
@onnx_test()
def ceil_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
@@ -822,6 +848,76 @@ def constant_test():
return ([node], [], [y])
@onnx_test()
def constant_value_float_test():
node = onnx.helper.make_node('Constant',
inputs=[],
outputs=[],
value_float=[1.0])
return ([node], [], [])
@onnx_test()
def constant_value_floats_test():
node = onnx.helper.make_node('Constant',
inputs=[],
outputs=[],
value_floats=[1.0, 2.0, 3.0])
return ([node], [], [])
@onnx_test()
def constant_value_int_test():
node = onnx.helper.make_node('Constant',
inputs=[],
outputs=[],
value_int=[1])
return ([node], [], [])
@onnx_test()
def constant_value_ints_test():
node = onnx.helper.make_node('Constant',
inputs=[],
outputs=[],
value_ints=[1, 2, 3])
return ([node], [], [])
@onnx_test()
def constant_no_attributes_test():
node = onnx.helper.make_node('Constant', inputs=[], outputs=[])
return ([node], [], [])
@onnx_test()
def constant_multiple_attributes_test():
x = np.array([0, 1, 2])
node = onnx.helper.make_node('Constant',
inputs=[],
outputs=[],
value_floats=[1.0, 2.0],
value_ints=[1, 2],
value=onnx.helper.make_tensor(
name='const_tensor',
data_type=TensorProto.FLOAT,
dims=x.shape,
vals=x.flatten().astype(float)))
return ([node], [], [])
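Per the ONNX spec, a Constant node must carry exactly one of its value* attributes, which is why this test (and the no-attribute one above) is expected to fail parsing. A hypothetical checker sketching the rule:

VALUE_ATTRS = {'value', 'sparse_value', 'value_float', 'value_floats',
               'value_int', 'value_ints', 'value_string', 'value_strings'}

def check_constant(node):
    present = [a.name for a in node.attribute if a.name in VALUE_ATTRS]
    if len(present) != 1:
        raise ValueError(f'Constant needs exactly one value attribute, got {present}')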
@onnx_test()
def constant_fill_test():
value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
@@ -934,9 +1030,9 @@ def const_of_shape_empty_input_test():
[10])
empty_val = np.array([]).astype(np.int64)
empty_ts = helper.make_tensor(name='empty_tensor',
data_type=TensorProto.INT32,
data_type=TensorProto.INT64,
dims=empty_val.shape,
vals=empty_val.flatten().astype(int))
vals=empty_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
@@ -962,9 +1058,9 @@ def const_of_shape_float_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
@@ -982,22 +1078,44 @@ def const_of_shape_float_test():
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_default_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
outputs=['y'])
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
@@ -1011,9 +1129,9 @@ def const_of_shape_int64_test():
def const_of_shape_no_value_attr_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
data_type=TensorProto.INT64,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
vals=shape_val.flatten().astype(np.int64))
shape_const = helper.make_node(
'Constant',
inputs=[],
@@ -1031,6 +1149,40 @@ def const_of_shape_no_value_attr_test():
return ([shape_const, node], [], [y])
@onnx_test()
def const_of_shape_dyn_float_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.FLOAT, [1],
[10])
output_dims = helper.make_tensor_value_info('output_dims',
TensorProto.INT64, [3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['output_dims'],
outputs=['y'],
value=tensor_val)
return ([node], [output_dims], [y])
@onnx_test()
def const_of_shape_dyn_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
output_dims = helper.make_tensor_value_info('output_dims',
TensorProto.INT64, [3])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['output_dims'],
outputs=['y'],
value=tensor_val)
return ([node], [output_dims], [y])
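ConstantOfShape fills a tensor whose dimensions come from its int64 input; the one-element 'value' attribute supplies the fill value and defaults to float32 zero. A numpy sketch of the semantics these tests exercise (constant_of_shape is a hypothetical reference, not the parser's code):

import numpy as np

def constant_of_shape(output_dims, value=None):
    # value is a one-element tensor; the spec default is a float32 zero.
    if value is None:
        value = np.array([0.0], dtype=np.float32)
    return np.full(tuple(output_dims), value[0], dtype=value.dtype)

print(constant_of_shape(np.array([2, 3, 4], dtype=np.int64),
                        np.array([10], dtype=np.int64)).shape)  # (2, 3, 4)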
@onnx_test()
def conv_1d_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 5])
@@ -3456,7 +3608,6 @@ def instance_norm_dyn_batch_test():
outputs=['3'])
return ([node], [x, scale, bias], [y])
return ([node], [x, scale, bias], [y])
@onnx_test()
@@ -4945,6 +5096,278 @@ def prelu_brcst_test():
return ([node], [arg0, arg1], [arg_out])
@onnx_test()
def qlinearadd_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [64])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [64])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[128])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [64])
node = onnx.helper.make_node(
'QLinearAdd',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearadd_bcast_test():
a = helper.make_tensor_value_info('A', TensorProto.INT8, [64])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.INT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.INT8, [1, 1, 64])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.INT8, [], [32])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.INT8, [], [-64])
c = helper.make_tensor_value_info('C', TensorProto.INT8, [1, 1, 64])
node = onnx.helper.make_node(
'QLinearAdd',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
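QLinearAdd dequantizes both operands, adds in float, and requantizes with the output scale and zero point; the qlinearadd TEST_CASEs later in this diff expect exactly that graph. A numpy sketch, assuming saturating uint8 rounding:

import numpy as np

def qlinear_add(a, sa, za, b, sb, zb, sc, zc):
    # dequantize -> float add -> requantize (saturating uint8)
    fa = (a.astype(np.float32) - za) * sa
    fb = (b.astype(np.float32) - zb) * sb
    q = np.rint((fa + fb) / sc) + zc
    return np.clip(q, 0, 255).astype(np.uint8)

a = np.array([10, 20], dtype=np.uint8)
b = np.array([130, 140], dtype=np.uint8)
print(qlinear_add(a, 0.05, 0, b, 0.05, 128, 0.05, 64))  # [76 96]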
@onnx_test()
def qlinearconv_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__QLinearConv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 7, 7])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [], [0.00369204697])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [132])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 1, 1], [0])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [0.00172794575])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [255])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.00162681262])
zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [123])
out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 7, 7])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearconv_pad_1_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.UINT8, [], [0])
out = helper.make_tensor_value_info('out', TensorProto.UINT8, [1, 1, 5, 5])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[1, 1, 1, 1],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearconv_pad_0_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor('3', TensorProto.UINT8, [1, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [], [1.0])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [], [0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 1, 3, 3])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[0, 0, 0, 0],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearconv_scale_1D_test():
# https://xadupre.github.io/draft/onnx/onnx_doc_folder/onnx__Conv.html
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 1, 5, 5])
sc_x = helper.make_tensor('1', TensorProto.FLOAT, [],
[0.09411764705882353])
zero_pt_x = helper.make_tensor('2', TensorProto.UINT8, [], [0])
wt = helper.make_tensor(
'3', TensorProto.UINT8, [2, 1, 3, 3],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2])
sc_wt = helper.make_tensor('4', TensorProto.FLOAT, [2], [1.0, 0.5])
zero_pt_wt = helper.make_tensor('5', TensorProto.UINT8, [2], [0, 0])
sc_y = helper.make_tensor('6', TensorProto.FLOAT, [], [0.6352941176470588])
zero_pt_y = helper.make_tensor('7', TensorProto.INT8, [], [-128])
out = helper.make_tensor_value_info('out', TensorProto.INT8, [1, 2, 3, 3])
node = onnx.helper.make_node(
'QLinearConv',
inputs=['X', '1', '2', '3', '4', '5', '6', '7'],
outputs=['out'],
pads=[0, 0, 0, 0],
)
return ([node], [x], [out],
[sc_x, zero_pt_x, wt, sc_wt, zero_pt_wt, sc_y, zero_pt_y])
@onnx_test()
def qlinearglobalavgpool_test():
x = helper.make_tensor_value_info('X', TensorProto.UINT8, [1, 3, 4, 4])
sc_x = helper.make_tensor('X_scale', TensorProto.FLOAT, [], [0.05])
z_pt_x = helper.make_tensor('X_zero_point', TensorProto.UINT8, [], [128])
y = helper.make_tensor_value_info('Y', TensorProto.UINT8, [1, 3, 1, 1])
sc_y = helper.make_tensor('Y_scale', TensorProto.FLOAT, [], [0.025])
z_pt_y = helper.make_tensor('Y_zero_point', TensorProto.UINT8, [], [64])
n = onnx.helper.make_node(
'QLinearGlobalAveragePool',
inputs=['X', 'X_scale', 'X_zero_point', 'Y_scale', 'Y_zero_point'],
outputs=['Y'],
channels_last=0,
)
return ([n], [x], [y], [sc_x, z_pt_x, sc_y, z_pt_y])
@onnx_test()
def qlinearmatmul_1D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [8])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[128])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
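For 1-D operands QLinearMatMul follows the matmul rule of promoting to 2-D and squeezing the result back (see the unsqueeze/squeeze pair in the qlinearmatmul_1D_test TEST_CASE below). A numpy sketch of the quantized reference (qlinear_matmul is a hypothetical helper):

import numpy as np

def qlinear_matmul(a, sa, za, b, sb, zb, sc, zc):
    fa = (a.astype(np.float32) - za) * sa
    fb = (b.astype(np.float32) - zb) * sb
    q = np.rint(np.matmul(fa, fb) / sc) + zc   # 1-D inputs give a scalar
    return np.clip(q, 0, 255).astype(np.uint8)

a = np.full(8, 64, dtype=np.uint8)
b = np.full(8, 192, dtype=np.uint8)
print(qlinear_matmul(a, 0.01, 0, b, 0.01, 128, 0.05, 64))  # 130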
@onnx_test()
def qlinearmatmul_2D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [1, 8])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [], [0])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [8, 1])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[128])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.05])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [], [64])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [1, 1])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def qlinearmatmul_3D_test():
a = helper.make_tensor_value_info('A', TensorProto.UINT8, [2, 2, 4])
sc_a = helper.make_tensor('A_scale', TensorProto.FLOAT, [], [0.0066])
zero_pt_a = helper.make_tensor('A_zero_point', TensorProto.UINT8, [],
[113])
b = helper.make_tensor_value_info('B', TensorProto.UINT8, [2, 4, 3])
sc_b = helper.make_tensor('B_scale', TensorProto.FLOAT, [], [0.00705])
zero_pt_b = helper.make_tensor('B_zero_point', TensorProto.UINT8, [],
[114])
sc_c = helper.make_tensor('C_scale', TensorProto.FLOAT, [], [0.0107])
zero_pt_c = helper.make_tensor('C_zero_point', TensorProto.UINT8, [],
[118])
c = helper.make_tensor_value_info('C', TensorProto.UINT8, [2, 2, 3])
node = onnx.helper.make_node(
'QLinearMatMul',
inputs=[
'A', 'A_scale', 'A_zero_point', 'B', 'B_scale', 'B_zero_point',
'C_scale', 'C_zero_point'
],
outputs=['C'],
)
return ([node], [a, b], [c],
[sc_a, zero_pt_a, sc_b, zero_pt_b, sc_c, zero_pt_c])
@onnx_test()
def quantizelinear_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5])
@@ -5642,6 +6065,24 @@ def reshape_non_standard_test():
return ([trans, res], [x], [y])
@onnx_test()
def reshape_variable_input_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [4, 2, 3])
x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 8])
node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
return ([node], [x, x_shape], [y])
@onnx_test()
def reshape_variable_input_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, 2, 3])
x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [None, 6])
node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
return ([node], [x, x_shape], [y])
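When the shape arrives as a graph input, the output dims are only known at run time; the matching TEST_CASEs below allocate a buffer from the shape tensor and reshape into it. The plain numpy equivalent, as a sketch:

import numpy as np

data = np.arange(24, dtype=np.float32).reshape(4, 2, 3)
dims = np.array([3, 8], dtype=np.int64)   # runtime shape input
print(np.reshape(data, dims).shape)       # (3, 8)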
@onnx_test()
def resize_downsample_f_test():
scales = np.array([1.0, 1.0, 0.6, 0.6], dtype=np.float32)
@@ -6414,6 +6855,30 @@ def slice_test():
return ([node], [x], [y])
@onnx_test()
def slice_constant_test():
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 2])
x_tensor = helper.make_tensor(name='x_tensor',
data_type=TensorProto.FLOAT,
dims=[3, 2],
vals=[0, 1, 2, 3, 4, 5])
x = onnx.helper.make_node('Constant',
inputs=[],
outputs=['x'],
value=x_tensor)
node = onnx.helper.make_node('Slice',
inputs=['x'],
axes=[0, 1],
starts=[1, 0],
ends=[2, 2],
outputs=['1'])
return ([x, node], [], [y])
@onnx_test()
def slice_dyn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [None, None, 2])
@@ -6746,6 +7211,92 @@ def slice_max_end_test():
return ([node], [x], [y])
@onnx_test()
def slice_var_input_static0():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 2])
starts = helper.make_tensor_value_info('starts', TensorProto.INT32, [2])
ends = helper.make_tensor_value_info('ends', TensorProto.INT32, [2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node('Slice',
inputs=['data', 'starts', 'ends'],
axes=[0, 1],
outputs=['output'])
return ([node], [data, starts, ends], [output])
@onnx_test()
def slice_var_input_static1():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 2])
starts = helper.make_tensor_value_info('starts', TensorProto.INT64, [2])
ends = helper.make_tensor_value_info('ends', TensorProto.INT64, [2])
axes = helper.make_tensor_value_info('axes', TensorProto.INT64, [2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node('Slice',
inputs=['data', 'starts', 'ends', 'axes'],
outputs=['output'])
return ([node], [data, starts, ends, axes], [output])
@onnx_test()
def slice_var_input_dyn0():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [None, 2])
starts = helper.make_tensor_value_info('starts', TensorProto.INT32, [2])
ends = helper.make_tensor_value_info('ends', TensorProto.INT32, [2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node('Slice',
inputs=['data', 'starts', 'ends'],
axes=[0, 1],
outputs=['output'])
return ([node], [data, starts, ends], [output])
@onnx_test()
def slice_var_input_dyn1():
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [None, 2])
starts = helper.make_tensor_value_info('starts', TensorProto.INT32, [2])
ends = helper.make_tensor_value_info('ends', TensorProto.INT32, [2])
axes = helper.make_tensor_value_info('axes', TensorProto.INT32, [2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node('Slice',
inputs=['data', 'starts', 'ends', 'axes'],
outputs=['output'])
return ([node], [data, starts, ends, axes], [output])
@onnx_test()
def slice_var_input_steps_error():
step = np.array([2, 1])
step_tensor = helper.make_tensor(name="step",
data_type=TensorProto.INT32,
dims=step.shape,
vals=step.astype(int))
arg_step = helper.make_node("Constant",
inputs=[],
outputs=['arg_step'],
value=step_tensor)
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 2])
starts = helper.make_tensor_value_info('starts', TensorProto.FLOAT, [2])
ends = helper.make_tensor_value_info('ends', TensorProto.FLOAT, [2])
axes = helper.make_tensor_value_info('axes', TensorProto.FLOAT, [2])
output = helper.make_tensor_value_info('output', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node(
'Slice',
inputs=['data', 'starts', 'ends', 'axes', 'arg_step'],
outputs=['output'])
return ([arg_step, node], [data, starts, ends, axes], [output])
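With starts/ends/axes supplied as graph inputs, their values are only known at run time, so they are passed to the slice op directly; variable step inputs are rejected (see slice_var_input_steps_error). A numpy sketch of the computed slice (slice_ref is a hypothetical reference):

import numpy as np

def slice_ref(data, starts, ends, axes):
    idx = [slice(None)] * data.ndim
    for s, e, ax in zip(starts, ends, axes):
        idx[ax] = slice(int(s), int(e))
    return data[tuple(idx)]

data = np.arange(6, dtype=np.float32).reshape(3, 2)
print(slice_ref(data, [1, 0], [2, 2], [0, 1]))  # [[2. 3.]]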
@onnx_test()
def softmax_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3])
@@ -24,7 +24,7 @@
#include <iostream>
#include <vector>
#include <migraphx/literal.hpp>
#include <migraphx/operators.hpp>
#include <migraphx/op/common.hpp>
#include <migraphx/program.hpp>
#include <migraphx/instruction.hpp>
#include <migraphx/pass_manager.hpp>
@@ -292,16 +292,21 @@ TEST_CASE(averagepool_3d_test)
TEST_CASE(averagepool_dyn_test)
{
// Pooling with dynamic input and no auto padding
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 4}, {3, 3}, {5, 5}, {5, 5}, {5, 5}}});
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0, 0, 0}},
{"stride", {1, 1, 1}},
{"lengths", {3, 3, 3}}}),
l0);
auto ret =
mm->add_instruction(migraphx::make_op("pooling",
{
{"mode", migraphx::op::pooling_mode::average},
{"stride", {2, 2, 2}},
{"lengths", {3, 3, 3}},
{"padding", {1, 1, 1, 1, 1, 1}},
{"padding_mode", 0},
}),
l0);
mm->add_return({ret});
migraphx::onnx_options options;
@@ -310,12 +315,29 @@ TEST_CASE(averagepool_dyn_test)
EXPECT(p == prog);
}
TEST_CASE(averagepool_dyn_autopad_error_test)
TEST_CASE(averagepool_dyn_autopad_test)
{
// Pooling with dynamic input and auto padding. Default padding values will be overridden.
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_parameter(
"0", {migraphx::shape::float_type, {{1, 4}, {3, 3}, {5, 5}, {5, 5}, {5, 5}}});
auto ret = mm->add_instruction(
migraphx::make_op("pooling",
{
{"mode", migraphx::op::pooling_mode::average},
{"stride", {2, 2, 2}},
{"lengths", {3, 3, 3}},
{"padding", {0, 0, 0, 0, 0, 0}},
{"padding_mode", migraphx::op::padding_mode_t::same_upper},
}),
l0);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4};
EXPECT(test::throws(
[&] { migraphx::parse_onnx("averagepool_dyn_autopad_error_test.onnx", options); }));
auto prog = migraphx::parse_onnx("averagepool_dyn_autopad_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(averagepool_dyn_asym_padding_error_test)
@@ -340,10 +362,10 @@ TEST_CASE(averagepool_notset_test)
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
auto ins = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {2, 2, 2, 2}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {2, 2}}}), ins);
@@ -360,11 +382,11 @@ TEST_CASE(averagepool_nt_cip_test)
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
std::vector<int64_t> pads = {0, 0, 0, 0, 0, 0, 1, 1};
auto ins_pad = mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}}), input);
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {2, 2}},
{"lengths", {6, 6}}}),
ins_pad);
mm->add_return({ret});
@@ -374,16 +396,22 @@ TEST_CASE(averagepool_nt_cip_test)
TEST_CASE(averagepool_same_lower_test)
{
// auto_pad mode of SAME_LOWER with a static input shape is handled in parsing and
// padding_mode is set to default_ when the operation is created
migraphx::program p;
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
auto ins = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
input);
auto ret = mm->add_instruction(
auto ins = mm->add_instruction(
migraphx::make_op("pooling",
{
{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}},
{"padding_mode", migraphx::op::padding_mode_t::default_},
}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {0, 0}}, {"ends", {5, 5}}}), ins);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("averagepool_same_lower_test.onnx");
@@ -398,11 +426,11 @@ TEST_CASE(averagepool_sl_cip_test)
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
std::vector<int64_t> pads = {0, 0, 1, 1, 0, 0, 0, 0};
auto ins_pad = mm->add_instruction(migraphx::make_op("pad", {{"pads", pads}}), input);
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
auto ret = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
ins_pad);
mm->add_return({ret});
auto prog = migraphx::parse_onnx("averagepool_sl_cip_test.onnx");
@@ -416,10 +444,10 @@ TEST_CASE(averagepool_same_upper_test)
auto* mm = p.get_main_module();
auto input = mm->add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 5}});
auto ins = mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {1, 1, 1, 1}},
{"stride", {1, 1}},
{"lengths", {2, 2}}}),
input);
auto ret = mm->add_instruction(
migraphx::make_op("slice", {{"axes", {2, 3}}, {"starts", {1, 1}}, {"ends", {6, 6}}}), ins);
@@ -659,6 +687,26 @@ TEST_CASE(cast_test)
EXPECT(p == prog);
}
TEST_CASE(castlike_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l = mm->add_parameter("0", migraphx::shape{migraphx::shape::half_type, {10}});
mm->add_parameter("1", migraphx::shape{migraphx::shape::float_type, {10}});
mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::float_type)}}),
l);
auto prog = optimize_onnx("castlike_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(castlike_error_test)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("castlike_error_test.onnx"); }));
}
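CastLike converts the first input to the element type of the second, so it lowers to the same convert instruction Cast uses; with the type-giving input missing there is nothing to convert to, hence the error test. The numpy equivalent, as a sketch:

import numpy as np

x = np.ones(10, dtype=np.float16)
target = np.zeros(10, dtype=np.float32)
y = x.astype(target.dtype)   # CastLike(x, target)
print(y.dtype)  # float32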
TEST_CASE(ceil_test)
{
migraphx::program p;
@@ -902,6 +950,58 @@ TEST_CASE(constant_test)
EXPECT(p == prog);
}
TEST_CASE(constant_value_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type, {1}}, {1.0f}});
auto prog = optimize_onnx("constant_value_float_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(constant_value_floats_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::float_type, {3}}, {1.0f, 2.0f, 3.0f}});
auto prog = optimize_onnx("constant_value_floats_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(constant_value_int_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {1}}, {1}});
auto prog = optimize_onnx("constant_value_int_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(constant_value_ints_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {3}}, {1, 2, 3}});
auto prog = optimize_onnx("constant_value_ints_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(constant_no_attributes_test)
{
EXPECT(test::throws([&] { optimize_onnx("constant_no_attributes_test.onnx"); }));
}
TEST_CASE(constant_multiple_attributes_test)
{
EXPECT(test::throws([&] { optimize_onnx("constant_multiple_attributes_test.onnx"); }));
}
TEST_CASE(constant_fill_test)
{
migraphx::program p;
@@ -960,11 +1060,25 @@ TEST_CASE(constant_one_val_int64_test)
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_default_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape output_dims_shape(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(output_dims_shape, {2, 3, 4}));
migraphx::shape output_shape{migraphx::shape::float_type, {2, 3, 4}};
std::vector<float> vec(output_shape.elements(), 0.0);
mm->add_literal(migraphx::literal(output_shape, vec));
auto prog = optimize_onnx("const_of_shape_default_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_empty_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
mm->add_literal(migraphx::literal(migraphx::shape::int32_type));
mm->add_literal(migraphx::literal(migraphx::shape::int64_type));
migraphx::shape s(migraphx::shape::int64_type, {1}, {0});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
@@ -977,7 +1091,7 @@ TEST_CASE(const_of_shape_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ss(migraphx::shape::int32_type, {3});
migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
std::vector<float> vec(s.elements(), 10.0f);
@@ -991,8 +1105,10 @@ TEST_CASE(const_of_shape_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ss(migraphx::shape::int32_type, {3});
// output_dims
migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
// constant shape literal
migraphx::shape s(migraphx::shape::int64_type, {2, 3, 4});
std::vector<int64_t> vec(s.elements(), 10);
mm->add_literal(migraphx::literal(s, vec));
@@ -1005,7 +1121,7 @@ TEST_CASE(const_of_shape_no_value_attr_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
migraphx::shape ss(migraphx::shape::int32_type, {3});
migraphx::shape ss(migraphx::shape::int64_type, {3});
mm->add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
std::vector<float> vec(s.elements(), 0.0f);
@@ -1015,6 +1131,42 @@ TEST_CASE(const_of_shape_no_value_attr_test)
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_dyn_float_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto od_param =
mm->add_parameter("output_dims", migraphx::shape{migraphx::shape::int64_type, {3}});
auto alloc_ins = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}), od_param);
migraphx::shape dv_shape(migraphx::shape::float_type, {1}, {0});
auto dv_lit = mm->add_literal(migraphx::literal(dv_shape, {10}));
auto fill_ins = mm->add_instruction(migraphx::make_op("fill"), dv_lit, alloc_ins);
mm->add_return({fill_ins});
migraphx::onnx_options options;
auto prog = parse_onnx("const_of_shape_dyn_float_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(const_of_shape_dyn_int64_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto od_param =
mm->add_parameter("output_dims", migraphx::shape{migraphx::shape::int64_type, {3}});
auto alloc_ins = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::int64_type}}), od_param);
migraphx::shape dv_shape(migraphx::shape::int64_type, {1}, {0});
auto dv_lit = mm->add_literal(migraphx::literal(dv_shape, {10}));
auto fill_ins = mm->add_instruction(migraphx::make_op("fill"), dv_lit, alloc_ins);
mm->add_return({fill_ins});
migraphx::onnx_options options;
auto prog = parse_onnx("const_of_shape_dyn_int64_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(conv_autopad_fail_test)
{
EXPECT(test::throws([&] { optimize_onnx("conv_autopad_fail_test.onnx"); }));
@@ -1482,7 +1634,7 @@ TEST_CASE(conv_transpose_input_pads_asymm_1d_test)
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("convolution_backwards",
{{"padding", {0}}, {"stride", {2}}, {"dilation", {1}}}),
{{"padding", {0}}, {"stride", {2}}, {"dilation", {1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("slice", {{"axes", {2}}, {"starts", {0}}, {"ends", {6}}}),
@@ -1516,7 +1668,7 @@ TEST_CASE(conv_transpose_output_padding_3d_test)
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("convolution_backwards",
{{"padding", {0, 0, 0}}, {"stride", {3, 2, 2}}, {"dilation", {1, 1, 1}}}),
{{"padding", {0, 0, 0}}, {"stride", {3, 2, 2}}, {"dilation", {1, 1, 1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 0, 0, 0, 0, 0, 0, 1, 1, 1}}}), l2);
@@ -1549,7 +1701,7 @@ TEST_CASE(conv_transpose_output_shape_3d_test)
auto l1 = mm->add_parameter("w", {migraphx::shape::float_type, {1, 2, 3, 3, 3}});
auto l2 = mm->add_instruction(
migraphx::make_op("convolution_backwards",
{{"padding", {0, 0, 0}}, {"stride", {3, 2, 2}}, {"dilation", {1, 1, 1}}}),
{{"padding", {0, 0, 0}}, {"stride", {3, 2, 2}}, {"dilation", {1, 1, 1}}}),
l0,
l1);
mm->add_instruction(migraphx::make_op("pad", {{"pads", {0, 0, 0, 0, 0, 0, 0, 1, 1, 1}}}), l2);
@@ -1620,8 +1772,7 @@ TEST_CASE(depthtospace_test)
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 2, 2, 5, 5}}}), l0);
auto tmp2 = mm->add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 3, 4, 1, 5, 2}}}), tmp1);
auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 10, 10}}}), tmp3);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 10, 10}}}), tmp2);
auto prog = optimize_onnx("depthtospace_test.onnx");
EXPECT(p == prog);
}
@@ -1635,8 +1786,7 @@ TEST_CASE(depthtospace_crd_test)
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 2, 2, 5, 5}}}), l0);
auto tmp2 = mm->add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 1, 4, 2, 5, 3}}}), tmp1);
auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 10, 10}}}), tmp3);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 10, 10}}}), tmp2);
auto prog = optimize_onnx("depthtospace_crd_test.onnx");
EXPECT(p == prog);
}
@@ -1650,8 +1800,7 @@ TEST_CASE(depthtospace_simple_test)
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 2, 2, 2, 2, 3}}}), l0);
auto tmp2 = mm->add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 3, 4, 1, 5, 2}}}), tmp1);
auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 2, 4, 6}}}), tmp3);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 2, 4, 6}}}), tmp2);
auto prog = optimize_onnx("depthtospace_simple_test.onnx");
EXPECT(p == prog);
}
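The DepthToSpace/SpaceToDepth lowerings above are the standard reshape-transpose-reshape trick. A numpy sketch of DCR-mode depth-to-space matching depthtospace_simple_test's shapes (depth_to_space_dcr is a hypothetical reference):

import numpy as np

def depth_to_space_dcr(x, b):
    n, c, h, w = x.shape
    t = x.reshape(n, b, b, c // (b * b), h, w)       # split the channel axis
    t = t.transpose(0, 3, 4, 1, 5, 2)                # move blocks next to H and W
    return t.reshape(n, c // (b * b), h * b, w * b)

x = np.arange(48, dtype=np.float32).reshape(1, 8, 2, 3)
print(depth_to_space_dcr(x, 2).shape)  # (1, 2, 4, 6)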
@@ -1665,8 +1814,7 @@ TEST_CASE(spacetodepth_test)
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 2, 5, 2, 5, 2}}}), l0);
auto tmp2 = mm->add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 3, 5, 1, 2, 4}}}), tmp1);
auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 8, 5, 5}}}), tmp3);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {2, 8, 5, 5}}}), tmp2);
auto prog = optimize_onnx("spacetodepth_test.onnx");
EXPECT(p == prog);
}
@@ -1680,8 +1828,7 @@ TEST_CASE(spacetodepth_simple_test)
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 2, 2, 2, 3, 2}}}), l0);
auto tmp2 = mm->add_instruction(
migraphx::make_op("transpose", {{"permutation", {0, 3, 5, 1, 2, 4}}}), tmp1);
auto tmp3 = mm->add_instruction(migraphx::make_op("contiguous"), tmp2);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 8, 2, 3}}}), tmp3);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {1, 8, 2, 3}}}), tmp2);
auto prog = optimize_onnx("spacetodepth_simple_test.onnx");
EXPECT(p == prog);
}
@@ -1849,7 +1996,7 @@ TEST_CASE(equal_test)
auto eq = mm->add_instruction(migraphx::make_op("equal"), input1, input2);
auto ret = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
eq);
mm->add_return({ret});
@@ -1869,7 +2016,7 @@ TEST_CASE(equal_bool_test)
auto input2 = mm->add_parameter("x2", sb);
auto cin1 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
input1);
auto ret = mm->add_instruction(migraphx::make_op("equal"), cin1, input2);
mm->add_return({ret});
@@ -2579,7 +2726,7 @@ TEST_CASE(greater_test)
auto gr = mm->add_instruction(migraphx::make_op("greater"), input1, input2);
auto ret = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
gr);
mm->add_return({ret});
@@ -2598,7 +2745,7 @@ TEST_CASE(greater_bool_test)
auto input2 = mm->add_parameter("x2", sb);
auto cin1 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
input1);
auto ret = mm->add_instruction(migraphx::make_op("greater"), cin1, input2);
mm->add_return({ret});
@@ -3455,7 +3602,7 @@ TEST_CASE(less_test)
auto le = mm->add_instruction(migraphx::make_op("less"), input1, input2);
auto ret = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
le);
mm->add_return({ret});
@@ -3474,7 +3621,7 @@ TEST_CASE(less_bool_test)
auto input2 = mm->add_parameter("x2", sb);
auto cin1 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::bool_type)}}),
input1);
auto ret = mm->add_instruction(migraphx::make_op("less"), cin1, input2);
mm->add_return({ret});
@@ -4704,6 +4851,271 @@ TEST_CASE(prelu_brcst_test)
EXPECT(p == prog);
}
TEST_CASE(qlinearadd_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto a = mm->add_parameter("A", {migraphx::shape::uint8_type, {64}});
auto b = mm->add_parameter("B", {migraphx::shape::uint8_type, {64}});
auto sc_a = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_a = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {0}});
auto sc_b = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_b = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {128}});
auto sc_c = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_c = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {64}});
auto scale_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_a);
auto z_pt_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_a);
auto fp_a =
mm->add_instruction(migraphx::make_op("dequantizelinear"), a, scale_a_bcast, z_pt_a_bcast);
auto scale_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_b);
auto z_pt_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_b);
auto fp_b =
mm->add_instruction(migraphx::make_op("dequantizelinear"), b, scale_b_bcast, z_pt_b_bcast);
auto fp_c = mm->add_instruction(migraphx::make_op("add"), fp_a, fp_b);
auto scale_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), sc_c);
auto z_pt_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {64}}}), z_pt_c);
auto c =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_c, scale_c_bcast, z_pt_c_bcast);
mm->add_return({c});
auto prog = migraphx::parse_onnx("qlinearadd_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearconv_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("X", {migraphx::shape::uint8_type, {1, 1, 7, 7}});
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00369204697}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {132}});
auto w = mm->add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::uint8_type, {1, 1, 1, 1}}, {0}});
auto sc_w = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00172794575}});
auto z_pt_w = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {255}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.00162681262}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {123}});
auto scale_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), sc_x);
auto z_pt_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto scale_w_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), sc_w);
auto z_pt_w_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 1, 1}}}), z_pt_w);
auto fp_w =
mm->add_instruction(migraphx::make_op("dequantizelinear"), w, scale_w_bcast, z_pt_w_bcast);
auto fp_y = mm->add_instruction(migraphx::make_op("convolution"), fp_x, fp_w);
auto scale_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), sc_y);
auto z_pt_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 1, 7, 7}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearconv_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearglobalavgpool_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto x = mm->add_parameter("X", {migraphx::shape::uint8_type, {1, 3, 4, 4}});
auto sc_x = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_x = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {128}});
auto sc_y = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.025}});
auto z_pt_y = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {64}});
auto scale_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 4, 4}}}), sc_x);
auto z_pt_x_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 4, 4}}}), z_pt_x);
auto fp_x =
mm->add_instruction(migraphx::make_op("dequantizelinear"), x, scale_x_bcast, z_pt_x_bcast);
auto fp_y =
mm->add_instruction(migraphx::make_op("pooling",
{{"mode", migraphx::op::pooling_mode::average},
{"padding", {0, 0, 0, 0}},
{"lengths", {4, 4}}}),
fp_x);
auto scale_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 1, 1}}}), sc_y);
auto z_pt_y_bcast = mm->add_instruction(
migraphx::make_op("multibroadcast", {{"out_lens", {1, 3, 1, 1}}}), z_pt_y);
auto y =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_y, scale_y_bcast, z_pt_y_bcast);
mm->add_return({y});
auto prog = migraphx::parse_onnx("qlinearglobalavgpool_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearmatmul_1D_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto a = mm->add_parameter("A", {migraphx::shape::uint8_type, {8}});
auto b = mm->add_parameter("B", {migraphx::shape::uint8_type, {8}});
auto sc_a = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_a = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {0}});
auto sc_b = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_b = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {128}});
auto sc_c = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_c = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {64}});
auto scale_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8}}}), sc_a);
auto z_pt_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8}}}), z_pt_a);
auto fp_a =
mm->add_instruction(migraphx::make_op("dequantizelinear"), a, scale_a_bcast, z_pt_a_bcast);
auto scale_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8}}}), sc_b);
auto z_pt_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8}}}), z_pt_b);
auto fp_b =
mm->add_instruction(migraphx::make_op("dequantizelinear"), b, scale_b_bcast, z_pt_b_bcast);
auto sq_a = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {0}}}), fp_a);
auto sq_b = mm->add_instruction(migraphx::make_op("unsqueeze", {{"axes", {1}}}), fp_b);
auto fp_c = mm->add_instruction(migraphx::make_op("dot"), sq_a, sq_b);
auto sq_c = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", {0}}}), fp_c);
auto scale_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1}}}), sc_c);
auto z_pt_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1}}}), z_pt_c);
auto c =
mm->add_instruction(migraphx::make_op("quantizelinear"), sq_c, scale_c_bcast, z_pt_c_bcast);
mm->add_return({c});
auto prog = migraphx::parse_onnx("qlinearmatmul_1D_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(qlinearmatmul_2D_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto a = mm->add_parameter("A", {migraphx::shape::uint8_type, {1, 8}});
auto b = mm->add_parameter("B", {migraphx::shape::uint8_type, {8, 1}});
auto sc_a = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_a = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {0}});
auto sc_b = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_b = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {128}});
auto sc_c = mm->add_literal(migraphx::literal{migraphx::shape::float_type, {0.05}});
auto z_pt_c = mm->add_literal(migraphx::literal{migraphx::shape::uint8_type, {64}});
auto scale_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 8}}}), sc_a);
auto z_pt_a_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 8}}}), z_pt_a);
auto fp_a =
mm->add_instruction(migraphx::make_op("dequantizelinear"), a, scale_a_bcast, z_pt_a_bcast);
auto scale_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8, 1}}}), sc_b);
auto z_pt_b_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {8, 1}}}), z_pt_b);
auto fp_b =
mm->add_instruction(migraphx::make_op("dequantizelinear"), b, scale_b_bcast, z_pt_b_bcast);
auto fp_c = mm->add_instruction(migraphx::make_op("dot"), fp_a, fp_b);
auto scale_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1}}}), sc_c);
auto z_pt_c_bcast =
mm->add_instruction(migraphx::make_op("multibroadcast", {{"out_lens", {1, 1}}}), z_pt_c);
auto c =
mm->add_instruction(migraphx::make_op("quantizelinear"), fp_c, scale_c_bcast, z_pt_c_bcast);
mm->add_return({c});
auto prog = migraphx::parse_onnx("qlinearmatmul_2D_test.onnx");
EXPECT(p.sort() == prog.sort());
}
TEST_CASE(quantizelinear_test)
{
migraphx::program p;
@@ -5051,7 +5463,7 @@ TEST_CASE(reducel1_dyn_test)
// a shape with 4 dynamic dimensions
auto l0 = mm->add_parameter("x",
migraphx::shape{migraphx::shape::float_type,
{{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}}});
{{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}}});
auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
auto sum_ins =
mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {-2}}}), abs_ins);
@@ -5071,7 +5483,7 @@ TEST_CASE(reducel1_dyn_test)
// No axes given in the onnx file. Parser should default to all axes.
auto l0 = mm->add_parameter("x",
migraphx::shape{migraphx::shape::float_type,
{{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}}});
{{3, 3}, {3, 5}, {4, 6, {5}}, {5, 7, {6}}}});
auto abs_ins = mm->add_instruction(migraphx::make_op("abs"), l0);
auto sum_ins =
mm->add_instruction(migraphx::make_op("reduce_sum", {{"axes", {0, 1, 2, 3}}}), abs_ins);
@@ -5286,12 +5698,9 @@ TEST_CASE(reshape_test)
migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {2}}, reshape_dims});
auto l0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {4, 2, 3}});
op.dims = reshape_dims;
auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
mm->add_instruction(op, c0);
auto c1 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
mm->add_instruction(op, c1);
mm->add_instruction(op, l0);
mm->add_instruction(op, l0);
auto prog = optimize_onnx("reshape_test.onnx");
EXPECT(p == prog);
}
@@ -5304,13 +5713,44 @@ TEST_CASE(reshape_non_standard_test)
auto x = mm->add_parameter("x", s);
auto tran_x =
mm->add_instruction(migraphx::make_op("transpose", {{"permutation", {0, 2, 1}}}), x);
auto cont_x = mm->add_instruction(migraphx::make_op("contiguous"), tran_x);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {4, 3, 2}}}), cont_x);
mm->add_instruction(migraphx::make_op("reshape", {{"dims", {4, 3, 2}}}), tran_x);
auto prog = optimize_onnx("reshape_non_standard_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reshape_variable_input_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto p0 = mm->add_parameter("0", migraphx::shape{migraphx::shape::float_type, {4, 2, 3}});
auto p1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int64_type, {2}});
auto alloc = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}), p1);
mm->add_instruction(migraphx::make_op("reshape"), p0, alloc);
auto prog = optimize_onnx("reshape_variable_input_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reshape_variable_input_dyn_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto p0 = mm->add_parameter(
"0", migraphx::shape{migraphx::shape::float_type, {{1, 4}, {2, 2}, {3, 3}}});
auto p1 = mm->add_parameter("1", migraphx::shape{migraphx::shape::int64_type, {2}});
auto alloc = mm->add_instruction(
migraphx::make_op("allocate", {{"buf_type", migraphx::shape::float_type}}), p1);
auto reshape = mm->add_instruction(migraphx::make_op("reshape"), p0, alloc);
mm->add_return({reshape});
migraphx::onnx_options options;
options.default_dyn_dim_value = {1, 4};
auto prog = parse_onnx("reshape_variable_input_dyn_test.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(resize_downsample_c_test)
{
migraphx::program p;
@@ -5871,7 +6311,13 @@ TEST_CASE(roialign_default_test)
auto rois = mm->add_parameter("rois", srois);
auto bi = mm->add_parameter("batch_ind", sbi);
auto r = mm->add_instruction(migraphx::make_op("roialign"), x, rois, bi);
// Because the ONNX model uses opset 12, coordinate_transformation_mode should be set to
// output_half_pixel
auto r = mm->add_instruction(
migraphx::make_op("roialign", {{"coordinate_transformation_mode", "output_half_pixel"}}),
x,
rois,
bi);
mm->add_return({r});
auto prog = migraphx::parse_onnx("roialign_default_test.onnx");
@@ -6294,6 +6740,19 @@ TEST_CASE(slice_test)
EXPECT(p == prog);
}
TEST_CASE(slice_constant_test)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto l0 = mm->add_literal(migraphx::literal{
migraphx::shape{migraphx::shape::float_type, {3, 2}}, {0, 1, 2, 3, 4, 5}});
mm->add_instruction(
migraphx::make_op("slice", {{"axes", {0, 1}}, {"starts", {1, 0}}, {"ends", {2, 2}}}), l0);
auto prog = optimize_onnx("slice_constant_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(slice_dyn_test)
{
migraphx::program p;
@@ -6426,6 +6885,74 @@ TEST_CASE(slice_max_end_test)
EXPECT(p == prog);
}
TEST_CASE(slice_var_input_static0)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto data = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 2}});
auto starts = mm->add_parameter("starts", migraphx::shape{migraphx::shape::int32_type, {2}});
auto ends = mm->add_parameter("ends", migraphx::shape{migraphx::shape::int32_type, {2}});
mm->add_instruction(migraphx::make_op("slice", {{"axes", {0, 1}}}), data, starts, ends);
auto prog = optimize_onnx("slice_var_input_static0.onnx");
EXPECT(p == prog);
}
TEST_CASE(slice_var_input_static1)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto data = mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 2}});
auto starts = mm->add_parameter("starts", migraphx::shape{migraphx::shape::int64_type, {2}});
auto ends = mm->add_parameter("ends", migraphx::shape{migraphx::shape::int64_type, {2}});
auto axes = mm->add_parameter("axes", migraphx::shape{migraphx::shape::int64_type, {2}});
mm->add_instruction(migraphx::make_op("slice"), data, starts, ends, axes);
auto prog = optimize_onnx("slice_var_input_static1.onnx");
EXPECT(p == prog);
}
TEST_CASE(slice_var_input_dyn0)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto data =
mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {{3, 8}, {2, 2}}});
auto starts = mm->add_parameter("starts", migraphx::shape{migraphx::shape::int32_type, {2}});
auto ends = mm->add_parameter("ends", migraphx::shape{migraphx::shape::int32_type, {2}});
auto ret =
mm->add_instruction(migraphx::make_op("slice", {{"axes", {0, 1}}}), data, starts, ends);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {3, 8};
auto prog = parse_onnx("slice_var_input_dyn0.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(slice_var_input_dyn1)
{
migraphx::program p;
auto* mm = p.get_main_module();
auto data =
mm->add_parameter("data", migraphx::shape{migraphx::shape::float_type, {{3, 8}, {2, 2}}});
auto starts = mm->add_parameter("starts", migraphx::shape{migraphx::shape::int32_type, {2}});
auto ends = mm->add_parameter("ends", migraphx::shape{migraphx::shape::int32_type, {2}});
auto axes = mm->add_parameter("axes", migraphx::shape{migraphx::shape::int32_type, {2}});
auto ret = mm->add_instruction(migraphx::make_op("slice"), data, starts, ends, axes);
mm->add_return({ret});
migraphx::onnx_options options;
options.default_dyn_dim_value = {3, 8};
auto prog = parse_onnx("slice_var_input_dyn1.onnx", options);
EXPECT(p == prog);
}
TEST_CASE(slice_var_input_steps_error)
{
EXPECT(test::throws([&] { migraphx::parse_onnx("slice_var_input_steps_error.onnx"); }));
}
TEST_CASE(softmax_test)
{
migraphx::program p;
@@ -6674,7 +7201,7 @@ TEST_CASE(squeeze_unsqueeze_dyn_test)
std::vector<int64_t> unsqueeze_axes{0, 1, 3, 5};
auto l0 = mm->add_parameter("0",
migraphx::shape{migraphx::shape::float_type,
{{1, 1}, {1, 4}, {1, 1}, {1, 1}, {1, 4}, {1, 1}}});
{{1, 1}, {1, 4}, {1, 1}, {1, 1}, {1, 4}, {1, 1}}});
auto c0 = mm->add_instruction(migraphx::make_op("contiguous"), l0);
auto l1 = mm->add_instruction(migraphx::make_op("squeeze", {{"axes", squeeze_axes}}), c0);
auto c1 = mm->add_instruction(migraphx::make_op("contiguous"), l1);
@@ -6754,7 +7281,7 @@ TEST_CASE(sum_int_test)
auto input2 = mm->add_parameter("2", migraphx::shape{migraphx::shape::uint32_type, {3}});
auto cin0 = mm->add_instruction(
migraphx::make_op("convert",
{{"target_type", migraphx::to_value(migraphx::shape::uint32_type)}}),
{{"target_type", migraphx::to_value(migraphx::shape::uint32_type)}}),
input0);
auto cin1 = mm->add_instruction(
migraphx::make_op("convert",
[Binary ONNX protobuf test file (content not human-readable): qlinearmatmul_3D_test.onnx]