Commit 20b1d690 authored by Paul

Merge branch 'develop' into tests

parents 17aaaa1e ba729cfc
[binary ONNX protobuf content omitted: generated Erf and Expand example models (Expand uses a constant shape_tensor input)]
import numpy as np
import onnx
from onnx import helper
from onnx import numpy_helper
from onnx import AttributeProto, TensorProto, GraphProto
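# Each generator below returns a tuple (nodes, inputs, outputs[, initializers]).
# The onnx_test decorator wraps it in run_test(), which builds an ONNX graph
# named after the generator function and serializes the model to '<name>.onnx'.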
def onnx_test(op_test):
def run_test():
op_info = op_test()
if len(op_info) > 3:
graph_def = helper.make_graph(op_info[0],
op_test.__name__,
op_info[1],
op_info[2],
initializer=op_info[3])
else:
graph_def = helper.make_graph(op_info[0], op_test.__name__,
op_info[1], op_info[2])
model_def = helper.make_model(graph_def,
producer_name=op_test.__name__)
onnx.save(model_def, '{}.onnx'.format(op_test.__name__))
return run_test
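# Illustrative usage (assumed workflow, not part of this script): calling any
# decorated generator writes its model into the working directory, and the
# result can be sanity-checked with the standard ONNX checker, e.g.
#
#   acos_test()                                     # writes acos_test.onnx
#   onnx.checker.check_model(onnx.load('acos_test.onnx'))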
@onnx_test
def acos_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Acos',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def add_bcast_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node('Add',
inputs=['0', '1'],
broadcast=1,
axis=1,
outputs=['2'])
return ([node], [x, y], [z])
@onnx_test
def add_fp16_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT16, [1])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT16, [1])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT16, [1])
node = onnx.helper.make_node(
'Add',
inputs=['0', '1'],
outputs=['2'],
)
return (
[node],
[x, y],
[z],
        # raw fp16 bit patterns: 15872 == 0x3E00 == 1.5, 16640 == 0x4100 == 2.5
[
onnx.helper.make_tensor('0', TensorProto.FLOAT16, [1], [15872]),
onnx.helper.make_tensor('1', TensorProto.FLOAT16, [1], [16640])
])
@onnx_test
def add_scalar_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node('Add', inputs=['0', '1'], outputs=['2'])
return ([node], [x, y], [z],
[helper.make_tensor('1', TensorProto.FLOAT, [], [1])])
@onnx_test
def argmax_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 6])
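    # keepdims=0 drops the reduced axis (axis=2), so y has rank 3; the C++
    # parser tests below model this as an argmax followed by a squeeze on that axis.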
node = onnx.helper.make_node('ArgMax',
inputs=['x'],
outputs=['y'],
axis=2,
keepdims=0)
return ([node], [x], [y])
@onnx_test
def argmin_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5])
node = onnx.helper.make_node('ArgMin',
inputs=['x'],
outputs=['y'],
axis=3,
keepdims=0)
return ([node], [x], [y])
@onnx_test
def asin_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Asin',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def atan_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Atan',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def cast_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT16, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node('Cast', inputs=['x'], outputs=['y'], to=1)
return ([node], [x], [y])
@onnx_test
def clip_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
node = onnx.helper.make_node('Clip',
inputs=['0'],
outputs=['1'],
max=6.0,
min=0.0)
return ([node], [x], [y])
@onnx_test
def concat_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 4, 3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7, 4, 3])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [9, 4, 3])
node = onnx.helper.make_node(
'Concat',
inputs=['0', '1'],
axis=0,
outputs=['2'],
)
return ([node], [x, y], [z])
@onnx_test
def constant_test():
x = np.array([0, 1, 2])
y = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['0'],
value=onnx.helper.make_tensor(
name='const_tensor',
data_type=TensorProto.FLOAT,
dims=x.shape,
vals=x.flatten().astype(float),
),
)
return ([node], [], [y])
@onnx_test
def constant_fill_test():
value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
node = onnx.helper.make_node(
'ConstantFill',
inputs=[],
outputs=['value'],
dtype=1,
value=1.0,
shape=[2, 3],
input_as_shape=0,
)
return ([node], [], [value])
@onnx_test
def constant_fill_input_as_shape_test():
np_shape = np.array([2, 3])
shape = helper.make_tensor_value_info('shape', TensorProto.INT32, [2])
value = helper.make_tensor_value_info('value', TensorProto.FLOAT, [2, 3])
ts_shape = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=np_shape.shape,
vals=np_shape.flatten().astype(int))
const_shape_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=ts_shape,
)
node = onnx.helper.make_node(
'ConstantFill',
inputs=['shape'],
outputs=['value'],
dtype=1,
value=1.0,
input_as_shape=1,
)
return ([const_shape_node, node], [], [value])
@onnx_test
def constant_scalar_test():
x = np.array([1])
y = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1])
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['0'],
value=onnx.helper.make_tensor(
name='const_tensor',
data_type=TensorProto.INT32,
dims=x.shape,
vals=x.flatten().astype(int),
),
)
return ([node], [], [y])
@onnx_test
def const_of_shape_empty_input_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
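    # an empty 'shape' input makes ConstantOfShape produce a scalar output,
    # which is what this test exercises; shape_val above is intentionally unused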
empty_val = np.array([]).astype(np.int64)
empty_ts = helper.make_tensor(name='empty_tensor',
data_type=TensorProto.INT32,
dims=empty_val.shape,
vals=empty_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=empty_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node(
'ConstantOfShape',
inputs=['shape'],
outputs=['y'],
value=tensor_val,
)
return ([shape_const, node], [], [y])
@onnx_test
def const_of_shape_float_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.FLOAT, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
outputs=['y'],
value=tensor_val)
return ([shape_const, node], [], [y])
@onnx_test
def const_of_shape_int64_test():
tensor_val = onnx.helper.make_tensor('value', onnx.TensorProto.INT64, [1],
[10])
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node('ConstantOfShape',
inputs=['shape'],
outputs=['y'],
value=tensor_val)
return ([shape_const, node], [], [y])
@onnx_test
def const_of_shape_no_value_attr_test():
shape_val = np.array([2, 3, 4]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4])
node = onnx.helper.make_node(
'ConstantOfShape',
inputs=['shape'],
outputs=['y'],
)
return ([shape_const, node], [], [y])
@onnx_test
def conv_autopad_fail_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
out = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 34, 34])
node = onnx.helper.make_node('Conv',
inputs=['0', '1'],
outputs=['2'],
dilations=[1, 1],
strides=[1, 1],
auto_pad='SAME',
pads=[0, 0, 1, 1, 0, 0, 1, 1])
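    # auto_pad and explicit pads are mutually exclusive, so the parser is
    # expected to reject this model (see conv_autopad_fail_test in the C++ tests)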
return ([node], [x, y], [out])
@onnx_test
def conv_bias_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 2, 28, 28])
node = onnx.helper.make_node('Conv',
inputs=['0', '1', '2'],
outputs=['3'],
dilations=[1, 1],
strides=[1, 1])
return ([node], [x, y, z], [out])
@onnx_test
def conv_bn_relu_maxpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
m = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1])
n = helper.make_tensor_value_info('4', TensorProto.FLOAT, [1])
k = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1])
l = helper.make_tensor_value_info('6', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('10', TensorProto.FLOAT,
[1, 1, 14, 14])
node0 = onnx.helper.make_node('Conv',
inputs=['0', '1', '2'],
outputs=['7'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node1 = onnx.helper.make_node('BatchNormalization',
inputs=['7', '3', '4', '5', '6'],
outputs=['8'],
epsilon=9.99999974737875e-06,
momentum=0.899999976158142)
node2 = onnx.helper.make_node('Relu', inputs=['8'], outputs=['9'])
node3 = onnx.helper.make_node('MaxPool',
inputs=['9'],
outputs=['10'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
return ([node0, node1, node2, node3], [x, y, z, m, n, k, l], [out])
@onnx_test
def conv_relu_maxpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 5, 5])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('5', TensorProto.FLOAT, [1, 1, 14, 14])
node1 = onnx.helper.make_node('Conv',
inputs=['0', '1', '2'],
outputs=['3'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node2 = onnx.helper.make_node('Relu', inputs=['3'], outputs=['4'])
node3 = onnx.helper.make_node('MaxPool',
inputs=['4'],
outputs=['5'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
return ([node1, node2, node3], [x, y, z], [out])
@onnx_test
def conv_relu_maxpool_x2_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 32, 32])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [5, 3, 5, 5])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5])
m = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 5, 5, 5])
n = helper.make_tensor_value_info('4', TensorProto.FLOAT, [1])
out = helper.make_tensor_value_info('10', TensorProto.FLOAT, [1, 1, 5, 5])
node1 = onnx.helper.make_node('Conv',
inputs=['0', '1', '2'],
outputs=['5'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node2 = onnx.helper.make_node('Relu', inputs=['5'], outputs=['6'])
node3 = onnx.helper.make_node('MaxPool',
inputs=['6'],
outputs=['7'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
node4 = onnx.helper.make_node('Conv',
inputs=['7', '3', '4'],
outputs=['8'],
dilations=[1, 1],
strides=[1, 1],
pads=[0, 0, 0, 0])
node5 = onnx.helper.make_node('Relu', inputs=['8'], outputs=['9'])
node6 = onnx.helper.make_node('MaxPool',
inputs=['9'],
outputs=['10'],
pads=[0, 0, 0, 0],
strides=[2, 2],
kernel_shape=[2, 2])
return ([node1, node2, node3, node4, node5, node6], [x, y, z, m, n], [out])
@onnx_test
def cos_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Cos',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def cosh_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
node = onnx.helper.make_node(
'Cosh',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def dropout_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 2, 2])
node = onnx.helper.make_node(
'Dropout',
inputs=['0'],
outputs=['1'],
)
return ([node], [x], [y])
@onnx_test
def elu_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
node = onnx.helper.make_node('Elu',
inputs=['0'],
outputs=['1'],
alpha=0.01)
return ([node], [x], [y])
@onnx_test
def erf_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10, 15])
node = onnx.helper.make_node(
'Erf',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def exp_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Exp',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def expand_test():
shape_val = np.array([2, 3, 4, 5]).astype(np.int64)
shape_ts = helper.make_tensor(name='shape_tensor',
data_type=TensorProto.INT32,
dims=shape_val.shape,
vals=shape_val.flatten().astype(int))
shape_const = helper.make_node(
'Constant',
inputs=[],
outputs=['shape'],
value=shape_ts,
)
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 1, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node('Expand',
inputs=['x', 'shape'],
outputs=['y'])
return ([shape_const, node], [x], [y])
@onnx_test
def flatten_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [6, 20])
y2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 60])
node = onnx.helper.make_node('Flatten',
inputs=['0'],
axis=2,
outputs=['2'])
node2 = onnx.helper.make_node('Flatten', inputs=['0'], outputs=['3'])
return ([node, node2], [x], [y, y2])
@onnx_test
def gather_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 4, 5, 6])
i = helper.make_tensor_value_info('indices', TensorProto.INT32,
[2, 3, 4, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node(
'Gather',
inputs=['data', 'indices'],
outputs=['y'],
axis=1,
)
return ([node], [x, i], [y])
@onnx_test
def gemm_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [5, 7])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [11, 5])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [])
a = helper.make_tensor_value_info('3', TensorProto.FLOAT, [7, 11])
node = onnx.helper.make_node('Gemm',
inputs=['0', '1', '2'],
outputs=['3'],
alpha=2.0,
beta=2.0,
transA=1,
transB=1)
return ([node], [x, y, z], [a])
@onnx_test
def gemm_ex_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 1, 5, 6])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 7])
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 7])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7])
node = onnx.helper.make_node('Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
return ([node], [m1, m2, m3], [y])
@onnx_test
def gemm_ex_brcst_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 1, 5, 6])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 1, 5, 7])
m3 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [1, 1, 6, 1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1, 1, 6, 7])
node = onnx.helper.make_node('Gemm',
inputs=['1', '2', '3'],
outputs=['y'],
alpha=0.5,
beta=0.8,
transA=1)
return ([node], [m1, m2, m3], [y])
@onnx_test
def globalavgpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
node = onnx.helper.make_node(
'GlobalAveragePool',
inputs=['0'],
outputs=['1'],
)
return ([node], [x], [y])
@onnx_test
def globalmaxpool_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 1, 1])
node = onnx.helper.make_node(
'GlobalMaxPool',
inputs=['0'],
outputs=['1'],
)
return ([node], [x], [y])
@onnx_test
def group_conv_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 4, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 1, 3, 3])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [1, 4, 14, 14])
node = onnx.helper.make_node(
'Conv',
inputs=['0', '1'],
group=4,
outputs=['2'],
)
return ([node], [x, y], [z])
@onnx_test
def imagescaler_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3, 16, 16])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 16, 16])
node = onnx.helper.make_node('ImageScaler',
inputs=['0'],
outputs=['1'],
bias=[0.01, 0.02, 0.03],
scale=0.5)
return ([node], [x], [y])
@onnx_test
def implicit_add_bcast_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4, 1])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node(
'Add',
inputs=['0', '1'],
outputs=['2'],
)
return ([node], [x, y], [z])
@onnx_test
def implicit_pow_bcast_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4, 1])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[2, 3, 4, 5])
node = onnx.helper.make_node(
'Pow',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def implicit_sub_bcast_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 5])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[2, 3, 4, 5])
node = onnx.helper.make_node(
'Sub',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def leaky_relu_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
node = onnx.helper.make_node('LeakyRelu',
inputs=['0'],
outputs=['1'],
alpha=0.01)
return ([node], [x], [y])
@onnx_test
def log_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Log',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def logsoftmax_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 5, 6])
node = onnx.helper.make_node('LogSoftmax',
inputs=['x'],
outputs=['y'],
axis=1)
return ([node], [x], [y])
@onnx_test
def lrn_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 28, 24, 24])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 28, 24, 24])
node = onnx.helper.make_node('LRN',
inputs=['0'],
size=5,
alpha=0.0001,
beta=0.75,
bias=1.0,
outputs=['1'])
return ([node], [x], [y])
@onnx_test
def matmul_bmbm_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5, 2, 1, 7, 8])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5, 2, 3, 6, 8])
node = onnx.helper.make_node(
'MatMul',
inputs=['1', '2'],
outputs=['y'],
)
return ([node], [m1, m2], [y])
@onnx_test
def matmul_bmv_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 6, 7])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 6])
node = onnx.helper.make_node(
'MatMul',
inputs=['1', '2'],
outputs=['y'],
)
return ([node], [m1, m2], [y])
@onnx_test
def matmul_mv_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [6, 7])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [6])
node = onnx.helper.make_node(
'MatMul',
inputs=['1', '2'],
outputs=['y'],
)
return ([node], [m1, m2], [y])
@onnx_test
def matmul_vbm_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [5, 7, 8])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [5, 8])
node = onnx.helper.make_node(
'MatMul',
inputs=['1', '2'],
outputs=['y'],
)
return ([node], [m1, m2], [y])
@onnx_test
def matmul_vm_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7, 8])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [8])
node = onnx.helper.make_node(
'MatMul',
inputs=['1', '2'],
outputs=['y'],
)
return ([node], [m1, m2], [y])
@onnx_test
def matmul_vv_test():
m1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [7])
m2 = helper.make_tensor_value_info('2', TensorProto.FLOAT, [7])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
node = onnx.helper.make_node(
'MatMul',
inputs=['1', '2'],
outputs=['y'],
)
return ([node], [m1, m2], [y])
@onnx_test
def max_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
    y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Max',
inputs=['0', '1', '2'],
outputs=['3'],
)
return ([node], [a, b, c], [y])
@onnx_test
def min_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
    y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Min',
inputs=['0', '1', '2'],
outputs=['3'],
)
return ([node], [a, b, c], [y])
@onnx_test
def no_pad_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 2])
node = onnx.helper.make_node('Pad',
inputs=['0'],
pads=[0, 0, 0, 0],
outputs=['1'])
return ([node], [x], [y])
@onnx_test
def pad_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [4, 4])
node = onnx.helper.make_node('Pad',
inputs=['0'],
pads=[1, 1, 1, 1],
outputs=['1'])
return ([node], [x], [y])
@onnx_test
def pow_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [2, 3, 4, 5])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[2, 3, 4, 5])
node = onnx.helper.make_node(
'Pow',
inputs=['0', '1'],
outputs=['out'],
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def reducemean_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4])
axes = [2, 3]
node = onnx.helper.make_node('ReduceMean',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
@onnx_test
def reducemean_keepdims_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 6])
axes = [2]
node = onnx.helper.make_node('ReduceMean',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
@onnx_test
def reducesum_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
axes = [2]
node = onnx.helper.make_node('ReduceSum',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
@onnx_test
def reducesum_multiaxis_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
axes = [2, 3]
node = onnx.helper.make_node('ReduceSum',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=0)
return ([node], [x], [y])
@onnx_test
def reducesum_keepdims_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [3, 4, 1, 1])
axes = [2, 3]
node = onnx.helper.make_node('ReduceSum',
inputs=['x'],
outputs=['y'],
axes=axes,
keepdims=1)
return ([node], [x], [y])
@onnx_test
def reshape_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [4, 2, 3])
x_shape = helper.make_tensor_value_info('1', TensorProto.INT64, [2])
x_shape_list = [3, 8]
y = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3, 8])
y2 = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3, 8])
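    # '1' is supplied both as a graph input and as an initializer below, so the
    # target shape [3, 8] is known at parse time; node2 instead passes the
    # target via the legacy 'shape' attribute.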
node = onnx.helper.make_node('Reshape', inputs=['0', '1'], outputs=['2'])
node2 = onnx.helper.make_node('Reshape',
inputs=['0'],
shape=x_shape_list,
outputs=['3'])
return ([node, node2], [x, x_shape], [y, y2],
[helper.make_tensor('1', TensorProto.INT64, [2], [3, 8])])
@onnx_test
def reshape_non_standard_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2, 3, 4])
trans_x = helper.make_tensor_value_info('trans_x', TensorProto.FLOAT,
[2, 4, 3])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [4, 3, 2])
trans = helper.make_node(
'Transpose',
inputs=['x'],
outputs=['trans_x'],
perm=[0, 2, 1],
)
res = onnx.helper.make_node('Reshape',
inputs=['trans_x'],
outputs=['y'],
shape=[4, 3, 2])
return ([trans, res], [x], [y])
@onnx_test
def shape_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3, 4, 5, 6])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [4])
node = onnx.helper.make_node(
'Shape',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def shape_gather_test():
values = np.array([1])
value = helper.make_tensor_value_info('value', TensorProto.INT32, [1])
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [7, 3, 10])
y = helper.make_tensor_value_info('y', TensorProto.INT64, [3])
z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [1])
value_tensor = helper.make_tensor(name='const_tensor',
data_type=TensorProto.INT32,
dims=values.shape,
vals=values.flatten().astype(int))
node_const = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['value'],
value=value_tensor,
)
node_shape = onnx.helper.make_node(
'Shape',
inputs=['x'],
outputs=['y'],
)
node_gather = helper.make_node(
'Gather',
inputs=['y', 'value'],
outputs=['z'],
axis=0,
)
return ([node_const, node_shape, node_gather], [x], [z])
@onnx_test
def sign_test():
x = helper.make_tensor_value_info('x', TensorProto.DOUBLE, [10, 5])
y = helper.make_tensor_value_info('y', TensorProto.DOUBLE, [10, 5])
node = onnx.helper.make_node(
'Sign',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def sin_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Sin',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def sinh_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Sinh',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def slice_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3, 2])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 2])
node = onnx.helper.make_node('Slice',
inputs=['0'],
axes=[0, 1],
starts=[1, 0],
ends=[2, 2],
outputs=['1'])
return ([node], [x], [y])
@onnx_test
def softmax_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3])
node = onnx.helper.make_node('Softmax', inputs=['0'], outputs=['1'])
return ([node], [x], [y])
@onnx_test
def sqrt_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10, 15])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10, 15])
node = onnx.helper.make_node(
'Sqrt',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def squeeze_unsqueeze_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[1, 3, 1, 1, 2, 1])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 2])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT,
[1, 1, 3, 1, 2, 1])
node = onnx.helper.make_node('Squeeze',
inputs=['0'],
axes=[0, 2, 3, 5],
outputs=['1'])
node2 = onnx.helper.make_node('Unsqueeze',
inputs=['1'],
axes=[0, 1, 3, 5],
outputs=['2'])
return ([node, node2], [x], [z])
@onnx_test
def sub_bcast_test():
arg0 = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
arg1 = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[2, 3, 4, 5])
node = onnx.helper.make_node(
'Sub',
inputs=['0', '1'],
outputs=['out'],
broadcast=1,
axis=1,
)
return ([node], [arg0, arg1], [arg_out])
@onnx_test
def sub_scalar_test():
values = np.array([1])
arg_node = helper.make_tensor_value_info('0', TensorProto.FLOAT,
[2, 3, 4, 5])
arg_out = helper.make_tensor_value_info('out', TensorProto.FLOAT,
[2, 3, 4, 5])
values_tensor = helper.make_tensor(name='const',
data_type=TensorProto.FLOAT,
dims=values.shape,
vals=values.flatten().astype(float))
arg_const = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['arg_const'],
value=values_tensor,
)
node = onnx.helper.make_node(
'Sub',
inputs=['0', 'arg_const'],
outputs=['out'],
)
return ([arg_const, node], [arg_node], [arg_out])
@onnx_test
def sum_test():
a = helper.make_tensor_value_info('0', TensorProto.FLOAT, [3])
b = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3])
c = helper.make_tensor_value_info('2', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('3', TensorProto.FLOAT, [3])
node = onnx.helper.make_node(
'Sum',
inputs=['0', '1', '2'],
outputs=['3'],
)
return ([node], [a, b, c], [y])
@onnx_test
def tan_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [10])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [10])
node = onnx.helper.make_node(
'Tan',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def tanh_test():
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [1])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [1])
node = onnx.helper.make_node(
'Tanh',
inputs=['x'],
outputs=['y'],
)
return ([node], [x], [y])
@onnx_test
def transpose_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [1, 2, 2, 3])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [1, 3, 2, 2])
node = onnx.helper.make_node(
'Transpose',
perm=[0, 3, 1, 2],
inputs=['0'],
outputs=['1'],
)
return ([node], [x], [y])
@onnx_test
def transpose_gather_test():
x = helper.make_tensor_value_info('data', TensorProto.FLOAT, [3, 5, 4, 6])
i = helper.make_tensor_value_info('indices', TensorProto.INT32,
[2, 4, 3, 5])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT,
[3, 2, 3, 4, 5, 4, 5, 6])
td = onnx.helper.make_node(
'Transpose',
inputs=['data'],
outputs=['tdata'],
perm=[0, 2, 1, 3],
)
ti = onnx.helper.make_node('Transpose',
inputs=['indices'],
outputs=['tindices'],
perm=[0, 2, 1, 3])
node = onnx.helper.make_node(
'Gather',
inputs=['tdata', 'tindices'],
outputs=['y'],
axis=1,
)
return ([td, ti, node], [x, i], [y])
@onnx_test
def unknown_test():
x = helper.make_tensor_value_info('0', TensorProto.FLOAT, [2, 3, 4, 5])
y = helper.make_tensor_value_info('1', TensorProto.FLOAT, [3, 4])
z = helper.make_tensor_value_info('2', TensorProto.FLOAT, [2, 3, 4, 5])
a = helper.make_tensor_value_info('3', TensorProto.FLOAT, [2, 3, 4, 5])
node = onnx.helper.make_node('Unknown', inputs=['0', '1'], outputs=['2'])
node2 = onnx.helper.make_node('Unknown', inputs=['2'], outputs=['3'])
return ([node, node2], [x, y], [a])
[binary ONNX protobuf diffs omitted: implicit broadcast, Pow, Sub/Add, LeakyRelu, and Cosh example models]
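The C++ parser tests below (updated side of the diff) replace the earlier hand-written cases such as pytorch_conv_bias_test with an alphabetized set matching the generator script above: each test builds the expected migraphx::program by hand and checks it against migraphx::parse_onnx on the corresponding generated .onnx file.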
...@@ -4,344 +4,519 @@ ...@@ -4,344 +4,519 @@
#include <migraphx/operators.hpp> #include <migraphx/operators.hpp>
#include <migraphx/program.hpp> #include <migraphx/program.hpp>
#include <migraphx/instruction.hpp> #include <migraphx/instruction.hpp>
#include <migraphx/instruction_ref.hpp>
#include <migraphx/onnx.hpp> #include <migraphx/onnx.hpp>
#include "test.hpp" #include "test.hpp"
TEST_CASE(pytorch_conv_bias_test) TEST_CASE(acos_test)
{ {
migraphx::program p; migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}}); auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}}); p.add_instruction(migraphx::op::acos{}, input);
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}});
uint64_t axis = 1; auto prog = migraphx::parse_onnx("acos_test.onnx");
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
p.add_instruction(migraphx::op::add{}, l3, l4);
auto prog = migraphx::parse_onnx("conv.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(pytorch_conv_relu_maxpool) TEST_CASE(add_bcast_test)
{ {
migraphx::program p; migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}}); auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}}); auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 4}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}}); auto l2 = p.add_instruction(migraphx::op::broadcast{1, l0->get_shape().lens()}, l1);
uint64_t axis = 1; p.add_instruction(migraphx::op::add{}, l0, l2);
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2); auto prog = migraphx::parse_onnx("add_bcast_test.onnx");
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::relu{}, l5);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
auto prog = migraphx::parse_onnx("conv_relu_maxpool.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(pytorch_conv_bn_relu_maxpool) TEST_CASE(add_fp16_test)
{ {
migraphx::program p; migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}}); auto l0 =
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}}); p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::half_type, {1}}, {1.5}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}}); auto l1 =
p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::half_type, {1}}, {2.5}});
auto p3 = p.add_parameter("3", {migraphx::shape::float_type, {1}}); p.add_instruction(migraphx::op::add{}, l0, l1);
auto p4 = p.add_parameter("4", {migraphx::shape::float_type, {1}}); auto prog = migraphx::parse_onnx("add_fp16_test.onnx");
auto p5 = p.add_parameter("5", {migraphx::shape::float_type, {1}});
auto p6 = p.add_parameter("6", {migraphx::shape::float_type, {1}});
uint64_t axis = 1;
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::batch_norm_inference{1.0e-5f}, l5, p3, p4, p5, p6);
auto l7 = p.add_instruction(migraphx::op::relu{}, l6);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
auto prog = migraphx::parse_onnx("conv_bn_relu_maxpool.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(pytorch_conv_relu_maxpool_x2) TEST_CASE(add_scalar_test)
{ {
migraphx::program p; migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}}); auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {5, 3, 5, 5}}); auto l1 = p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type}, {1}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {5}}); auto m0 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l0);
uint64_t axis = 1; auto m1 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l1);
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1); p.add_instruction(migraphx::op::add{}, m0, m1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2); auto prog = migraphx::parse_onnx("add_scalar_test.onnx");
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::relu{}, l5);
auto l7 = p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
auto l8 = p.add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
auto l9 = p.add_parameter("4", {migraphx::shape::float_type, {1}});
auto l10 = p.add_instruction(migraphx::op::convolution{}, l7, l8);
auto l11 = p.add_instruction(migraphx::op::broadcast{axis, l10->get_shape().lens()}, l9);
auto l12 = p.add_instruction(migraphx::op::add{}, l10, l11);
auto l13 = p.add_instruction(migraphx::op::relu{}, l12);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
auto prog = migraphx::parse_onnx("conv_relu_maxpoolX2.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(leaky_relu_test) TEST_CASE(argmax_test)
{ {
migraphx::program p; migraphx::program p;
float alpha = 0.01f; auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {3}}); auto ins = p.add_instruction(migraphx::op::argmax{2}, l0);
p.add_instruction(migraphx::op::leaky_relu{alpha}, l0); p.add_instruction(migraphx::op::squeeze{{2}}, ins);
auto prog = migraphx::parse_onnx("argmax_test.onnx");
auto prog = migraphx::parse_onnx("leaky_relu.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(imagescaler_test) TEST_CASE(argmin_test)
{ {
migraphx::program p; migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {1, 3, 16, 16}}; auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l0 = p.add_parameter("0", s); auto ins = p.add_instruction(migraphx::op::argmin{3}, l0);
auto scale_val = p.add_literal(0.5f); p.add_instruction(migraphx::op::squeeze{{3}}, ins);
auto bias_vals = p.add_literal( auto prog = migraphx::parse_onnx("argmin_test.onnx");
migraphx::literal{migraphx::shape{migraphx::shape::float_type, {3}}, {0.01, 0.02, 0.03}});
auto scaled_tensor = p.add_instruction(migraphx::op::scalar{s.lens()}, scale_val);
auto img_scaled = p.add_instruction(migraphx::op::mul{}, l0, scaled_tensor);
auto bias_bcast = p.add_instruction(migraphx::op::broadcast{1, s.lens()}, bias_vals);
p.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
auto prog = migraphx::parse_onnx("imagescaler_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(globalavgpool_test) TEST_CASE(asin_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}}); auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
auto op = migraphx::op::pooling{"average"}; p.add_instruction(migraphx::op::asin{}, input);
auto lens = input->get_shape().lens();
op.lengths = {lens[2], lens[3]};
p.add_instruction(op, input);
auto prog = migraphx::parse_onnx("globalavgpool_test.onnx"); auto prog = migraphx::parse_onnx("asin_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(globalmaxpool_test) TEST_CASE(atan_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}}); auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
auto op = migraphx::op::pooling{"max"}; p.add_instruction(migraphx::op::atan{}, input);
auto lens = input->get_shape().lens();
op.lengths = {lens[2], lens[3]};
p.add_instruction(op, input);
auto prog = migraphx::parse_onnx("globalmaxpool_test.onnx"); auto prog = migraphx::parse_onnx("atan_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(transpose_test) TEST_CASE(cast_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}}); auto l = p.add_parameter("x", migraphx::shape{migraphx::shape::half_type, {10}});
std::vector<int64_t> perm{0, 3, 1, 2}; p.add_instruction(migraphx::op::convert{migraphx::shape::float_type}, l);
p.add_instruction(migraphx::op::transpose{perm}, input);
auto prog = migraphx::parse_onnx("transpose_test.onnx");
auto prog = migraphx::parse_onnx("cast_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(dropout_test) TEST_CASE(clip_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 2, 2}}); auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}});
p.add_instruction(migraphx::op::identity{}, input); p.add_instruction(migraphx::op::clip{6.0, 0.0}, l0);
auto prog = migraphx::parse_onnx("clip_test.onnx");
auto prog = migraphx::parse_onnx("dropout_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(sum_test) TEST_CASE(concat_test)
{ {
migraphx::program p; migraphx::program p;
auto input0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}}); auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 4, 3}});
auto input1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3}}); auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {7, 4, 3}});
auto input2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {3}}); p.add_instruction(migraphx::op::concat{0}, l0, l1);
auto l0 = p.add_instruction(migraphx::op::add{}, input0, input1); auto prog = migraphx::parse_onnx("concat_test.onnx");
p.add_instruction(migraphx::op::add{}, l0, input2);
auto prog = migraphx::parse_onnx("sum_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(exp_test) TEST_CASE(constant_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::float_type, {3}}, {0, 1, 2}});
p.add_instruction(migraphx::op::exp{}, input); auto prog = migraphx::parse_onnx("constant_test.onnx");
auto prog = migraphx::parse_onnx("exp_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(log_test) TEST_CASE(constant_fill_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); migraphx::shape s{migraphx::shape::float_type, {2, 3}};
p.add_instruction(migraphx::op::log{}, input); std::vector<float> value(s.elements(), 1.0);
p.add_literal(migraphx::literal{s, value});
auto prog = migraphx::parse_onnx("constant_fill_test.onnx");
auto prog = migraphx::parse_onnx("log_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(sin_test) TEST_CASE(constant_fill_input_as_shape_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); auto l0 = p.add_literal(migraphx::literal{{migraphx::shape::int32_type, {2}}, {2, 3}});
p.add_instruction(migraphx::op::sin{}, input); std::vector<std::size_t> dims(l0->get_shape().elements());
migraphx::literal ls = l0->get_literal();
ls.visit([&](auto s) { dims.assign(s.begin(), s.end()); });
migraphx::shape s{migraphx::shape::float_type, dims};
std::vector<float> value(s.elements(), 1.0);
p.add_literal(migraphx::literal{s, value});
auto prog = migraphx::parse_onnx("constant_fill_input_as_shape_test.onnx");
auto prog = migraphx::parse_onnx("sin_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(cos_test) TEST_CASE(constant_scalar_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int32_type, {1}}, {1}});
p.add_instruction(migraphx::op::cos{}, input); auto prog = migraphx::parse_onnx("constant_scalar_test.onnx");
auto prog = migraphx::parse_onnx("cos_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(tan_test) TEST_CASE(const_of_shape_empty_input_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); p.add_literal(migraphx::literal());
p.add_instruction(migraphx::op::tan{}, input); migraphx::shape s(migraphx::shape::int64_type, {1}, {0});
std::vector<int64_t> vec(s.elements(), 10);
p.add_literal(migraphx::literal(s, vec));
auto prog = migraphx::parse_onnx("tan_test.onnx"); auto prog = migraphx::parse_onnx("const_of_shape_empty_input_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(sinh_test) TEST_CASE(const_of_shape_float_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); migraphx::shape ss(migraphx::shape::int32_type, {3});
p.add_instruction(migraphx::op::sinh{}, input); p.add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
auto prog = migraphx::parse_onnx("sinh_test.onnx"); std::vector<float> vec(s.elements(), 10.0f);
p.add_literal(migraphx::literal(s, vec));
auto prog = migraphx::parse_onnx("const_of_shape_float_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(cosh_test) TEST_CASE(const_of_shape_int64_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1}}); migraphx::shape ss(migraphx::shape::int32_type, {3});
p.add_instruction(migraphx::op::cosh{}, input); p.add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::int64_type, {2, 3, 4});
auto prog = migraphx::parse_onnx("cosh_test.onnx"); std::vector<int64_t> vec(s.elements(), 10);
p.add_literal(migraphx::literal(s, vec));
auto prog = migraphx::parse_onnx("const_of_shape_int64_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(tanh_test) TEST_CASE(const_of_shape_no_value_attr_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1}}); migraphx::shape ss(migraphx::shape::int32_type, {3});
p.add_instruction(migraphx::op::tanh{}, input); p.add_literal(migraphx::literal(ss, {2, 3, 4}));
migraphx::shape s(migraphx::shape::float_type, {2, 3, 4});
auto prog = migraphx::parse_onnx("tanh_test.onnx"); std::vector<float> vec(s.elements(), 0.0f);
p.add_literal(migraphx::literal(s, vec));
auto prog = migraphx::parse_onnx("const_of_shape_no_value_attr_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(elu_test) TEST_CASE(conv_autopad_fail_test)
{ {
migraphx::program p; EXPECT(test::throws([&] { migraphx::parse_onnx("conv_autopad_fail_test.onnx"); }));
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}}); }
p.add_instruction(migraphx::op::elu{0.01}, input);
auto prog = migraphx::parse_onnx("elu_test.onnx"); TEST_CASE(conv_bias_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}});
uint64_t axis = 1;
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
p.add_instruction(migraphx::op::add{}, l3, l4);
auto prog = migraphx::parse_onnx("conv_bias_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(asin_test) TEST_CASE(conv_bn_relu_maxpool_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
p.add_instruction(migraphx::op::asin{}, input); auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}});
auto prog = migraphx::parse_onnx("asin_test.onnx"); auto p3 = p.add_parameter("3", {migraphx::shape::float_type, {1}});
auto p4 = p.add_parameter("4", {migraphx::shape::float_type, {1}});
auto p5 = p.add_parameter("5", {migraphx::shape::float_type, {1}});
auto p6 = p.add_parameter("6", {migraphx::shape::float_type, {1}});
uint64_t axis = 1;
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::batch_norm_inference{1.0e-5f}, l5, p3, p4, p5, p6);
auto l7 = p.add_instruction(migraphx::op::relu{}, l6);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l7);
auto prog = migraphx::parse_onnx("conv_bn_relu_maxpool_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(max_test) TEST_CASE(conv_relu_maxpool_test)
{ {
migraphx::program p; migraphx::program p;
auto input0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}}); auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
auto input1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3}}); auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {1, 3, 5, 5}});
auto input2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {3}}); auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {1}});
auto l0 = p.add_instruction(migraphx::op::max{}, input0, input1); uint64_t axis = 1;
p.add_instruction(migraphx::op::max{}, l0, input2); auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::relu{}, l5);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
migraphx::parse_onnx("max_test.onnx"); auto prog = migraphx::parse_onnx("conv_relu_maxpool_test.onnx");
EXPECT(p == prog);
} }
TEST_CASE(acos_test) TEST_CASE(conv_relu_maxpool_x2_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {1, 3, 32, 32}});
p.add_instruction(migraphx::op::acos{}, input); auto l1 = p.add_parameter("1", {migraphx::shape::float_type, {5, 3, 5, 5}});
auto l2 = p.add_parameter("2", {migraphx::shape::float_type, {5}});
uint64_t axis = 1;
auto l3 = p.add_instruction(migraphx::op::convolution{}, l0, l1);
auto l4 = p.add_instruction(migraphx::op::broadcast{axis, l3->get_shape().lens()}, l2);
auto l5 = p.add_instruction(migraphx::op::add{}, l3, l4);
auto l6 = p.add_instruction(migraphx::op::relu{}, l5);
auto l7 = p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l6);
auto prog = migraphx::parse_onnx("acos_test.onnx"); auto l8 = p.add_parameter("3", {migraphx::shape::float_type, {1, 5, 5, 5}});
auto l9 = p.add_parameter("4", {migraphx::shape::float_type, {1}});
auto l10 = p.add_instruction(migraphx::op::convolution{}, l7, l8);
auto l11 = p.add_instruction(migraphx::op::broadcast{axis, l10->get_shape().lens()}, l9);
auto l12 = p.add_instruction(migraphx::op::add{}, l10, l11);
auto l13 = p.add_instruction(migraphx::op::relu{}, l12);
p.add_instruction(migraphx::op::pooling{"max", {{0, 0}}, {{2, 2}}, {{2, 2}}}, l13);
auto prog = migraphx::parse_onnx("conv_relu_maxpool_x2_test.onnx");
EXPECT(p == prog); EXPECT(p == prog);
} }
TEST_CASE(min_test) TEST_CASE(cos_test)
{ {
migraphx::program p; migraphx::program p;
auto input0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}}); auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
auto input1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3}}); p.add_instruction(migraphx::op::cos{}, input);
auto input2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {3}});
auto l0 = p.add_instruction(migraphx::op::min{}, input0, input1);
p.add_instruction(migraphx::op::min{}, l0, input2);
migraphx::parse_onnx("min_test.onnx"); auto prog = migraphx::parse_onnx("cos_test.onnx");
EXPECT(p == prog);
} }
TEST_CASE(atan_test) TEST_CASE(cosh_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1}});
p.add_instruction(migraphx::op::cosh{}, input);
auto prog = migraphx::parse_onnx("cosh_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(dropout_test)
{
migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 2, 2}});
p.add_instruction(migraphx::op::identity{}, input);
auto prog = migraphx::parse_onnx("dropout_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(elu_test)
{
migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}});
p.add_instruction(migraphx::op::elu{0.01}, input);
auto prog = migraphx::parse_onnx("elu_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(erf_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10, 15}});
p.add_instruction(migraphx::op::erf{}, input);
auto prog = migraphx::parse_onnx("erf_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(exp_test)
{ {
migraphx::program p; migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}}); auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
p.add_instruction(migraphx::op::atan{}, input); p.add_instruction(migraphx::op::exp{}, input);
auto prog = migraphx::parse_onnx("atan_test.onnx"); auto prog = migraphx::parse_onnx("exp_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(expand_test)
{
migraphx::program p;
migraphx::shape s(migraphx::shape::float_type, {3, 1, 1});
auto param = p.add_parameter("x", s);
migraphx::shape ss(migraphx::shape::int32_type, {4});
p.add_literal(migraphx::literal(ss, {2, 3, 4, 5}));
p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, param);
auto prog = migraphx::parse_onnx("expand_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(flatten_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
p.add_instruction(migraphx::op::flatten{2}, l0);
p.add_instruction(migraphx::op::flatten{1}, l0);
auto prog = migraphx::parse_onnx("flatten_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(gather_test)
{
migraphx::program p;
auto l0 = p.add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = p.add_parameter("indices", migraphx::shape{migraphx::shape::int32_type, {2, 3}});
int axis = 1;
p.add_instruction(migraphx::op::gather{axis}, l0, l1);
auto prog = migraphx::parse_onnx("gather_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(gemm_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {5, 7}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {11, 5}});
p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {}});
auto t0 = p.add_instruction(migraphx::op::transpose{{1, 0}}, l0);
auto t1 = p.add_instruction(migraphx::op::transpose{{1, 0}}, l1);
auto alpha = 2.f;
auto beta = 2.0f;
p.add_instruction(migraphx::op::dot{alpha, beta}, t0, t1);
auto prog = migraphx::parse_onnx("gemm_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(gemm_ex_test)
{
migraphx::program p;
auto l0 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 6}});
auto l1 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 7}});
auto l2 = p.add_parameter("3", migraphx::shape{migraphx::shape::float_type, {1, 1, 6, 7}});
auto t0 = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, l0);
auto alpha = 0.5f;
auto beta = 0.8f;
p.add_instruction(migraphx::op::dot{alpha, beta}, t0, l1, l2);
auto prog = migraphx::parse_onnx("gemm_ex_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(gemm_ex_brcst_test)
{
migraphx::program p;
auto l0 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 6}});
auto l1 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {1, 1, 5, 7}});
auto l2 = p.add_parameter("3", migraphx::shape{migraphx::shape::float_type, {1, 1, 6, 1}});
auto t0 = p.add_instruction(migraphx::op::transpose{{0, 1, 3, 2}}, l0);
std::vector<std::size_t> out_lens{1, 1, 6, 7};
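// the C operand is multibroadcast up to the dot output shape before accumulation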
auto t2 = p.add_instruction(migraphx::op::multibroadcast{out_lens}, l2);
auto alpha = 0.5f;
auto beta = 0.8f;
p.add_instruction(migraphx::op::dot{alpha, beta}, t0, l1, t2);
auto prog = migraphx::parse_onnx("gemm_ex_brcst_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(globalavgpool_test)
{
migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
auto op = migraphx::op::pooling{"average"};
auto lens = input->get_shape().lens();
op.lengths = {lens[2], lens[3]};
p.add_instruction(op, input);
auto prog = migraphx::parse_onnx("globalavgpool_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(globalmaxpool_test)
{
migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 16, 16}});
auto op = migraphx::op::pooling{"max"};
auto lens = input->get_shape().lens();
op.lengths = {lens[2], lens[3]};
p.add_instruction(op, input);
auto prog = migraphx::parse_onnx("globalmaxpool_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(group_conv_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 4, 16, 16}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 1, 3, 3}});
migraphx::op::convolution op;
op.group = 4;
p.add_instruction(op, l0, l1);
auto prog = migraphx::parse_onnx("group_conv_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(imagescaler_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {1, 3, 16, 16}};
auto l0 = p.add_parameter("0", s);
auto scale_val = p.add_literal(0.5f);
auto bias_vals = p.add_literal(
migraphx::literal{migraphx::shape{migraphx::shape::float_type, {3}}, {0.01, 0.02, 0.03}});
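// scale is broadcast across the whole tensor, bias across the channel axis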
auto scaled_tensor = p.add_instruction(migraphx::op::scalar{s.lens()}, scale_val);
auto img_scaled = p.add_instruction(migraphx::op::mul{}, l0, scaled_tensor);
auto bias_bcast = p.add_instruction(migraphx::op::broadcast{1, s.lens()}, bias_vals);
p.add_instruction(migraphx::op::add{}, img_scaled, bias_bcast);
auto prog = migraphx::parse_onnx("imagescaler_test.onnx");
EXPECT(p == prog);
}
...@@ -350,25 +525,26 @@ TEST_CASE(implicit_add_bcast_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 4, 1}});
auto l2 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l0);
auto l3 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l1);
p.add_instruction(migraphx::op::add{}, l2, l3);
auto prog = migraphx::parse_onnx("implicit_add_bcast_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(implicit_pow_bcast_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 4, 1}});
auto l2 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l0);
auto l3 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l1);
p.add_instruction(migraphx::op::pow{}, l2, l3);
auto prog = migraphx::parse_onnx("implicit_pow_bcast_test.onnx");
EXPECT(p == prog);
}
...@@ -377,7 +553,7 @@ TEST_CASE(implicit_sub_bcast_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {4, 5}});
auto l2 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l0);
auto l3 = p.add_instruction(migraphx::op::multibroadcast{{2, 3, 4, 5}}, l1);
p.add_instruction(migraphx::op::sub{}, l2, l3);
...@@ -387,323 +563,405 @@ TEST_CASE(implicit_sub_bcast_test)
EXPECT(p == prog);
}
TEST_CASE(leaky_relu_test)
{
migraphx::program p;
float alpha = 0.01f;
auto l0 = p.add_parameter("0", {migraphx::shape::float_type, {3}});
p.add_instruction(migraphx::op::leaky_relu{alpha}, l0);
auto prog = migraphx::parse_onnx("leaky_relu_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(log_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
p.add_instruction(migraphx::op::log{}, input);
auto prog = migraphx::parse_onnx("log_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(logsoftmax_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
int axis = 1;
p.add_instruction(migraphx::op::logsoftmax{axis}, l0);
auto prog = migraphx::parse_onnx("logsoftmax_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(lrn_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 28, 24, 24}});
migraphx::op::lrn op;
op.size = 5;
op.alpha = 0.0001;
op.beta = 0.75;
op.bias = 1.0;
p.add_instruction(op, l0);
auto prog = migraphx::parse_onnx("lrn_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(matmul_bmbm_test)
{
migraphx::program p;
auto l0 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 6, 7}});
auto l1 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {5, 2, 1, 7, 8}});
auto bl0 = p.add_instruction(migraphx::op::multibroadcast{{5, 2, 3, 6, 7}}, l0);
auto bl1 = p.add_instruction(migraphx::op::multibroadcast{{5, 2, 3, 7, 8}}, l1);
p.add_instruction(migraphx::op::dot{1.0f, 0.0f}, bl0, bl1);
auto prog = migraphx::parse_onnx("matmul_bmbm_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(matmul_bmv_test)
{
migraphx::program p;
auto l0 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 6, 7}});
auto l1 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {7}});
auto sl1 = p.add_instruction(migraphx::op::unsqueeze{{1}}, l1);
auto bsl1 = p.add_instruction(migraphx::op::multibroadcast{{3, 7, 1}}, sl1);
auto res = p.add_instruction(migraphx::op::dot{1.0f, 0.0f}, l0, bsl1);
p.add_instruction(migraphx::op::squeeze{{2}}, res);
auto prog = migraphx::parse_onnx("matmul_bmv_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(matmul_mv_test)
{
migraphx::program p;
auto l0 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {6, 7}});
auto l1 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {7}});
auto sl1 = p.add_instruction(migraphx::op::unsqueeze{{1}}, l1);
auto res = p.add_instruction(migraphx::op::dot{1.0f, 0.0f}, l0, sl1);
p.add_instruction(migraphx::op::squeeze{{1}}, res);
auto prog = migraphx::parse_onnx("matmul_mv_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(matmul_vbm_test)
{
migraphx::program p;
auto l0 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {7}});
auto l1 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {5, 7, 8}});
auto sl0 = p.add_instruction(migraphx::op::unsqueeze{{0}}, l0);
auto bsl0 = p.add_instruction(migraphx::op::multibroadcast{{5, 1, 7}}, sl0);
std::cout << "ONNX_TEST" << std::endl;
auto res = p.add_instruction(migraphx::op::dot{1.0f, 0.0f}, bsl0, l1);
std::cout << "After Dot" << std::endl;
p.add_instruction(migraphx::op::squeeze{{1}}, res);
auto prog = migraphx::parse_onnx("matmul_vbm_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(matmul_vm_test)
{
migraphx::program p;
auto l0 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {7}});
auto l1 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {7, 8}});
auto sl0 = p.add_instruction(migraphx::op::unsqueeze{{0}}, l0);
auto res = p.add_instruction(migraphx::op::dot{1.0f, 0.0f}, sl0, l1);
p.add_instruction(migraphx::op::squeeze{{0}}, res);
auto prog = migraphx::parse_onnx("matmul_vm_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(matmul_vv_test)
{
migraphx::program p;
auto l0 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {7}});
auto l1 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {7}});
auto sl0 = p.add_instruction(migraphx::op::unsqueeze{{0}}, l0);
auto sl1 = p.add_instruction(migraphx::op::unsqueeze{{1}}, l1);
auto res = p.add_instruction(migraphx::op::dot{1.0f, 0.0f}, sl0, sl1);
auto sr0 = p.add_instruction(migraphx::op::squeeze{{0}}, res);
p.add_instruction(migraphx::op::squeeze{{0}}, sr0);
auto prog = migraphx::parse_onnx("matmul_vv_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(max_test)
{
migraphx::program p;
auto input0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}});
auto input1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3}});
auto input2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {3}});
auto l0 = p.add_instruction(migraphx::op::max{}, input0, input1);
p.add_instruction(migraphx::op::max{}, l0, input2);
migraphx::parse_onnx("max_test.onnx");
}
TEST_CASE(min_test)
{
migraphx::program p;
auto input0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}});
auto input1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3}});
auto input2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {3}});
auto l0 = p.add_instruction(migraphx::op::min{}, input0, input1);
p.add_instruction(migraphx::op::min{}, l0, input2);
migraphx::parse_onnx("min_test.onnx");
}
TEST_CASE(no_pad_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
p.add_instruction(migraphx::op::identity{}, l0);
auto prog = migraphx::parse_onnx("no_pad_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(pad_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 2}});
p.add_instruction(migraphx::op::pad{{1, 1, 1, 1}}, l0);
auto prog = migraphx::parse_onnx("pad_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(pow_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
p.add_instruction(migraphx::op::pow{}, l0, l1);
auto prog = migraphx::parse_onnx("pow_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reducemean_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = p.add_instruction(migraphx::op::reduce_mean{{2, 3}}, l0);
p.add_instruction(migraphx::op::squeeze{{2, 3}}, l1);
auto prog = migraphx::parse_onnx("reducemean_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reducemean_keepdims_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
p.add_instruction(migraphx::op::reduce_mean{{2}}, l0);
auto prog = migraphx::parse_onnx("reducemean_keepdims_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reducesum_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = p.add_instruction(migraphx::op::reduce_sum{{2}}, l0);
p.add_instruction(migraphx::op::squeeze{{2}}, l1);
auto prog = migraphx::parse_onnx("reducesum_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reducesum_multiaxis_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
auto l1 = p.add_instruction(migraphx::op::reduce_sum{{2, 3}}, l0);
p.add_instruction(migraphx::op::squeeze{{2, 3}}, l1);
auto prog = migraphx::parse_onnx("reducesum_multiaxis_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reducesum_keepdims_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {3, 4, 5, 6}});
p.add_instruction(migraphx::op::reduce_sum{{2, 3}}, l0);
auto prog = migraphx::parse_onnx("reducesum_keepdims_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reshape_test)
{
migraphx::program p;
migraphx::op::reshape op;
std::vector<int64_t> reshape_dims{3, 8};
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {4, 2, 3}});
p.add_literal(migraphx::literal{migraphx::shape{migraphx::shape::int64_type, {2}}, reshape_dims});
op.dims = reshape_dims;
p.add_instruction(op, l0);
p.add_instruction(op, l0);
auto prog = migraphx::parse_onnx("reshape_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(reshape_non_standard_test)
{
migraphx::program p;
migraphx::op::reshape op;
std::vector<int64_t> reshape_dims{4, 3, 2};
migraphx::shape s{migraphx::shape::float_type, {2, 3, 4}};
auto x = p.add_parameter("x", s);
auto tran_x = p.add_instruction(migraphx::op::transpose{{0, 2, 1}}, x);
auto cont_x = p.add_instruction(migraphx::op::contiguous{}, tran_x);
p.add_instruction(migraphx::op::reshape{{4, 3, 2}}, cont_x);
auto prog = migraphx::parse_onnx("reshape_non_standard_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(round_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::double_type, {10, 5}});
p.add_instruction(migraphx::op::round{}, input);
auto prog = migraphx::parse_onnx("round_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(shape_test)
{
migraphx::program p;
migraphx::shape s{migraphx::shape::float_type, {3, 4, 5, 6}};
auto l0 = p.add_parameter("x", s);
migraphx::shape s_shape{migraphx::shape::int64_type, {4}};
p.add_literal(s_shape, l0->get_shape().lens());
auto prog = migraphx::parse_onnx("shape_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(shape_gather_test)
{
migraphx::program p;
auto l0 = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {7, 3, 10}});
auto l1 = p.add_literal(migraphx::shape{migraphx::shape::int64_type, {3}}, l0->get_shape().lens());
migraphx::shape const_shape{migraphx::shape::int32_type, {1}};
auto l2 = p.add_literal(migraphx::literal{const_shape, {1}});
int axis = 0;
p.add_instruction(migraphx::op::gather{axis}, l1, l2);
auto prog = migraphx::parse_onnx("shape_gather_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(sign_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::double_type, {10, 5}});
p.add_instruction(migraphx::op::sign{}, input);
auto prog = migraphx::parse_onnx("sign_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(sin_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
p.add_instruction(migraphx::op::sin{}, input);
auto prog = migraphx::parse_onnx("sin_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(sinh_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
p.add_instruction(migraphx::op::sinh{}, input);
auto prog = migraphx::parse_onnx("sinh_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(slice_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3, 2}});
p.add_instruction(migraphx::op::slice{{0, 1}, {1, 0}, {2, 2}}, l0);
auto prog = migraphx::parse_onnx("slice_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(softmax_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3}});
p.add_instruction(migraphx::op::softmax{1}, l0);
auto prog = migraphx::parse_onnx("softmax_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(sqrt_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10, 15}});
p.add_instruction(migraphx::op::sqrt{}, input);
auto prog = migraphx::parse_onnx("sqrt_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(squeeze_unsqueeze_test)
{
migraphx::program p;
std::vector<int64_t> squeeze_axes{0, 2, 3, 5};
std::vector<int64_t> unsqueeze_axes{0, 1, 3, 5};
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 3, 1, 1, 2, 1}});
auto l1 = p.add_instruction(migraphx::op::squeeze{squeeze_axes}, l0);
p.add_instruction(migraphx::op::unsqueeze{unsqueeze_axes}, l1);
auto prog = migraphx::parse_onnx("squeeze_unsqueeze_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(sub_bcast_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 4}});
auto l2 = p.add_instruction(migraphx::op::broadcast{1, l0->get_shape().lens()}, l1);
p.add_instruction(migraphx::op::sub{}, l0, l2);
auto prog = migraphx::parse_onnx("sub_bcast_test.onnx");
EXPECT(p == prog);
}
...@@ -722,84 +980,86 @@ TEST_CASE(sub_scalar_test)
EXPECT(p == prog);
}
TEST_CASE(sum_test)
{
migraphx::program p;
auto input0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {3}});
auto input1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3}});
auto input2 = p.add_parameter("2", migraphx::shape{migraphx::shape::float_type, {3}});
auto l0 = p.add_instruction(migraphx::op::add{}, input0, input1);
p.add_instruction(migraphx::op::add{}, l0, input2);
auto prog = migraphx::parse_onnx("sum_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(tan_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {10}});
p.add_instruction(migraphx::op::tan{}, input);
auto prog = migraphx::parse_onnx("tan_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(tanh_test)
{
migraphx::program p;
auto input = p.add_parameter("x", migraphx::shape{migraphx::shape::float_type, {1}});
p.add_instruction(migraphx::op::tanh{}, input);
auto prog = migraphx::parse_onnx("tanh_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(transpose_test)
{
migraphx::program p;
auto input = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {1, 2, 2, 3}});
std::vector<int64_t> perm{0, 3, 1, 2};
p.add_instruction(migraphx::op::transpose{perm}, input);
auto prog = migraphx::parse_onnx("transpose_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(transpose_gather_test)
{
migraphx::program p;
auto make_contiguous = [&p](migraphx::instruction_ref ins) {
if(ins->get_shape().standard())
{
return ins;
}
return p.add_instruction(migraphx::op::contiguous{}, ins);
};
auto data = p.add_parameter("data", migraphx::shape{migraphx::shape::float_type, {3, 5, 4, 6}});
auto ind = p.add_parameter("indices", migraphx::shape{migraphx::shape::int32_type, {2, 4, 3, 5}});
auto tr_data = p.add_instruction(migraphx::op::transpose{{0, 2, 1, 3}}, data);
auto tr_ind = p.add_instruction(migraphx::op::transpose{{0, 2, 1, 3}}, ind);
int axis = 1;
p.add_instruction(migraphx::op::gather{axis}, make_contiguous(tr_data), make_contiguous(tr_ind));
auto prog = migraphx::parse_onnx("transpose_gather_test.onnx");
EXPECT(p == prog);
}
TEST_CASE(unknown_test)
{
migraphx::program p;
auto l0 = p.add_parameter("0", migraphx::shape{migraphx::shape::float_type, {2, 3, 4, 5}});
auto l1 = p.add_parameter("1", migraphx::shape{migraphx::shape::float_type, {3, 4}});
auto l2 = p.add_instruction(migraphx::op::unknown{"Unknown"}, l0, l1);
p.add_instruction(migraphx::op::unknown{"Unknown"}, l2);
auto prog = migraphx::parse_onnx("unknown_test.onnx");
EXPECT(p == prog);
}
[binary ONNX diff, old and new revisions: graph "pow_test" — a Pow node with inputs "0" and "1" and output "out"]